1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <linux/firmware.h> 22 #include <net/vxlan.h> 23 #include <linux/kthread.h> 24 #include "liquidio_common.h" 25 #include "octeon_droq.h" 26 #include "octeon_iq.h" 27 #include "response_manager.h" 28 #include "octeon_device.h" 29 #include "octeon_nic.h" 30 #include "octeon_main.h" 31 #include "octeon_network.h" 32 #include "cn66xx_regs.h" 33 #include "cn66xx_device.h" 34 #include "cn68xx_device.h" 35 #include "cn23xx_pf_device.h" 36 #include "liquidio_image.h" 37 #include "lio_vf_rep.h" 38 39 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 40 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); 41 MODULE_LICENSE("GPL"); 42 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME 43 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME 45 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME 47 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME 49 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 50 51 static 
int ddr_timeout = 10000; 52 module_param(ddr_timeout, int, 0644); 53 MODULE_PARM_DESC(ddr_timeout, 54 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check"); 55 56 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 57 58 static int debug = -1; 59 module_param(debug, int, 0644); 60 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 61 62 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO; 63 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); 64 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\"."); 65 66 static u32 console_bitmask; 67 module_param(console_bitmask, int, 0644); 68 MODULE_PARM_DESC(console_bitmask, 69 "Bitmask indicating which consoles have debug output redirected to syslog."); 70 71 /** 72 * octeon_console_debug_enabled - determines if a given console has debug enabled. 73 * @console: console to check 74 * Return: 1 = enabled. 0 otherwise 75 */ 76 static int octeon_console_debug_enabled(u32 console) 77 { 78 return (console_bitmask >> (console)) & 0x1; 79 } 80 81 /* Polling interval for determining when NIC application is alive */ 82 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 83 84 /* runtime link query interval */ 85 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 86 /* update localtime to octeon firmware every 60 seconds. 87 * make firmware to use same time reference, so that it will be easy to 88 * correlate firmware logged events/errors with host events, for debugging. 
89 */ 90 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 91 92 /* time to wait for possible in-flight requests in milliseconds */ 93 #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) 94 95 struct oct_timestamp_resp { 96 u64 rh; 97 u64 timestamp; 98 u64 status; 99 }; 100 101 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) 102 103 union tx_info { 104 u64 u64; 105 struct { 106 #ifdef __BIG_ENDIAN_BITFIELD 107 u16 gso_size; 108 u16 gso_segs; 109 u32 reserved; 110 #else 111 u32 reserved; 112 u16 gso_segs; 113 u16 gso_size; 114 #endif 115 } s; 116 }; 117 118 /* Octeon device properties to be used by the NIC module. 119 * Each octeon device in the system will be represented 120 * by this structure in the NIC module. 121 */ 122 123 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 124 #define OCTNIC_GSO_MAX_SIZE \ 125 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 126 127 struct handshake { 128 struct completion init; 129 struct completion started; 130 struct pci_dev *pci_dev; 131 int init_ok; 132 int started_ok; 133 }; 134 135 #ifdef CONFIG_PCI_IOV 136 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); 137 #endif 138 139 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, 140 char *prefix, char *suffix); 141 142 static int octeon_device_init(struct octeon_device *); 143 static int liquidio_stop(struct net_device *netdev); 144 static void liquidio_remove(struct pci_dev *pdev); 145 static int liquidio_probe(struct pci_dev *pdev, 146 const struct pci_device_id *ent); 147 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 148 int linkstate); 149 150 static struct handshake handshake[MAX_OCTEON_DEVICES]; 151 static struct completion first_stage; 152 153 static void octeon_droq_bh(struct tasklet_struct *t) 154 { 155 int q_no; 156 int reschedule = 0; 157 struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t, 158 droq_tasklet); 159 struct octeon_device *oct = oct_priv->dev; 160 
161 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { 162 if (!(oct->io_qmask.oq & BIT_ULL(q_no))) 163 continue; 164 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], 165 MAX_PACKET_BUDGET); 166 lio_enable_irq(oct->droq[q_no], NULL); 167 168 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { 169 /* set time and cnt interrupt thresholds for this DROQ 170 * for NAPI 171 */ 172 int adjusted_q_no = q_no + oct->sriov_info.pf_srn; 173 174 octeon_write_csr64( 175 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no), 176 0x5700000040ULL); 177 octeon_write_csr64( 178 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0); 179 } 180 } 181 182 if (reschedule) 183 tasklet_schedule(&oct_priv->droq_tasklet); 184 } 185 186 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 187 { 188 struct octeon_device_priv *oct_priv = oct->priv; 189 int retry = 100, pkt_cnt = 0, pending_pkts = 0; 190 int i; 191 192 do { 193 pending_pkts = 0; 194 195 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 196 if (!(oct->io_qmask.oq & BIT_ULL(i))) 197 continue; 198 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 199 } 200 if (pkt_cnt > 0) { 201 pending_pkts += pkt_cnt; 202 tasklet_schedule(&oct_priv->droq_tasklet); 203 } 204 pkt_cnt = 0; 205 schedule_timeout_uninterruptible(1); 206 207 } while (retry-- && pending_pkts); 208 209 return pkt_cnt; 210 } 211 212 /** 213 * force_io_queues_off - Forces all IO queues off on a given device 214 * @oct: Pointer to Octeon device 215 */ 216 static void force_io_queues_off(struct octeon_device *oct) 217 { 218 if ((oct->chip_id == OCTEON_CN66XX) || 219 (oct->chip_id == OCTEON_CN68XX)) { 220 /* Reset the Enable bits for Input Queues. */ 221 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); 222 223 /* Reset the Enable bits for Output Queues. 
		 */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
 * @oct: Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			/* Advance the read index to the host write index so
			 * every queued request is treated as processed, then
			 * run the request-list callbacks to release them.
			 */
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed.
	 */
}

/**
 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
 * @dev: Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	/* NOTE(review): assumes the AER extended capability sits at config
	 * offset 0x100 — TODO confirm (pci_find_ext_capability() would be
	 * the robust way to locate it).
	 */
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * liquidio_pcie_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * liquidio_pcie_mmio_enabled - mmio handler
 * @pdev: Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
	/* Nothing to be done here. */
}

/* Suspend/resume are not supported; hand NULL callbacks to the PM core. */
#define liquidio_suspend NULL
#define liquidio_resume NULL

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */
	.driver.pm	= &liquidio_pm_ops,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * liquidio_init_pci - register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * liquidio_deinit_pci - unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * check_txq_status - Check Tx queue status, and take appropriate action
 * @lio: per-network private data
 * Return: 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		/* map the netdev sub-queue onto a device instruction queue */
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}

/**
 * print_link_info - Print link information
 * @netdev: network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * octnet_link_status_change - Routine to notify MTU change
 * @work: work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
	 * this API is invoked only when new max-MTU of the interface is
	 * less than current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}

/**
 * setup_link_status_change_wq - Sets up the mtu status change work
 * @netdev: network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM | WQ_PERCPU,
						 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

/* Tear down the MTU/link-status workqueue created above, if any. */
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	/* snapshot old state before overwriting so we can detect changes */
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			/* shrinking the MTU needs rtnl_lock, so defer to the
			 * link-status workqueue (octnet_link_status_change)
			 */
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct it's time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	} else {
		/* fire-and-forget: response handling will free the command */
		WRITE_ONCE(sc->caller_is_done, true);
	}

	/* re-arm the periodic sync */
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

/**
 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
 *
 * @netdev: network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time",
				WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}

/**
 * cleanup_sync_octeon_time_wq - destroy wq
 *
 * @netdev: network device which should send time update to firmware
 *
 * Stop scheduling and destroy the work created to periodically update local
 * time to octeon firmware.
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}

/* Return the next-enumerated octeon device if it shares this device's PCI
 * bus and slot (i.e. the other PF on the same adapter), else NULL.
 */
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}

/* Force the link state of every VF on every interface of @oct to DISABLE.
 * Tolerates a NULL @oct or missing netdevs.
 */
static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}

/* Kernel thread: poll CN23XX_SLI_SCRATCH2 for crashed/stuck firmware cores;
 * on first detection, log per-core errors, disable all VF links on both PFs
 * of the adapter, and drop module references held on behalf of VF drivers.
 */
static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
						(mask_of_crashed_or_stuck_cores
						 >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		/* NOTE(review): other_oct may be NULL here
		 * (get_other_octeon_device() can return NULL) yet it is
		 * dereferenced unconditionally — verify against caller.
		 */
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}

/**
 * liquidio_probe - PCI probe handler
 * @pdev: PCI device structure
 * @ent: unused
 */
static int
liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if (((pdev->device == OCTEON_CN66XX) ||
	     (pdev->device == OCTEON_CN68XX)))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread. The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_run(liquidio_watchdog,
							     oct_dev,
							     "liowd/%02hhx:%02hhx.%hhx",
							     bus, device, function);
			if (IS_ERR(oct_dev->watchdog_task)) {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

/* True when the fw_type module parameter is (still) "auto". */
static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * octeon_pci_flr - PCI FLR for each Octeon device.
 * @oct: octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * octeon_destroy_resources - Destroy resources associated with octeon device
 * @oct: octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv = oct->priv;

	struct handshake *hs;

	/* Teardown is a fallthrough state machine: entry at the device's
	 * current init state runs that state's undo step and every one below.
	 */
	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded.
		 */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		fallthrough;
	case OCT_DEV_HOST_OK:

	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		fallthrough;
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
		 */
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			struct octeon_instr_queue *iq;

			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			iq = oct->instr_queue[i];

			if (atomic_read(&iq->instr_pending)) {
				/* mark everything queued as processed and run
				 * the request-list callbacks to release it
				 */
				spin_lock_bh(&iq->lock);
				iq->fill_cnt = 0;
				iq->octeon_read_index = iq->host_write_index;
				iq->stats.instr_processed +=
					atomic_read(&iq->instr_pending);
				lio_process_iq_request_list(oct, iq, 0);
				spin_unlock_bh(&iq->lock);
			}
		}

		lio_process_ordered_list(oct, 1);
		octeon_free_sc_done_list(oct);
		octeon_free_sc_zombie_list(oct);

		fallthrough;
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		fallthrough;
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		fallthrough;
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		fallthrough;
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		fallthrough;
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		fallthrough;
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		fallthrough;
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		fallthrough;
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		fallthrough;
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		fallthrough;
	case OCT_DEV_PCI_ENABLE_DONE:
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		fallthrough;
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * send_rx_ctrl_cmd - Send Rx control command
 * @lio: per-network private data
 * @start_stop: whether to start or stop
 */
static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command struct\n");
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
		octeon_free_soft_command(oct, sc);
	} else {
		/*
Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		/* Command completed: record the new Rx state and mark the
		 * soft command reclaimable by the completion path.
		 */
		oct->props[lio->ifidx].rx_on = start_stop;
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct octeon_device_priv *oct_priv = oct->priv;
	struct napi_struct *napi, *n;
	struct lio *lio;

	/* Nothing to tear down if the interface never got a netdev. */
	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	/* Bring the interface down first so no new traffic is queued. */
	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	/* Quiesce NAPI before deleting the contexts below. */
	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	/* Rx processing falls back to the droq tasklet once NAPI is gone. */
	tasklet_enable(&oct_priv->droq_tasklet);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	/* Flush/destroy the per-netdev work queues before freeing it. */
	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);
1253 oct->props[ifidx].gmxport = -1; 1254 1255 oct->props[ifidx].netdev = NULL; 1256 } 1257 1258 /** 1259 * liquidio_stop_nic_module - Stop complete NIC functionality 1260 * @oct: octeon device 1261 */ 1262 static int liquidio_stop_nic_module(struct octeon_device *oct) 1263 { 1264 int i, j; 1265 struct lio *lio; 1266 1267 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1268 device_lock(&oct->pci_dev->dev); 1269 if (oct->devlink) { 1270 devlink_unregister(oct->devlink); 1271 devlink_free(oct->devlink); 1272 oct->devlink = NULL; 1273 } 1274 device_unlock(&oct->pci_dev->dev); 1275 1276 if (!oct->ifcount) { 1277 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1278 return 1; 1279 } 1280 1281 spin_lock_bh(&oct->cmd_resp_wqlock); 1282 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1283 spin_unlock_bh(&oct->cmd_resp_wqlock); 1284 1285 lio_vf_rep_destroy(oct); 1286 1287 for (i = 0; i < oct->ifcount; i++) { 1288 lio = GET_LIO(oct->props[i].netdev); 1289 for (j = 0; j < oct->num_oqs; j++) 1290 octeon_unregister_droq_ops(oct, 1291 lio->linfo.rxpciq[j].s.q_no); 1292 } 1293 1294 for (i = 0; i < oct->ifcount; i++) 1295 liquidio_destroy_nic_device(oct, i); 1296 1297 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1298 return 0; 1299 } 1300 1301 /** 1302 * liquidio_remove - Cleans up resources at unload time 1303 * @pdev: PCI device structure 1304 */ 1305 static void liquidio_remove(struct pci_dev *pdev) 1306 { 1307 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1308 1309 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1310 1311 if (oct_dev->watchdog_task) 1312 kthread_stop(oct_dev->watchdog_task); 1313 1314 if (!oct_dev->octeon_id && 1315 oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) 1316 lio_vf_rep_modexit(); 1317 1318 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) 1319 liquidio_stop_nic_module(oct_dev); 1320 1321 /* Reset the octeon device and cleanup all memory allocated for 1322 * the octeon 
device by driver. 1323 */ 1324 octeon_destroy_resources(oct_dev); 1325 1326 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1327 1328 /* This octeon device has been removed. Update the global 1329 * data structure to reflect this. Free the device structure. 1330 */ 1331 octeon_free_device_mem(oct_dev); 1332 } 1333 1334 /** 1335 * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space 1336 * @oct: octeon device 1337 */ 1338 static int octeon_chip_specific_setup(struct octeon_device *oct) 1339 { 1340 u32 dev_id, rev_id; 1341 int ret = 1; 1342 1343 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1344 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1345 oct->rev_id = rev_id & 0xff; 1346 1347 switch (dev_id) { 1348 case OCTEON_CN68XX_PCIID: 1349 oct->chip_id = OCTEON_CN68XX; 1350 ret = lio_setup_cn68xx_octeon_device(oct); 1351 break; 1352 1353 case OCTEON_CN66XX_PCIID: 1354 oct->chip_id = OCTEON_CN66XX; 1355 ret = lio_setup_cn66xx_octeon_device(oct); 1356 break; 1357 1358 case OCTEON_CN23XX_PCIID_PF: 1359 oct->chip_id = OCTEON_CN23XX_PF_VID; 1360 ret = setup_cn23xx_octeon_pf_device(oct); 1361 if (ret) 1362 break; 1363 #ifdef CONFIG_PCI_IOV 1364 if (!ret) 1365 pci_sriov_set_totalvfs(oct->pci_dev, 1366 oct->sriov_info.max_vfs); 1367 #endif 1368 break; 1369 1370 default: 1371 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1372 dev_id); 1373 } 1374 1375 return ret; 1376 } 1377 1378 /** 1379 * octeon_pci_os_setup - PCI initialization for each Octeon device. 
1380 * @oct: octeon device 1381 */ 1382 static int octeon_pci_os_setup(struct octeon_device *oct) 1383 { 1384 /* setup PCI stuff first */ 1385 if (pci_enable_device(oct->pci_dev)) { 1386 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1387 return 1; 1388 } 1389 1390 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1391 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1392 pci_disable_device(oct->pci_dev); 1393 return 1; 1394 } 1395 1396 /* Enable PCI DMA Master. */ 1397 pci_set_master(oct->pci_dev); 1398 1399 return 0; 1400 } 1401 1402 /** 1403 * free_netbuf - Unmap and free network buffer 1404 * @buf: buffer 1405 */ 1406 static void free_netbuf(void *buf) 1407 { 1408 struct sk_buff *skb; 1409 struct octnet_buf_free_info *finfo; 1410 struct lio *lio; 1411 1412 finfo = (struct octnet_buf_free_info *)buf; 1413 skb = finfo->skb; 1414 lio = finfo->lio; 1415 1416 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1417 DMA_TO_DEVICE); 1418 1419 tx_buffer_free(skb); 1420 } 1421 1422 /** 1423 * free_netsgbuf - Unmap and free gather buffer 1424 * @buf: buffer 1425 */ 1426 static void free_netsgbuf(void *buf) 1427 { 1428 struct octnet_buf_free_info *finfo; 1429 struct sk_buff *skb; 1430 struct lio *lio; 1431 struct octnic_gather *g; 1432 int i, frags, iq; 1433 1434 finfo = (struct octnet_buf_free_info *)buf; 1435 skb = finfo->skb; 1436 lio = finfo->lio; 1437 g = finfo->g; 1438 frags = skb_shinfo(skb)->nr_frags; 1439 1440 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1441 g->sg[0].ptr[0], (skb->len - skb->data_len), 1442 DMA_TO_DEVICE); 1443 1444 i = 1; 1445 while (frags--) { 1446 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 1447 1448 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 1449 g->sg[(i >> 2)].ptr[(i & 3)], 1450 skb_frag_size(frag), DMA_TO_DEVICE); 1451 i++; 1452 } 1453 1454 iq = skb_iq(lio->oct_dev, skb); 1455 spin_lock(&lio->glist_lock[iq]); 1456 list_add_tail(&g->list, &lio->glist[iq]); 1457 
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}

/**
 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
 * @buf: buffer (an octeon_soft_command whose callback_arg is the skb)
 *
 * Same unmap/recycle sequence as free_netsgbuf(), but the skb is kept
 * alive because the timestamp completion path still needs it.
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	/* The free info was stashed in the skb control buffer at Tx time. */
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	/* Entry 0 of the gather list maps the linear part of the skb. */
	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	/* Remaining entries map the page frags; sg entries hold 4 ptrs. */
	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}

/**
 * liquidio_ptp_adjfine - Adjust ptp frequency
 * @ptp: PTP clock info
 * @scaled_ppm: how much to adjust by, in scaled parts-per-million
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 */
static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	/* Work with a magnitude; remember the sign for the final add/sub. */
	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	/* Read-modify-write of the comp register must be atomic. */
	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_adjtime - Adjust ptp time
 * @ptp: PTP clock info
 * @delta: how much to adjust by, in nanosecs
 *
 * The offset is kept in software (ptp_adjust) rather than written to
 * the hardware counter; gettime/settime account for it.
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	/* Sample the counter and the software offset together. */
	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	/* Writing the counter invalidates any accumulated adjustment. */
	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_enable - Check if PTP is enabled
 * @ptp: PTP clock info
 * @rq: request
 * @on: is it on
 *
 * Ancillary PTP features (alarms, ext ts, pps) are not supported.
 */
static int
liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
		    struct ptp_clock_request __maybe_unused *rq,
		    int __maybe_unused on)
{
	return -EOPNOTSUPP;
}

/**
 * oct_ptp_open - Open PTP clock source
 * @netdev: network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfine = liquidio_ptp_adjfine;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
lio->ptp_info.enable = liquidio_ptp_enable; 1646 1647 lio->ptp_adjust = 0; 1648 1649 lio->ptp_clock = ptp_clock_register(&lio->ptp_info, 1650 &oct->pci_dev->dev); 1651 1652 if (IS_ERR(lio->ptp_clock)) 1653 lio->ptp_clock = NULL; 1654 } 1655 1656 /** 1657 * liquidio_ptp_init - Init PTP clock 1658 * @oct: octeon device 1659 */ 1660 static void liquidio_ptp_init(struct octeon_device *oct) 1661 { 1662 u64 clock_comp, cfg; 1663 1664 clock_comp = (u64)NSEC_PER_SEC << 32; 1665 do_div(clock_comp, oct->coproc_clock_rate); 1666 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1667 1668 /* Enable */ 1669 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); 1670 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); 1671 } 1672 1673 /** 1674 * load_firmware - Load firmware to device 1675 * @oct: octeon device 1676 * 1677 * Maps device to firmware filename, requests firmware, and downloads it 1678 */ 1679 static int load_firmware(struct octeon_device *oct) 1680 { 1681 int ret = 0; 1682 const struct firmware *fw; 1683 char fw_name[LIO_MAX_FW_FILENAME_LEN]; 1684 char *tmp_fw_type; 1685 1686 if (fw_type_is_auto()) { 1687 tmp_fw_type = LIO_FW_NAME_TYPE_NIC; 1688 strscpy_pad(fw_type, tmp_fw_type, sizeof(fw_type)); 1689 } else { 1690 tmp_fw_type = fw_type; 1691 } 1692 1693 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, 1694 octeon_get_conf(oct)->card_name, tmp_fw_type, 1695 LIO_FW_NAME_SUFFIX); 1696 1697 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); 1698 if (ret) { 1699 dev_err(&oct->pci_dev->dev, "Request firmware failed. 
Could not find file %s.\n", 1700 fw_name); 1701 release_firmware(fw); 1702 return ret; 1703 } 1704 1705 ret = octeon_download_firmware(oct, fw->data, fw->size); 1706 1707 release_firmware(fw); 1708 1709 return ret; 1710 } 1711 1712 /** 1713 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status 1714 * @work: work_struct data structure 1715 */ 1716 static void octnet_poll_check_txq_status(struct work_struct *work) 1717 { 1718 struct cavium_wk *wk = (struct cavium_wk *)work; 1719 struct lio *lio = (struct lio *)wk->ctxptr; 1720 1721 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) 1722 return; 1723 1724 check_txq_status(lio); 1725 queue_delayed_work(lio->txq_status_wq.wq, 1726 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 1727 } 1728 1729 /** 1730 * setup_tx_poll_fn - Sets up the txq poll check 1731 * @netdev: network device 1732 */ 1733 static inline int setup_tx_poll_fn(struct net_device *netdev) 1734 { 1735 struct lio *lio = GET_LIO(netdev); 1736 struct octeon_device *oct = lio->oct_dev; 1737 1738 lio->txq_status_wq.wq = alloc_workqueue("txq-status", 1739 WQ_MEM_RECLAIM | WQ_PERCPU, 0); 1740 if (!lio->txq_status_wq.wq) { 1741 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); 1742 return -1; 1743 } 1744 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, 1745 octnet_poll_check_txq_status); 1746 lio->txq_status_wq.wk.ctxptr = lio; 1747 queue_delayed_work(lio->txq_status_wq.wq, 1748 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 1749 return 0; 1750 } 1751 1752 static inline void cleanup_tx_poll_fn(struct net_device *netdev) 1753 { 1754 struct lio *lio = GET_LIO(netdev); 1755 1756 if (lio->txq_status_wq.wq) { 1757 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); 1758 destroy_workqueue(lio->txq_status_wq.wq); 1759 } 1760 } 1761 1762 /** 1763 * liquidio_open - Net device open for LiquidIO 1764 * @netdev: network device 1765 */ 1766 static int liquidio_open(struct net_device *netdev) 1767 { 1768 struct lio *lio = 
GET_LIO(netdev); 1769 struct octeon_device *oct = lio->oct_dev; 1770 struct octeon_device_priv *oct_priv = oct->priv; 1771 struct napi_struct *napi, *n; 1772 int ret = 0; 1773 1774 if (oct->props[lio->ifidx].napi_enabled == 0) { 1775 tasklet_disable(&oct_priv->droq_tasklet); 1776 1777 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1778 napi_enable(napi); 1779 1780 oct->props[lio->ifidx].napi_enabled = 1; 1781 1782 if (OCTEON_CN23XX_PF(oct)) 1783 oct->droq[0]->ops.poll_mode = 1; 1784 } 1785 1786 if (oct->ptp_enable) 1787 oct_ptp_open(netdev); 1788 1789 ifstate_set(lio, LIO_IFSTATE_RUNNING); 1790 1791 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { 1792 ret = setup_tx_poll_fn(netdev); 1793 if (ret) 1794 goto err_poll; 1795 } 1796 1797 netif_tx_start_all_queues(netdev); 1798 1799 /* Ready for link status updates */ 1800 lio->intf_open = 1; 1801 1802 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 1803 1804 /* tell Octeon to start forwarding packets to host */ 1805 ret = send_rx_ctrl_cmd(lio, 1); 1806 if (ret) 1807 goto err_rx_ctrl; 1808 1809 /* start periodical statistics fetch */ 1810 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); 1811 lio->stats_wk.ctxptr = lio; 1812 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies 1813 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); 1814 1815 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", 1816 netdev->name); 1817 1818 return 0; 1819 1820 err_rx_ctrl: 1821 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) 1822 cleanup_tx_poll_fn(netdev); 1823 err_poll: 1824 if (lio->ptp_clock) { 1825 ptp_clock_unregister(lio->ptp_clock); 1826 lio->ptp_clock = NULL; 1827 } 1828 1829 if (oct->props[lio->ifidx].napi_enabled == 1) { 1830 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1831 napi_disable(napi); 1832 1833 oct->props[lio->ifidx].napi_enabled = 0; 1834 1835 if (OCTEON_CN23XX_PF(oct)) 1836 oct->droq[0]->ops.poll_mode = 0; 1837 } 1838 1839 return ret; 1840 } 1841 1842 
/** 1843 * liquidio_stop - Net device stop for LiquidIO 1844 * @netdev: network device 1845 */ 1846 static int liquidio_stop(struct net_device *netdev) 1847 { 1848 struct lio *lio = GET_LIO(netdev); 1849 struct octeon_device *oct = lio->oct_dev; 1850 struct octeon_device_priv *oct_priv = oct->priv; 1851 struct napi_struct *napi, *n; 1852 int ret = 0; 1853 1854 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 1855 1856 /* Stop any link updates */ 1857 lio->intf_open = 0; 1858 1859 stop_txqs(netdev); 1860 1861 /* Inform that netif carrier is down */ 1862 netif_carrier_off(netdev); 1863 netif_tx_disable(netdev); 1864 1865 lio->linfo.link.s.link_up = 0; 1866 lio->link_changes++; 1867 1868 /* Tell Octeon that nic interface is down. */ 1869 ret = send_rx_ctrl_cmd(lio, 0); 1870 if (ret) 1871 return ret; 1872 1873 if (OCTEON_CN23XX_PF(oct)) { 1874 if (!oct->msix_on) 1875 cleanup_tx_poll_fn(netdev); 1876 } else { 1877 cleanup_tx_poll_fn(netdev); 1878 } 1879 1880 cancel_delayed_work_sync(&lio->stats_wk.work); 1881 1882 if (lio->ptp_clock) { 1883 ptp_clock_unregister(lio->ptp_clock); 1884 lio->ptp_clock = NULL; 1885 } 1886 1887 /* Wait for any pending Rx descriptors */ 1888 if (lio_wait_for_clean_oq(oct)) 1889 netif_info(lio, rx_err, lio->netdev, 1890 "Proceeding with stop interface after partial RX desc processing\n"); 1891 1892 if (oct->props[lio->ifidx].napi_enabled == 1) { 1893 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1894 napi_disable(napi); 1895 1896 oct->props[lio->ifidx].napi_enabled = 0; 1897 1898 if (OCTEON_CN23XX_PF(oct)) 1899 oct->droq[0]->ops.poll_mode = 0; 1900 1901 tasklet_enable(&oct_priv->droq_tasklet); 1902 } 1903 1904 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 1905 1906 return ret; 1907 } 1908 1909 /** 1910 * get_new_flags - Converts a mask based on net device flags 1911 * @netdev: network device 1912 * 1913 * This routine generates a octnet_ifflags mask from the net device flags 1914 * received from the OS. 
1915 */ 1916 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) 1917 { 1918 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1919 1920 if (netdev->flags & IFF_PROMISC) 1921 f |= OCTNET_IFFLAG_PROMISC; 1922 1923 if (netdev->flags & IFF_ALLMULTI) 1924 f |= OCTNET_IFFLAG_ALLMULTI; 1925 1926 if (netdev->flags & IFF_MULTICAST) { 1927 f |= OCTNET_IFFLAG_MULTICAST; 1928 1929 /* Accept all multicast addresses if there are more than we 1930 * can handle 1931 */ 1932 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1933 f |= OCTNET_IFFLAG_ALLMULTI; 1934 } 1935 1936 if (netdev->flags & IFF_BROADCAST) 1937 f |= OCTNET_IFFLAG_BROADCAST; 1938 1939 return f; 1940 } 1941 1942 /** 1943 * liquidio_set_mcast_list - Net device set_multicast_list 1944 * @netdev: network device 1945 */ 1946 static void liquidio_set_mcast_list(struct net_device *netdev) 1947 { 1948 struct lio *lio = GET_LIO(netdev); 1949 struct octeon_device *oct = lio->oct_dev; 1950 struct octnic_ctrl_pkt nctrl; 1951 struct netdev_hw_addr *ha; 1952 u64 *mc; 1953 int ret; 1954 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1955 1956 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1957 1958 /* Create a ctrl pkt command to be sent to core app. */ 1959 nctrl.ncmd.u64 = 0; 1960 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1961 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1962 nctrl.ncmd.s.param2 = mc_count; 1963 nctrl.ncmd.s.more = mc_count; 1964 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1965 nctrl.netpndev = (u64)netdev; 1966 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1967 1968 /* copy all the addresses into the udd */ 1969 mc = &nctrl.udd[0]; 1970 netdev_for_each_mc_addr(ha, netdev) { 1971 *mc = 0; 1972 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); 1973 /* no need to swap bytes */ 1974 1975 if (++mc > &nctrl.udd[mc_count]) 1976 break; 1977 } 1978 1979 /* Apparently, any activity in this call from the kernel has to 1980 * be atomic. 
So we won't wait for response. */

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}

/**
 * liquidio_set_mac - Net device set_mac_address
 * @netdev: network device
 * @p: pointer to sockaddr
 *
 * Return: 0 on success, -EADDRNOTAVAIL/-ENOMEM/-EIO on failure.
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}

	/* The command was sent; check the status the firmware returned. */
	if (nctrl.sc_status) {
		dev_err(&oct->pci_dev->dev,
			"%s: MAC Address change failed. sc return=%x\n",
			__func__, nctrl.sc_status);
		return -EIO;
	}

	/* Firmware accepted the address; mirror it into the netdev and
	 * the cached link info.
	 */
	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}

/* Aggregate per-queue and firmware link stats into rtnl_link_stats64. */
static void
liquidio_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *lstats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	/* Queues are being torn down; leave lstats untouched. */
	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	/* Sum Tx counters across all instruction queues. */
	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	lstats->tx_packets = pkts;
	lstats->tx_bytes = bytes;
	lstats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	/* Sum Rx counters across all output (droq) queues. */
	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	lstats->rx_bytes = bytes;
	lstats->rx_packets = pkts;
	lstats->rx_dropped = drop;

	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
	lstats->collisions = oct->link_stats.fromhost.total_collisions;

	/* detailed rx_errors: */
	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
	/* recved pkt with crc error */
	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
	/* recv'd frame alignment error */
	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
	/* recv'r fifo overrun */
	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;

	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
			    lstats->rx_frame_errors + lstats->rx_fifo_errors;

	/* detailed tx_errors */
	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;

	lstats->tx_errors = lstats->tx_aborted_errors +
			    lstats->tx_carrier_errors +
			    lstats->tx_fifo_errors;
}

/* Configure hardware Rx timestamping; Tx stamping is accepted as-is.
 * Returns -EOPNOTSUPP when the device has no PTP support, -ERANGE for
 * unsupported tx_type/rx_filter values.
 */
static int liquidio_hwtstamp_set(struct net_device *netdev,
				 struct kernel_hwtstamp_config *conf,
				 struct netlink_ext_ack *extack)
{
	struct lio *lio = GET_LIO(netdev);

	if (!lio->oct_dev->ptp_enable)
		return -EOPNOTSUPP;

	switch (conf->tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	/* Any filtered mode is upgraded to timestamping all packets. */
	switch (conf->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf->rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return 0;
}

static int
liquidio_hwtstamp_get(struct net_device *netdev,
		      struct kernel_hwtstamp_config *conf)
{
	struct lio *lio = GET_LIO(netdev);

	/* TX timestamping is technically always on */
	conf->tx_type = HWTSTAMP_TX_ON;
	conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ?
			  HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return 0;
}

/**
 * handle_timestamp - handle a Tx timestamp response
 * @oct: octeon device
 * @status: response status
 * @buf: pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	/* The free info was stashed in the skb control buffer at Tx time. */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	/* On failure, still deliver a (zero) timestamp so the skb is
	 * completed and freed below.
	 */
	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	/* Timestamp arrives in device byte order. */
	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}

/**
 * send_nic_timestamp_pkt - Send a data packet that will be timestamped
 * @oct: octeon device
 * @ndata: pointer to network data
 * @finfo: pointer to private network data
 * @xmit_more: more is coming
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo,
					 int xmit_more)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	/* Wrap the data command in a soft command so a response (the
	 * timestamp) can be returned for it.
	 */
	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	/* Switch the request type to the response-carrying variant. */
	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	/* Command format (and thus the dlengsz location) differs by chip. */
	if (OCTEON_CN23XX_PF(oct))
		len = (u32)((struct octeon_instr_ih3 *)
			    (&sc->cmd.cmd3.ih3))->dlengsz;
	else
		len = (u32)((struct octeon_instr_ih2 *)
			    (&sc->cmd.cmd2.ih2))->dlengsz;

	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct,
sc->iq_no, ring_doorbell, &sc->cmd, 2266 sc, len, ndata->reqtype); 2267 2268 if (retval == IQ_SEND_FAILED) { 2269 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2270 retval); 2271 octeon_free_soft_command(oct, sc); 2272 } else { 2273 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2274 } 2275 2276 return retval; 2277 } 2278 2279 /** 2280 * liquidio_xmit - Transmit networks packets to the Octeon interface 2281 * @skb: skbuff struct to be passed to network layer. 2282 * @netdev: pointer to network device 2283 * 2284 * Return: whether the packet was transmitted to the device okay or not 2285 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2286 */ 2287 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2288 { 2289 struct lio *lio; 2290 struct octnet_buf_free_info *finfo; 2291 union octnic_cmd_setup cmdsetup; 2292 struct octnic_data_pkt ndata; 2293 struct octeon_device *oct; 2294 struct oct_iq_stats *stats; 2295 struct octeon_instr_irh *irh; 2296 union tx_info *tx_info; 2297 int status = 0; 2298 int q_idx = 0, iq_no = 0; 2299 int j, xmit_more = 0; 2300 u64 dptr = 0; 2301 u32 tag = 0; 2302 2303 lio = GET_LIO(netdev); 2304 oct = lio->oct_dev; 2305 2306 q_idx = skb_iq(oct, skb); 2307 tag = q_idx; 2308 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2309 2310 stats = &oct->instr_queue[iq_no]->stats; 2311 2312 /* Check for all conditions in which the current packet cannot be 2313 * transmitted. 2314 */ 2315 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2316 (!lio->linfo.link.s.link_up) || 2317 (skb->len <= 0)) { 2318 netif_info(lio, tx_err, lio->netdev, 2319 "Transmit failed link_status : %d\n", 2320 lio->linfo.link.s.link_up); 2321 goto lio_xmit_failed; 2322 } 2323 2324 /* Use space in skb->cb to store info used to unmap and 2325 * free the buffers. 
2326 */ 2327 finfo = (struct octnet_buf_free_info *)skb->cb; 2328 finfo->lio = lio; 2329 finfo->skb = skb; 2330 finfo->sc = NULL; 2331 2332 /* Prepare the attributes for the data to be passed to OSI. */ 2333 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2334 2335 ndata.buf = (void *)finfo; 2336 2337 ndata.q_no = iq_no; 2338 2339 if (octnet_iq_is_full(oct, ndata.q_no)) { 2340 /* defer sending if queue is full */ 2341 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2342 ndata.q_no); 2343 stats->tx_iq_busy++; 2344 return NETDEV_TX_BUSY; 2345 } 2346 2347 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2348 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); 2349 */ 2350 2351 ndata.datasize = skb->len; 2352 2353 cmdsetup.u64 = 0; 2354 cmdsetup.s.iq_no = iq_no; 2355 2356 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2357 if (skb->encapsulation) { 2358 cmdsetup.s.tnl_csum = 1; 2359 stats->tx_vxlan++; 2360 } else { 2361 cmdsetup.s.transport_csum = 1; 2362 } 2363 } 2364 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2365 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2366 cmdsetup.s.timestamp = 1; 2367 } 2368 2369 if (skb_shinfo(skb)->nr_frags == 0) { 2370 cmdsetup.s.u.datasize = skb->len; 2371 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2372 2373 /* Offload checksum calculation for TCP/UDP packets */ 2374 dptr = dma_map_single(&oct->pci_dev->dev, 2375 skb->data, 2376 skb->len, 2377 DMA_TO_DEVICE); 2378 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2379 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2380 __func__); 2381 stats->tx_dmamap_fail++; 2382 return NETDEV_TX_BUSY; 2383 } 2384 2385 if (OCTEON_CN23XX_PF(oct)) 2386 ndata.cmd.cmd3.dptr = dptr; 2387 else 2388 ndata.cmd.cmd2.dptr = dptr; 2389 finfo->dptr = dptr; 2390 ndata.reqtype = REQTYPE_NORESP_NET; 2391 2392 } else { 2393 int i, frags; 2394 skb_frag_t *frag; 2395 struct octnic_gather *g; 2396 2397 spin_lock(&lio->glist_lock[q_idx]); 
2398 g = (struct octnic_gather *) 2399 lio_list_delete_head(&lio->glist[q_idx]); 2400 spin_unlock(&lio->glist_lock[q_idx]); 2401 2402 if (!g) { 2403 netif_info(lio, tx_err, lio->netdev, 2404 "Transmit scatter gather: glist null!\n"); 2405 goto lio_xmit_failed; 2406 } 2407 2408 cmdsetup.s.gather = 1; 2409 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2410 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2411 2412 memset(g->sg, 0, g->sg_size); 2413 2414 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2415 skb->data, 2416 (skb->len - skb->data_len), 2417 DMA_TO_DEVICE); 2418 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2419 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2420 __func__); 2421 stats->tx_dmamap_fail++; 2422 return NETDEV_TX_BUSY; 2423 } 2424 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2425 2426 frags = skb_shinfo(skb)->nr_frags; 2427 i = 1; 2428 while (frags--) { 2429 frag = &skb_shinfo(skb)->frags[i - 1]; 2430 2431 g->sg[(i >> 2)].ptr[(i & 3)] = 2432 skb_frag_dma_map(&oct->pci_dev->dev, 2433 frag, 0, skb_frag_size(frag), 2434 DMA_TO_DEVICE); 2435 2436 if (dma_mapping_error(&oct->pci_dev->dev, 2437 g->sg[i >> 2].ptr[i & 3])) { 2438 dma_unmap_single(&oct->pci_dev->dev, 2439 g->sg[0].ptr[0], 2440 skb->len - skb->data_len, 2441 DMA_TO_DEVICE); 2442 for (j = 1; j < i; j++) { 2443 frag = &skb_shinfo(skb)->frags[j - 1]; 2444 dma_unmap_page(&oct->pci_dev->dev, 2445 g->sg[j >> 2].ptr[j & 3], 2446 skb_frag_size(frag), 2447 DMA_TO_DEVICE); 2448 } 2449 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2450 __func__); 2451 return NETDEV_TX_BUSY; 2452 } 2453 2454 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 2455 (i & 3)); 2456 i++; 2457 } 2458 2459 dptr = g->sg_dma_ptr; 2460 2461 if (OCTEON_CN23XX_PF(oct)) 2462 ndata.cmd.cmd3.dptr = dptr; 2463 else 2464 ndata.cmd.cmd2.dptr = dptr; 2465 finfo->dptr = dptr; 2466 finfo->g = g; 2467 2468 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2469 } 2470 2471 if 
(OCTEON_CN23XX_PF(oct)) { 2472 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2473 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2474 } else { 2475 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; 2476 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; 2477 } 2478 2479 if (skb_shinfo(skb)->gso_size) { 2480 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2481 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2482 stats->tx_gso++; 2483 } 2484 2485 /* HW insert VLAN tag */ 2486 if (skb_vlan_tag_present(skb)) { 2487 irh->priority = skb_vlan_tag_get(skb) >> 13; 2488 irh->vlan = skb_vlan_tag_get(skb) & 0xfff; 2489 } 2490 2491 xmit_more = netdev_xmit_more(); 2492 2493 if (unlikely(cmdsetup.s.timestamp)) 2494 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 2495 else 2496 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 2497 if (status == IQ_SEND_FAILED) 2498 goto lio_xmit_failed; 2499 2500 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2501 2502 if (status == IQ_SEND_STOP) 2503 netif_stop_subqueue(netdev, q_idx); 2504 2505 netif_trans_update(netdev); 2506 2507 if (tx_info->s.gso_segs) 2508 stats->tx_done += tx_info->s.gso_segs; 2509 else 2510 stats->tx_done++; 2511 stats->tx_tot_bytes += ndata.datasize; 2512 2513 return NETDEV_TX_OK; 2514 2515 lio_xmit_failed: 2516 stats->tx_dropped++; 2517 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2518 iq_no, stats->tx_dropped); 2519 if (dptr) 2520 dma_unmap_single(&oct->pci_dev->dev, dptr, 2521 ndata.datasize, DMA_TO_DEVICE); 2522 2523 octeon_ring_doorbell_locked(oct, iq_no); 2524 2525 tx_buffer_free(skb); 2526 return NETDEV_TX_OK; 2527 } 2528 2529 /** 2530 * liquidio_tx_timeout - Network device Tx timeout 2531 * @netdev: pointer to network device 2532 * @txqueue: index of the hung transmit queue 2533 */ 2534 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 2535 { 2536 struct lio *lio; 2537 2538 lio = 
GET_LIO(netdev); 2539 2540 netif_info(lio, tx_err, lio->netdev, 2541 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2542 netdev->stats.tx_dropped); 2543 netif_trans_update(netdev); 2544 wake_txqs(netdev); 2545 } 2546 2547 static int liquidio_vlan_rx_add_vid(struct net_device *netdev, 2548 __be16 proto __attribute__((unused)), 2549 u16 vid) 2550 { 2551 struct lio *lio = GET_LIO(netdev); 2552 struct octeon_device *oct = lio->oct_dev; 2553 struct octnic_ctrl_pkt nctrl; 2554 int ret = 0; 2555 2556 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2557 2558 nctrl.ncmd.u64 = 0; 2559 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2560 nctrl.ncmd.s.param1 = vid; 2561 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2562 nctrl.netpndev = (u64)netdev; 2563 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2564 2565 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2566 if (ret) { 2567 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2568 ret); 2569 if (ret > 0) 2570 ret = -EIO; 2571 } 2572 2573 return ret; 2574 } 2575 2576 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, 2577 __be16 proto __attribute__((unused)), 2578 u16 vid) 2579 { 2580 struct lio *lio = GET_LIO(netdev); 2581 struct octeon_device *oct = lio->oct_dev; 2582 struct octnic_ctrl_pkt nctrl; 2583 int ret = 0; 2584 2585 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2586 2587 nctrl.ncmd.u64 = 0; 2588 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2589 nctrl.ncmd.s.param1 = vid; 2590 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2591 nctrl.netpndev = (u64)netdev; 2592 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2593 2594 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2595 if (ret) { 2596 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 2597 ret); 2598 if (ret > 0) 2599 ret = -EIO; 2600 } 2601 return ret; 2602 } 2603 2604 /** 2605 * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload 2606 * 
@netdev: pointer to network device 2607 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL 2608 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE 2609 * Returns: SUCCESS or FAILURE 2610 */ 2611 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 2612 u8 rx_cmd) 2613 { 2614 struct lio *lio = GET_LIO(netdev); 2615 struct octeon_device *oct = lio->oct_dev; 2616 struct octnic_ctrl_pkt nctrl; 2617 int ret = 0; 2618 2619 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2620 2621 nctrl.ncmd.u64 = 0; 2622 nctrl.ncmd.s.cmd = command; 2623 nctrl.ncmd.s.param1 = rx_cmd; 2624 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2625 nctrl.netpndev = (u64)netdev; 2626 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2627 2628 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2629 if (ret) { 2630 dev_err(&oct->pci_dev->dev, 2631 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", 2632 ret); 2633 if (ret > 0) 2634 ret = -EIO; 2635 } 2636 return ret; 2637 } 2638 2639 /** 2640 * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware 2641 * @netdev: pointer to network device 2642 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG 2643 * @vxlan_port: VxLAN port to be added or deleted 2644 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD, 2645 * OCTNET_CMD_VXLAN_PORT_DEL 2646 * Return: SUCCESS or FAILURE 2647 */ 2648 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 2649 u16 vxlan_port, u8 vxlan_cmd_bit) 2650 { 2651 struct lio *lio = GET_LIO(netdev); 2652 struct octeon_device *oct = lio->oct_dev; 2653 struct octnic_ctrl_pkt nctrl; 2654 int ret = 0; 2655 2656 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2657 2658 nctrl.ncmd.u64 = 0; 2659 nctrl.ncmd.s.cmd = command; 2660 nctrl.ncmd.s.more = vxlan_cmd_bit; 2661 nctrl.ncmd.s.param1 = vxlan_port; 2662 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2663 nctrl.netpndev = (u64)netdev; 2664 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2665 2666 ret = 
octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2667 if (ret) { 2668 dev_err(&oct->pci_dev->dev, 2669 "VxLAN port add/delete failed in core (ret:0x%x)\n", 2670 ret); 2671 if (ret > 0) 2672 ret = -EIO; 2673 } 2674 return ret; 2675 } 2676 2677 static int liquidio_udp_tunnel_set_port(struct net_device *netdev, 2678 unsigned int table, unsigned int entry, 2679 struct udp_tunnel_info *ti) 2680 { 2681 return liquidio_vxlan_port_command(netdev, 2682 OCTNET_CMD_VXLAN_PORT_CONFIG, 2683 htons(ti->port), 2684 OCTNET_CMD_VXLAN_PORT_ADD); 2685 } 2686 2687 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, 2688 unsigned int table, 2689 unsigned int entry, 2690 struct udp_tunnel_info *ti) 2691 { 2692 return liquidio_vxlan_port_command(netdev, 2693 OCTNET_CMD_VXLAN_PORT_CONFIG, 2694 htons(ti->port), 2695 OCTNET_CMD_VXLAN_PORT_DEL); 2696 } 2697 2698 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { 2699 .set_port = liquidio_udp_tunnel_set_port, 2700 .unset_port = liquidio_udp_tunnel_unset_port, 2701 .tables = { 2702 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 2703 }, 2704 }; 2705 2706 /** 2707 * liquidio_fix_features - Net device fix features 2708 * @netdev: pointer to network device 2709 * @request: features requested 2710 * Return: updated features list 2711 */ 2712 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2713 netdev_features_t request) 2714 { 2715 struct lio *lio = netdev_priv(netdev); 2716 2717 if ((request & NETIF_F_RXCSUM) && 2718 !(lio->dev_capability & NETIF_F_RXCSUM)) 2719 request &= ~NETIF_F_RXCSUM; 2720 2721 if ((request & NETIF_F_HW_CSUM) && 2722 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2723 request &= ~NETIF_F_HW_CSUM; 2724 2725 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2726 request &= ~NETIF_F_TSO; 2727 2728 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2729 request &= ~NETIF_F_TSO6; 2730 2731 if ((request & NETIF_F_LRO) && 
!(lio->dev_capability & NETIF_F_LRO)) 2732 request &= ~NETIF_F_LRO; 2733 2734 /*Disable LRO if RXCSUM is off */ 2735 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2736 (lio->dev_capability & NETIF_F_LRO)) 2737 request &= ~NETIF_F_LRO; 2738 2739 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) && 2740 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER)) 2741 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 2742 2743 return request; 2744 } 2745 2746 /** 2747 * liquidio_set_features - Net device set features 2748 * @netdev: pointer to network device 2749 * @features: features to enable/disable 2750 */ 2751 static int liquidio_set_features(struct net_device *netdev, 2752 netdev_features_t features) 2753 { 2754 struct lio *lio = netdev_priv(netdev); 2755 2756 if ((features & NETIF_F_LRO) && 2757 (lio->dev_capability & NETIF_F_LRO) && 2758 !(netdev->features & NETIF_F_LRO)) 2759 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2760 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2761 else if (!(features & NETIF_F_LRO) && 2762 (lio->dev_capability & NETIF_F_LRO) && 2763 (netdev->features & NETIF_F_LRO)) 2764 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 2765 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2766 2767 /* Sending command to firmware to enable/disable RX checksum 2768 * offload settings using ethtool 2769 */ 2770 if (!(netdev->features & NETIF_F_RXCSUM) && 2771 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2772 (features & NETIF_F_RXCSUM)) 2773 liquidio_set_rxcsum_command(netdev, 2774 OCTNET_CMD_TNL_RX_CSUM_CTL, 2775 OCTNET_CMD_RXCSUM_ENABLE); 2776 else if ((netdev->features & NETIF_F_RXCSUM) && 2777 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2778 !(features & NETIF_F_RXCSUM)) 2779 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2780 OCTNET_CMD_RXCSUM_DISABLE); 2781 2782 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2783 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2784 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2785 
liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2786 OCTNET_CMD_VLAN_FILTER_ENABLE); 2787 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2788 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2789 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2790 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2791 OCTNET_CMD_VLAN_FILTER_DISABLE); 2792 2793 return 0; 2794 } 2795 2796 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, 2797 u8 *mac, bool is_admin_assigned) 2798 { 2799 struct lio *lio = GET_LIO(netdev); 2800 struct octeon_device *oct = lio->oct_dev; 2801 struct octnic_ctrl_pkt nctrl; 2802 int ret = 0; 2803 2804 if (!is_valid_ether_addr(mac)) 2805 return -EINVAL; 2806 2807 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) 2808 return -EINVAL; 2809 2810 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2811 2812 nctrl.ncmd.u64 = 0; 2813 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2814 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 2815 nctrl.ncmd.s.param1 = vfidx + 1; 2816 nctrl.ncmd.s.more = 1; 2817 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2818 nctrl.netpndev = (u64)netdev; 2819 if (is_admin_assigned) { 2820 nctrl.ncmd.s.param2 = true; 2821 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2822 } 2823 2824 nctrl.udd[0] = 0; 2825 /* The MAC Address is presented in network byte order. 
*/ 2826 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); 2827 2828 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; 2829 2830 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2831 if (ret > 0) 2832 ret = -EIO; 2833 2834 return ret; 2835 } 2836 2837 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) 2838 { 2839 struct lio *lio = GET_LIO(netdev); 2840 struct octeon_device *oct = lio->oct_dev; 2841 int retval; 2842 2843 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2844 return -EINVAL; 2845 2846 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); 2847 if (!retval) 2848 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); 2849 2850 return retval; 2851 } 2852 2853 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, 2854 bool enable) 2855 { 2856 struct lio *lio = GET_LIO(netdev); 2857 struct octeon_device *oct = lio->oct_dev; 2858 struct octnic_ctrl_pkt nctrl; 2859 int retval; 2860 2861 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { 2862 netif_info(lio, drv, lio->netdev, 2863 "firmware does not support spoofchk\n"); 2864 return -EOPNOTSUPP; 2865 } 2866 2867 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 2868 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 2869 return -EINVAL; 2870 } 2871 2872 if (enable) { 2873 if (oct->sriov_info.vf_spoofchk[vfidx]) 2874 return 0; 2875 } else { 2876 /* Clear */ 2877 if (!oct->sriov_info.vf_spoofchk[vfidx]) 2878 return 0; 2879 } 2880 2881 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2882 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; 2883 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; 2884 nctrl.ncmd.s.param1 = 2885 vfidx + 1; /* vfidx is 0 based, 2886 * but vf_num (param1) is 1 based 2887 */ 2888 nctrl.ncmd.s.param2 = enable; 2889 nctrl.ncmd.s.more = 0; 2890 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2891 nctrl.cb_fn = NULL; 2892 2893 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2894 2895 if (retval) { 2896 netif_info(lio, 
drv, lio->netdev, 2897 "Failed to set VF %d spoofchk %s\n", vfidx, 2898 enable ? "on" : "off"); 2899 return -1; 2900 } 2901 2902 oct->sriov_info.vf_spoofchk[vfidx] = enable; 2903 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, 2904 enable ? "on" : "off"); 2905 2906 return 0; 2907 } 2908 2909 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, 2910 u16 vlan, u8 qos, __be16 vlan_proto) 2911 { 2912 struct lio *lio = GET_LIO(netdev); 2913 struct octeon_device *oct = lio->oct_dev; 2914 struct octnic_ctrl_pkt nctrl; 2915 u16 vlantci; 2916 int ret = 0; 2917 2918 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2919 return -EINVAL; 2920 2921 if (vlan_proto != htons(ETH_P_8021Q)) 2922 return -EPROTONOSUPPORT; 2923 2924 if (vlan >= VLAN_N_VID || qos > 7) 2925 return -EINVAL; 2926 2927 if (vlan) 2928 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; 2929 else 2930 vlantci = 0; 2931 2932 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) 2933 return 0; 2934 2935 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2936 2937 if (vlan) 2938 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2939 else 2940 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2941 2942 nctrl.ncmd.s.param1 = vlantci; 2943 nctrl.ncmd.s.param2 = 2944 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ 2945 nctrl.ncmd.s.more = 0; 2946 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2947 nctrl.cb_fn = NULL; 2948 2949 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2950 if (ret) { 2951 if (ret > 0) 2952 ret = -EIO; 2953 return ret; 2954 } 2955 2956 oct->sriov_info.vf_vlantci[vfidx] = vlantci; 2957 2958 return ret; 2959 } 2960 2961 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, 2962 struct ifla_vf_info *ivi) 2963 { 2964 struct lio *lio = GET_LIO(netdev); 2965 struct octeon_device *oct = lio->oct_dev; 2966 u8 *macaddr; 2967 2968 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2969 return -EINVAL; 2970 2971 memset(ivi, 0, sizeof(struct 
ifla_vf_info)); 2972 2973 ivi->vf = vfidx; 2974 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; 2975 ether_addr_copy(&ivi->mac[0], macaddr); 2976 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; 2977 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; 2978 if (oct->sriov_info.trusted_vf.active && 2979 oct->sriov_info.trusted_vf.id == vfidx) 2980 ivi->trusted = true; 2981 else 2982 ivi->trusted = false; 2983 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; 2984 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; 2985 ivi->max_tx_rate = lio->linfo.link.s.speed; 2986 ivi->min_tx_rate = 0; 2987 2988 return 0; 2989 } 2990 2991 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) 2992 { 2993 struct octeon_device *oct = lio->oct_dev; 2994 struct octeon_soft_command *sc; 2995 int retval; 2996 2997 sc = octeon_alloc_soft_command(oct, 0, 16, 0); 2998 if (!sc) 2999 return -ENOMEM; 3000 3001 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 3002 3003 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3004 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 3005 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, 3006 trusted); 3007 3008 init_completion(&sc->complete); 3009 sc->sc_status = OCTEON_REQUEST_PENDING; 3010 3011 retval = octeon_send_soft_command(oct, sc); 3012 if (retval == IQ_SEND_FAILED) { 3013 octeon_free_soft_command(oct, sc); 3014 retval = -1; 3015 } else { 3016 /* Wait for response or timeout */ 3017 retval = wait_for_sc_completion_timeout(oct, sc, 0); 3018 if (retval) 3019 return (retval); 3020 3021 WRITE_ONCE(sc->caller_is_done, true); 3022 } 3023 3024 return retval; 3025 } 3026 3027 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx, 3028 bool setting) 3029 { 3030 struct lio *lio = GET_LIO(netdev); 3031 struct octeon_device *oct = lio->oct_dev; 3032 3033 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) { 3034 /* trusted vf is not supported by firmware older than 1.7.1 */ 3035 
return -EOPNOTSUPP; 3036 } 3037 3038 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 3039 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 3040 return -EINVAL; 3041 } 3042 3043 if (setting) { 3044 /* Set */ 3045 3046 if (oct->sriov_info.trusted_vf.active && 3047 oct->sriov_info.trusted_vf.id == vfidx) 3048 return 0; 3049 3050 if (oct->sriov_info.trusted_vf.active) { 3051 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n"); 3052 return -EPERM; 3053 } 3054 } else { 3055 /* Clear */ 3056 3057 if (!oct->sriov_info.trusted_vf.active) 3058 return 0; 3059 } 3060 3061 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) { 3062 if (setting) { 3063 oct->sriov_info.trusted_vf.id = vfidx; 3064 oct->sriov_info.trusted_vf.active = true; 3065 } else { 3066 oct->sriov_info.trusted_vf.active = false; 3067 } 3068 3069 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx, 3070 setting ? "" : "not "); 3071 } else { 3072 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n"); 3073 return -1; 3074 } 3075 3076 return 0; 3077 } 3078 3079 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 3080 int linkstate) 3081 { 3082 struct lio *lio = GET_LIO(netdev); 3083 struct octeon_device *oct = lio->oct_dev; 3084 struct octnic_ctrl_pkt nctrl; 3085 int ret = 0; 3086 3087 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3088 return -EINVAL; 3089 3090 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) 3091 return 0; 3092 3093 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3094 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; 3095 nctrl.ncmd.s.param1 = 3096 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3097 nctrl.ncmd.s.param2 = linkstate; 3098 nctrl.ncmd.s.more = 0; 3099 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3100 nctrl.cb_fn = NULL; 3101 3102 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 3103 3104 if (!ret) 3105 oct->sriov_info.vf_linkstate[vfidx] = linkstate; 
3106 else if (ret > 0) 3107 ret = -EIO; 3108 3109 return ret; 3110 } 3111 3112 static int 3113 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) 3114 { 3115 struct lio_devlink_priv *priv; 3116 struct octeon_device *oct; 3117 3118 priv = devlink_priv(devlink); 3119 oct = priv->oct; 3120 3121 *mode = oct->eswitch_mode; 3122 3123 return 0; 3124 } 3125 3126 static int 3127 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, 3128 struct netlink_ext_ack *extack) 3129 { 3130 struct lio_devlink_priv *priv; 3131 struct octeon_device *oct; 3132 int ret = 0; 3133 3134 priv = devlink_priv(devlink); 3135 oct = priv->oct; 3136 3137 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)) 3138 return -EINVAL; 3139 3140 if (oct->eswitch_mode == mode) 3141 return 0; 3142 3143 switch (mode) { 3144 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 3145 oct->eswitch_mode = mode; 3146 ret = lio_vf_rep_create(oct); 3147 break; 3148 3149 case DEVLINK_ESWITCH_MODE_LEGACY: 3150 lio_vf_rep_destroy(oct); 3151 oct->eswitch_mode = mode; 3152 break; 3153 3154 default: 3155 ret = -EINVAL; 3156 } 3157 3158 return ret; 3159 } 3160 3161 static const struct devlink_ops liquidio_devlink_ops = { 3162 .eswitch_mode_get = liquidio_eswitch_mode_get, 3163 .eswitch_mode_set = liquidio_eswitch_mode_set, 3164 }; 3165 3166 static int 3167 liquidio_get_port_parent_id(struct net_device *dev, 3168 struct netdev_phys_item_id *ppid) 3169 { 3170 struct lio *lio = GET_LIO(dev); 3171 struct octeon_device *oct = lio->oct_dev; 3172 3173 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 3174 return -EOPNOTSUPP; 3175 3176 ppid->id_len = ETH_ALEN; 3177 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); 3178 3179 return 0; 3180 } 3181 3182 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, 3183 struct ifla_vf_stats *vf_stats) 3184 { 3185 struct lio *lio = GET_LIO(netdev); 3186 struct octeon_device *oct = lio->oct_dev; 3187 struct oct_vf_stats stats; 3188 int ret; 3189 3190 if 
(vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3191 return -EINVAL; 3192 3193 memset(&stats, 0, sizeof(struct oct_vf_stats)); 3194 ret = cn23xx_get_vf_stats(oct, vfidx, &stats); 3195 if (!ret) { 3196 vf_stats->rx_packets = stats.rx_packets; 3197 vf_stats->tx_packets = stats.tx_packets; 3198 vf_stats->rx_bytes = stats.rx_bytes; 3199 vf_stats->tx_bytes = stats.tx_bytes; 3200 vf_stats->broadcast = stats.broadcast; 3201 vf_stats->multicast = stats.multicast; 3202 } 3203 3204 return ret; 3205 } 3206 3207 static const struct net_device_ops lionetdevops = { 3208 .ndo_open = liquidio_open, 3209 .ndo_stop = liquidio_stop, 3210 .ndo_start_xmit = liquidio_xmit, 3211 .ndo_get_stats64 = liquidio_get_stats64, 3212 .ndo_set_mac_address = liquidio_set_mac, 3213 .ndo_set_rx_mode = liquidio_set_mcast_list, 3214 .ndo_tx_timeout = liquidio_tx_timeout, 3215 3216 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 3217 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 3218 .ndo_change_mtu = liquidio_change_mtu, 3219 .ndo_fix_features = liquidio_fix_features, 3220 .ndo_set_features = liquidio_set_features, 3221 .ndo_set_vf_mac = liquidio_set_vf_mac, 3222 .ndo_set_vf_vlan = liquidio_set_vf_vlan, 3223 .ndo_get_vf_config = liquidio_get_vf_config, 3224 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, 3225 .ndo_set_vf_trust = liquidio_set_vf_trust, 3226 .ndo_set_vf_link_state = liquidio_set_vf_link_state, 3227 .ndo_get_vf_stats = liquidio_get_vf_stats, 3228 .ndo_get_port_parent_id = liquidio_get_port_parent_id, 3229 .ndo_hwtstamp_get = liquidio_hwtstamp_get, 3230 .ndo_hwtstamp_set = liquidio_hwtstamp_set, 3231 }; 3232 3233 /** 3234 * liquidio_init - Entry point for the liquidio module 3235 */ 3236 static int __init liquidio_init(void) 3237 { 3238 int i; 3239 struct handshake *hs; 3240 3241 init_completion(&first_stage); 3242 3243 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); 3244 3245 if (liquidio_init_pci()) 3246 return -EINVAL; 3247 3248 
wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3249 3250 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3251 hs = &handshake[i]; 3252 if (hs->pci_dev) { 3253 wait_for_completion(&hs->init); 3254 if (!hs->init_ok) { 3255 /* init handshake failed */ 3256 dev_err(&hs->pci_dev->dev, 3257 "Failed to init device\n"); 3258 liquidio_deinit_pci(); 3259 return -EIO; 3260 } 3261 } 3262 } 3263 3264 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3265 hs = &handshake[i]; 3266 if (hs->pci_dev) { 3267 wait_for_completion_timeout(&hs->started, 3268 msecs_to_jiffies(30000)); 3269 if (!hs->started_ok) { 3270 /* starter handshake failed */ 3271 dev_err(&hs->pci_dev->dev, 3272 "Firmware failed to start\n"); 3273 liquidio_deinit_pci(); 3274 return -EIO; 3275 } 3276 } 3277 } 3278 3279 return 0; 3280 } 3281 3282 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3283 { 3284 struct octeon_device *oct = (struct octeon_device *)buf; 3285 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3286 int gmxport = 0; 3287 union oct_link_status *ls; 3288 int i; 3289 3290 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 3291 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3292 recv_pkt->buffer_size[0], 3293 recv_pkt->rh.r_nic_info.gmxport); 3294 goto nic_info_err; 3295 } 3296 3297 gmxport = recv_pkt->rh.r_nic_info.gmxport; 3298 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 3299 OCT_DROQ_INFO_SIZE); 3300 3301 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3302 for (i = 0; i < oct->ifcount; i++) { 3303 if (oct->props[i].gmxport == gmxport) { 3304 update_link_status(oct->props[i].netdev, ls); 3305 break; 3306 } 3307 } 3308 3309 nic_info_err: 3310 for (i = 0; i < recv_pkt->buffer_count; i++) 3311 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3312 octeon_free_recv_info(recv_info); 3313 return 0; 3314 } 3315 3316 /** 3317 * setup_nic_devices - Setup network interfaces 3318 * @octeon_dev: 
 *	octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 *
 * For each interface an IF_CFG soft command is sent to firmware, the
 * response is used to size and populate a new netdev, and the netdev is
 * registered with the network core.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	struct lio *lio = NULL;
	struct net_device *netdev;
	u8 mac[6], i, j, *fw_ver, *micro_ver;
	unsigned long micro;
	u32 cur_ver;
	struct octeon_soft_command *sc;
	struct liquidio_if_cfg_resp *resp;
	struct octdev_props *props;
	int retval, num_iqueues, num_oqueues;
	int max_num_queues = 0;
	union oct_nic_if_cfg if_cfg;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	u32 resp_size, data_size;
	u32 ifidx_or_pfnum;
	struct lio_version *vdata;
	struct devlink *devlink;
	struct lio_devlink_priv *lio_devlink;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, 0);
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		vdata = (struct lio_version *)sc->virtdptr;

		/* Send the driver's base version (big-endian fields) so
		 * firmware can check compatibility.
		 */
		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if (OCTEON_CN23XX_PF(octeon_dev)) {
			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
			base_queue = octeon_dev->sriov_info.pf_srn;

			gmx_port_id = octeon_dev->pf_num;
			ifidx_or_pfnum = octeon_dev->pf_num;
		} else {
			/* CN6XXX: queue layout comes from the static config. */
			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
					octeon_get_conf(octeon_dev), i);
			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
					octeon_get_conf(octeon_dev), i);
			base_queue = CFG_GET_BASE_QUE_NIC_IF(
					octeon_get_conf(octeon_dev), i);
			gmx_port_id = CFG_GET_GMXID_NIC_IF(
					octeon_get_conf(octeon_dev), i);
			ifidx_or_pfnum = i;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"requesting config for interface %d, iqs %d, oqs %d\n",
			ifidx_or_pfnum, num_iqueues, num_oqueues);

		if_cfg.u64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0,
					    if_cfg.u64, 0);

		init_completion(&sc->complete);
		sc->sc_status = OCTEON_REQUEST_PENDING;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n",
				retval);
			/* Soft instr is freed by driver in case of failure. */
			octeon_free_soft_command(octeon_dev, sc);
			return(-EIO);
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}
		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
			 32, "%s",
			 resp->cfg_info.liquidio_firmware_version);

		/* Verify f/w version (in case of 'auto' loading from flash) */
		fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
		if (memcmp(LIQUIDIO_BASE_VERSION,
			   fw_ver,
			   strlen(LIQUIDIO_BASE_VERSION))) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Unmatched firmware version. Expected %s.x, got %s.\n",
				LIQUIDIO_BASE_VERSION, fw_ver);
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		} else if (atomic_read(octeon_dev->adapter_fw_state) ==
			   FW_IS_PRELOADED) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "Using auto-loaded firmware version %s.\n",
				 fw_ver);
		}

		/* extract micro version field; point past '<maj>.<min>.' */
		micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
		if (kstrtoul(micro_ver, 10, &micro) != 0)
			micro = 0;
		octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
		octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
		octeon_dev->fw_info.ver.rev = micro;

		/* Response payload is big-endian; swap before reading. */
		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask,
				resp->cfg_info.oqmask);
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}

		if (OCTEON_CN6XXX(octeon_dev)) {
			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
								    cn6xxx));
		} else if (OCTEON_CN23XX_PF(octeon_dev)) {
			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
								    cn23xx_pf));
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues, max_num_queues);
		netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"setting real number rx failed\n");
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_free;
		}

		retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"setting real number tx failed\n");
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_free;
		}

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		/* Copy the firmware-assigned queue mapping and link info. */
		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}
		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;

		/* Response data fully copied out; release the soft command. */
		WRITE_ONCE(sc->caller_is_done, true);

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		if (OCTEON_CN23XX_PF(octeon_dev) ||
		    OCTEON_CN6XXX(octeon_dev)) {
			lio->dev_capability = NETIF_F_HIGHDMA
					      | NETIF_F_IP_CSUM
					      | NETIF_F_IPV6_CSUM
					      | NETIF_F_SG | NETIF_F_RXCSUM
					      | NETIF_F_GRO
					      | NETIF_F_TSO | NETIF_F_TSO6
					      | NETIF_F_LRO;
		}
		netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features = (lio->enc_dev_capability &
					   ~NETIF_F_LRO);

		netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;

		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;

		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		/*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
		netdev->hw_features = netdev->hw_features &
				      ~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		/* Seed every possible VF with a random MAC address. */
		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
			u8 vfmac[ETH_ALEN];

			eth_random_addr(vfmac);
			if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
				dev_err(&octeon_dev->pci_dev->dev,
					"Error setting VF%d MAC address\n",
					j);
				goto setup_nic_dev_free;
			}
		}

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */

		eth_hw_addr_set(netdev, mac);

		/* By default all interfaces on a single Octeon uses the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (liquidio_setup_io_queues(octeon_dev, i,
					     lio->linfo.num_txpciq,
					     lio->linfo.num_rxpciq)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_free;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_free;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_ENABLE);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_free;

		if ((octeon_dev->fw_info.app_cap_flags &
		     LIQUIDIO_TIME_SYNC_CAP) &&
		    setup_sync_octeon_time_wq(netdev))
			goto setup_nic_dev_free;

		if (setup_rx_oom_poll_fn(netdev))
			goto setup_nic_dev_free;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_free;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		/* Carrier stays off until firmware reports link up. */
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		if (octeon_dev->subsystem_id ==
			OCTEON_CN2350_25GB_SUBSYS_ID ||
		    octeon_dev->subsystem_id ==
			OCTEON_CN2360_25GB_SUBSYS_ID) {
			cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
					     octeon_dev->fw_info.ver.min,
					     octeon_dev->fw_info.ver.rev);

			/* speed control unsupported in f/w older than 1.7.2 */
			if (cur_ver < OCT_FW_VER(1, 7, 2)) {
				dev_info(&octeon_dev->pci_dev->dev,
					 "speed setting not supported by f/w.");
				octeon_dev->speed_setting = 25;
				octeon_dev->no_speed_setting = 1;
			} else {
				liquidio_get_speed(lio);
			}

			if (octeon_dev->speed_setting == 0) {
				octeon_dev->speed_setting = 25;
				octeon_dev->no_speed_setting = 1;
			}
		} else {
			octeon_dev->no_speed_setting = 1;
			octeon_dev->speed_setting = 10;
		}
		octeon_dev->speed_boot = octeon_dev->speed_setting;

		/* don't read FEC setting if unsupported by f/w (see above) */
		if (octeon_dev->speed_boot == 25 &&
		    !octeon_dev->no_speed_setting) {
			liquidio_get_fec(lio);
			octeon_dev->props[lio->ifidx].fec_boot =
				octeon_dev->props[lio->ifidx].fec;
		}
	}

	/* One devlink instance per octeon device, created after all
	 * interfaces are up.  device_lock serializes with driver core.
	 */
	device_lock(&octeon_dev->pci_dev->dev);
	devlink = devlink_alloc(&liquidio_devlink_ops,
				sizeof(struct lio_devlink_priv),
				&octeon_dev->pci_dev->dev);
	if (!devlink) {
		device_unlock(&octeon_dev->pci_dev->dev);
		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
		goto setup_nic_dev_free;
	}

	lio_devlink = devlink_priv(devlink);
lio_devlink->oct = octeon_dev; 3758 3759 octeon_dev->devlink = devlink; 3760 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; 3761 devlink_register(devlink); 3762 device_unlock(&octeon_dev->pci_dev->dev); 3763 3764 return 0; 3765 3766 setup_nic_dev_free: 3767 3768 while (i--) { 3769 dev_err(&octeon_dev->pci_dev->dev, 3770 "NIC ifidx:%d Setup failed\n", i); 3771 liquidio_destroy_nic_device(octeon_dev, i); 3772 } 3773 3774 setup_nic_dev_done: 3775 3776 return -ENODEV; 3777 } 3778 3779 #ifdef CONFIG_PCI_IOV 3780 static int octeon_enable_sriov(struct octeon_device *oct) 3781 { 3782 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; 3783 struct pci_dev *vfdev; 3784 int err; 3785 u32 u; 3786 3787 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { 3788 err = pci_enable_sriov(oct->pci_dev, 3789 oct->sriov_info.num_vfs_alloced); 3790 if (err) { 3791 dev_err(&oct->pci_dev->dev, 3792 "OCTEON: Failed to enable PCI sriov: %d\n", 3793 err); 3794 oct->sriov_info.num_vfs_alloced = 0; 3795 return err; 3796 } 3797 oct->sriov_info.sriov_enabled = 1; 3798 3799 /* init lookup table that maps DPI ring number to VF pci_dev 3800 * struct pointer 3801 */ 3802 u = 0; 3803 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3804 OCTEON_CN23XX_VF_VID, NULL); 3805 while (vfdev) { 3806 if (vfdev->is_virtfn && 3807 (vfdev->physfn == oct->pci_dev)) { 3808 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = 3809 vfdev; 3810 u += oct->sriov_info.rings_per_vf; 3811 } 3812 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3813 OCTEON_CN23XX_VF_VID, vfdev); 3814 } 3815 } 3816 3817 return num_vfs_alloced; 3818 } 3819 3820 static int lio_pci_sriov_disable(struct octeon_device *oct) 3821 { 3822 int u; 3823 3824 if (pci_vfs_assigned(oct->pci_dev)) { 3825 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n"); 3826 return -EPERM; 3827 } 3828 3829 pci_disable_sriov(oct->pci_dev); 3830 3831 u = 0; 3832 while (u < MAX_POSSIBLE_VFS) { 3833 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; 3834 u += 
oct->sriov_info.rings_per_vf; 3835 } 3836 3837 oct->sriov_info.num_vfs_alloced = 0; 3838 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n", 3839 oct->pf_num); 3840 3841 return 0; 3842 } 3843 3844 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) 3845 { 3846 struct octeon_device *oct = pci_get_drvdata(dev); 3847 int ret = 0; 3848 3849 if ((num_vfs == oct->sriov_info.num_vfs_alloced) && 3850 (oct->sriov_info.sriov_enabled)) { 3851 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n", 3852 oct->pf_num, num_vfs); 3853 return 0; 3854 } 3855 3856 if (!num_vfs) { 3857 lio_vf_rep_destroy(oct); 3858 ret = lio_pci_sriov_disable(oct); 3859 } else if (num_vfs > oct->sriov_info.max_vfs) { 3860 dev_err(&oct->pci_dev->dev, 3861 "OCTEON: Max allowed VFs:%d user requested:%d", 3862 oct->sriov_info.max_vfs, num_vfs); 3863 ret = -EPERM; 3864 } else { 3865 oct->sriov_info.num_vfs_alloced = num_vfs; 3866 ret = octeon_enable_sriov(oct); 3867 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", 3868 oct->pf_num, num_vfs); 3869 ret = lio_vf_rep_create(oct); 3870 if (ret) 3871 dev_info(&oct->pci_dev->dev, 3872 "vf representor create failed"); 3873 } 3874 3875 return ret; 3876 } 3877 #endif 3878 3879 /** 3880 * liquidio_init_nic_module - initialize the NIC 3881 * @oct: octeon device 3882 * 3883 * This initialization routine is called once the Octeon device application is 3884 * up and running 3885 */ 3886 static int liquidio_init_nic_module(struct octeon_device *oct) 3887 { 3888 int i, retval = 0; 3889 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); 3890 3891 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3892 3893 /* only default iq and oq were initialized 3894 * initialize the rest as well 3895 */ 3896 /* run port_config command for each port */ 3897 oct->ifcount = num_nic_ports; 3898 3899 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); 3900 3901 for (i = 0; i < 
MAX_OCTEON_LINKS; i++) 3902 oct->props[i].gmxport = -1; 3903 3904 retval = setup_nic_devices(oct); 3905 if (retval) { 3906 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3907 goto octnet_init_failure; 3908 } 3909 3910 /* Call vf_rep_modinit if the firmware is switchdev capable 3911 * and do it from the first liquidio function probed. 3912 */ 3913 if (!oct->octeon_id && 3914 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { 3915 retval = lio_vf_rep_modinit(); 3916 if (retval) { 3917 liquidio_stop_nic_module(oct); 3918 goto octnet_init_failure; 3919 } 3920 } 3921 3922 liquidio_ptp_init(oct); 3923 3924 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3925 3926 return retval; 3927 3928 octnet_init_failure: 3929 3930 oct->ifcount = 0; 3931 3932 return retval; 3933 } 3934 3935 /** 3936 * nic_starter - finish init 3937 * @work: work struct work_struct 3938 * 3939 * starter callback that invokes the remaining initialization work after the NIC is up and running. 3940 */ 3941 static void nic_starter(struct work_struct *work) 3942 { 3943 struct octeon_device *oct; 3944 struct cavium_wk *wk = (struct cavium_wk *)work; 3945 3946 oct = (struct octeon_device *)wk->ctxptr; 3947 3948 if (atomic_read(&oct->status) == OCT_DEV_RUNNING) 3949 return; 3950 3951 /* If the status of the device is CORE_OK, the core 3952 * application has reported its application type. Call 3953 * any registered handlers now and move to the RUNNING 3954 * state. 
3955 */ 3956 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) { 3957 schedule_delayed_work(&oct->nic_poll_work.work, 3958 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 3959 return; 3960 } 3961 3962 atomic_set(&oct->status, OCT_DEV_RUNNING); 3963 3964 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) { 3965 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n"); 3966 3967 if (liquidio_init_nic_module(oct)) 3968 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n"); 3969 else 3970 handshake[oct->octeon_id].started_ok = 1; 3971 } else { 3972 dev_err(&oct->pci_dev->dev, 3973 "Unexpected application running on NIC (%d). Check firmware.\n", 3974 oct->app_mode); 3975 } 3976 3977 complete(&handshake[oct->octeon_id].started); 3978 } 3979 3980 static int 3981 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) 3982 { 3983 struct octeon_device *oct = (struct octeon_device *)buf; 3984 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3985 int i, notice, vf_idx; 3986 bool cores_crashed; 3987 u64 *data, vf_num; 3988 3989 notice = recv_pkt->rh.r.ossp; 3990 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE); 3991 3992 /* the first 64-bit word of data is the vf_num */ 3993 vf_num = data[0]; 3994 octeon_swap_8B_data(&vf_num, 1); 3995 vf_idx = (int)vf_num - 1; 3996 3997 cores_crashed = READ_ONCE(oct->cores_crashed); 3998 3999 if (notice == VF_DRV_LOADED) { 4000 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) { 4001 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx); 4002 dev_info(&oct->pci_dev->dev, 4003 "driver for VF%d was loaded\n", vf_idx); 4004 if (!cores_crashed) 4005 try_module_get(THIS_MODULE); 4006 } 4007 } else if (notice == VF_DRV_REMOVED) { 4008 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) { 4009 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx); 4010 dev_info(&oct->pci_dev->dev, 4011 "driver for VF%d was removed\n", vf_idx); 4012 if (!cores_crashed) 4013 module_put(THIS_MODULE); 4014 } 4015 } 
else if (notice == VF_DRV_MACADDR_CHANGED) { 4016 u8 *b = (u8 *)&data[1]; 4017 4018 oct->sriov_info.vf_macaddr[vf_idx] = data[1]; 4019 dev_info(&oct->pci_dev->dev, 4020 "VF driver changed VF%d's MAC address to %pM\n", 4021 vf_idx, b + 2); 4022 } 4023 4024 for (i = 0; i < recv_pkt->buffer_count; i++) 4025 recv_buffer_free(recv_pkt->buffer_ptr[i]); 4026 octeon_free_recv_info(recv_info); 4027 4028 return 0; 4029 } 4030 4031 /** 4032 * octeon_device_init - Device initialization for each Octeon device that is probed 4033 * @octeon_dev: octeon device 4034 */ 4035 static int octeon_device_init(struct octeon_device *octeon_dev) 4036 { 4037 int j, ret; 4038 char bootcmd[] = "\n"; 4039 char *dbg_enb = NULL; 4040 enum lio_fw_state fw_state; 4041 struct octeon_device_priv *oct_priv = octeon_dev->priv; 4042 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); 4043 4044 /* Enable access to the octeon device and make its DMA capability 4045 * known to the OS. 4046 */ 4047 if (octeon_pci_os_setup(octeon_dev)) 4048 return 1; 4049 4050 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE); 4051 4052 /* Identify the Octeon type and map the BAR address space. */ 4053 if (octeon_chip_specific_setup(octeon_dev)) { 4054 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n"); 4055 return 1; 4056 } 4057 4058 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE); 4059 4060 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE', 4061 * since that is what is required for the reference to be removed 4062 * during de-initialization (see 'octeon_destroy_resources'). 
4063 */ 4064 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number, 4065 PCI_SLOT(octeon_dev->pci_dev->devfn), 4066 PCI_FUNC(octeon_dev->pci_dev->devfn), 4067 true); 4068 4069 octeon_dev->app_mode = CVM_DRV_INVALID_APP; 4070 4071 /* CN23XX supports preloaded firmware if the following is true: 4072 * 4073 * The adapter indicates that firmware is currently running AND 4074 * 'fw_type' is 'auto'. 4075 * 4076 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate). 4077 */ 4078 if (OCTEON_CN23XX_PF(octeon_dev) && 4079 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) { 4080 atomic_cmpxchg(octeon_dev->adapter_fw_state, 4081 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED); 4082 } 4083 4084 /* If loading firmware, only first device of adapter needs to do so. */ 4085 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state, 4086 FW_NEEDS_TO_BE_LOADED, 4087 FW_IS_BEING_LOADED); 4088 4089 /* Here, [local variable] 'fw_state' is set to one of: 4090 * 4091 * FW_IS_PRELOADED: No firmware is to be loaded (see above) 4092 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load 4093 * firmware to the adapter. 4094 * FW_IS_BEING_LOADED: The driver's second instance will not load 4095 * firmware to the adapter. 4096 */ 4097 4098 /* Prior to f/w load, perform a soft reset of the Octeon device; 4099 * if error resetting, return w/error. 4100 */ 4101 if (fw_state == FW_NEEDS_TO_BE_LOADED) 4102 if (octeon_dev->fn_list.soft_reset(octeon_dev)) 4103 return 1; 4104 4105 /* Initialize the dispatch mechanism used to push packets arriving on 4106 * Octeon Output queues. 
4107 */ 4108 if (octeon_init_dispatch_list(octeon_dev)) 4109 return 1; 4110 4111 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 4112 OPCODE_NIC_CORE_DRV_ACTIVE, 4113 octeon_core_drv_init, 4114 octeon_dev); 4115 4116 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 4117 OPCODE_NIC_VF_DRV_NOTICE, 4118 octeon_recv_vf_drv_notice, octeon_dev); 4119 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter); 4120 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev; 4121 schedule_delayed_work(&octeon_dev->nic_poll_work.work, 4122 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 4123 4124 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE); 4125 4126 if (octeon_set_io_queues_off(octeon_dev)) { 4127 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n"); 4128 return 1; 4129 } 4130 4131 if (OCTEON_CN23XX_PF(octeon_dev)) { 4132 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 4133 if (ret) { 4134 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n"); 4135 return ret; 4136 } 4137 } 4138 4139 /* Initialize soft command buffer pool 4140 */ 4141 if (octeon_setup_sc_buffer_pool(octeon_dev)) { 4142 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n"); 4143 return 1; 4144 } 4145 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 4146 4147 /* Setup the data structures that manage this Octeon's Input queues. */ 4148 if (octeon_setup_instr_queues(octeon_dev)) { 4149 dev_err(&octeon_dev->pci_dev->dev, 4150 "instruction queue initialization failed\n"); 4151 return 1; 4152 } 4153 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 4154 4155 /* Initialize lists to manage the requests of different types that 4156 * arrive from user & kernel applications for this octeon device. 
4157 */ 4158 if (octeon_setup_response_list(octeon_dev)) { 4159 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n"); 4160 return 1; 4161 } 4162 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE); 4163 4164 if (octeon_setup_output_queues(octeon_dev)) { 4165 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n"); 4166 return 1; 4167 } 4168 4169 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); 4170 4171 if (OCTEON_CN23XX_PF(octeon_dev)) { 4172 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) { 4173 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n"); 4174 return 1; 4175 } 4176 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); 4177 4178 if (octeon_allocate_ioq_vector 4179 (octeon_dev, 4180 octeon_dev->sriov_info.num_pf_rings)) { 4181 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); 4182 return 1; 4183 } 4184 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 4185 4186 } else { 4187 /* The input and output queue registers were setup earlier (the 4188 * queues were not enabled). Any additional registers 4189 * that need to be programmed should be done now. 
4190 */ 4191 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 4192 if (ret) { 4193 dev_err(&octeon_dev->pci_dev->dev, 4194 "Failed to configure device registers\n"); 4195 return ret; 4196 } 4197 } 4198 4199 /* Initialize the tasklet that handles output queue packet processing.*/ 4200 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n"); 4201 tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh); 4202 4203 /* Setup the interrupt handler and record the INT SUM register address 4204 */ 4205 if (octeon_setup_interrupt(octeon_dev, 4206 octeon_dev->sriov_info.num_pf_rings)) 4207 return 1; 4208 4209 /* Enable Octeon device interrupts */ 4210 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); 4211 4212 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE); 4213 4214 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE 4215 * the output queue is enabled. 4216 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in 4217 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. 4218 * Otherwise, it is possible that the DRV_ACTIVE message will be sent 4219 * before any credits have been issued, causing the ring to be reset 4220 * (and the f/w appear to never have started). 4221 */ 4222 for (j = 0; j < octeon_dev->num_oqs; j++) 4223 writel(octeon_dev->droq[j]->max_count, 4224 octeon_dev->droq[j]->pkts_credit_reg); 4225 4226 /* Enable the input and output queues for this Octeon device */ 4227 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); 4228 if (ret) { 4229 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues"); 4230 return ret; 4231 } 4232 4233 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); 4234 4235 if (fw_state == FW_NEEDS_TO_BE_LOADED) { 4236 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); 4237 if (!ddr_timeout) { 4238 dev_info(&octeon_dev->pci_dev->dev, 4239 "WAITING. 
Set ddr_timeout to non-zero value to proceed with initialization.\n"); 4240 } 4241 4242 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); 4243 4244 /* Wait for the octeon to initialize DDR after the soft-reset.*/ 4245 while (!ddr_timeout) { 4246 set_current_state(TASK_INTERRUPTIBLE); 4247 if (schedule_timeout(HZ / 10)) { 4248 /* user probably pressed Control-C */ 4249 return 1; 4250 } 4251 } 4252 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); 4253 if (ret) { 4254 dev_err(&octeon_dev->pci_dev->dev, 4255 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", 4256 ret); 4257 return 1; 4258 } 4259 4260 if (octeon_wait_for_bootloader(octeon_dev, 1000)) { 4261 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n"); 4262 return 1; 4263 } 4264 4265 /* Divert uboot to take commands from host instead. */ 4266 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50); 4267 4268 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); 4269 ret = octeon_init_consoles(octeon_dev); 4270 if (ret) { 4271 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); 4272 return 1; 4273 } 4274 /* If console debug enabled, specify empty string to use default 4275 * enablement ELSE specify NULL string for 'disabled'. 4276 */ 4277 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL; 4278 ret = octeon_add_console(octeon_dev, 0, dbg_enb); 4279 if (ret) { 4280 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); 4281 return 1; 4282 } else if (octeon_console_debug_enabled(0)) { 4283 /* If console was added AND we're logging console output 4284 * then set our console print function. 
4285 */ 4286 octeon_dev->console[0].print = octeon_dbg_console_print; 4287 } 4288 4289 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); 4290 4291 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n"); 4292 ret = load_firmware(octeon_dev); 4293 if (ret) { 4294 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); 4295 return 1; 4296 } 4297 4298 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED); 4299 } 4300 4301 handshake[octeon_dev->octeon_id].init_ok = 1; 4302 complete(&handshake[octeon_dev->octeon_id].init); 4303 4304 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); 4305 oct_priv->dev = octeon_dev; 4306 4307 return 0; 4308 } 4309 4310 /** 4311 * octeon_dbg_console_print - Debug console print function 4312 * @oct: octeon device 4313 * @console_num: console number 4314 * @prefix: first portion of line to display 4315 * @suffix: second portion of line to display 4316 * 4317 * The OCTEON debug console outputs entire lines (excluding '\n'). 4318 * Normally, the line will be passed in the 'prefix' parameter. 4319 * However, due to buffering, it is possible for a line to be split into two 4320 * parts, in which case they will be passed as the 'prefix' parameter and 4321 * 'suffix' parameter. 4322 */ 4323 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, 4324 char *prefix, char *suffix) 4325 { 4326 if (prefix && suffix) 4327 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix, 4328 suffix); 4329 else if (prefix) 4330 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix); 4331 else if (suffix) 4332 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix); 4333 4334 return 0; 4335 } 4336 4337 /** 4338 * liquidio_exit - Exits the module 4339 */ 4340 static void __exit liquidio_exit(void) 4341 { 4342 liquidio_deinit_pci(); 4343 4344 pr_info("LiquidIO network module is now unloaded\n"); 4345 } 4346 4347 module_init(liquidio_init); 4348 module_exit(liquidio_exit); 4349