1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <linux/firmware.h> 22 #include <net/vxlan.h> 23 #include <linux/kthread.h> 24 #include "liquidio_common.h" 25 #include "octeon_droq.h" 26 #include "octeon_iq.h" 27 #include "response_manager.h" 28 #include "octeon_device.h" 29 #include "octeon_nic.h" 30 #include "octeon_main.h" 31 #include "octeon_network.h" 32 #include "cn66xx_regs.h" 33 #include "cn66xx_device.h" 34 #include "cn68xx_device.h" 35 #include "cn23xx_pf_device.h" 36 #include "liquidio_image.h" 37 #include "lio_vf_rep.h" 38 39 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 40 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); 41 MODULE_LICENSE("GPL"); 42 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME 43 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME 45 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME 47 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME 49 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 50 51 static int ddr_timeout = 10000; 52 module_param(ddr_timeout, int, 0644); 53 MODULE_PARM_DESC(ddr_timeout, 54 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check"); 55 56 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 57 58 static int debug = -1; 59 module_param(debug, int, 0644); 60 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 61 62 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO; 63 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); 64 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\"."); 65 66 static u32 console_bitmask; 67 module_param(console_bitmask, int, 0644); 68 MODULE_PARM_DESC(console_bitmask, 69 "Bitmask indicating which consoles have debug output redirected to syslog."); 70 71 /** 72 * octeon_console_debug_enabled - determines if a given console has debug enabled. 73 * @console: console to check 74 * Return: 1 = enabled. 0 otherwise 75 */ 76 static int octeon_console_debug_enabled(u32 console) 77 { 78 return (console_bitmask >> (console)) & 0x1; 79 } 80 81 /* Polling interval for determining when NIC application is alive */ 82 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 83 84 /* runtime link query interval */ 85 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 86 /* update localtime to octeon firmware every 60 seconds. 87 * make firmware to use same time reference, so that it will be easy to 88 * correlate firmware logged events/errors with host events, for debugging. 89 */ 90 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 91 92 /* time to wait for possible in-flight requests in milliseconds */ 93 #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) 94 95 struct oct_timestamp_resp { 96 u64 rh; 97 u64 timestamp; 98 u64 status; 99 }; 100 101 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) 102 103 union tx_info { 104 u64 u64; 105 struct { 106 #ifdef __BIG_ENDIAN_BITFIELD 107 u16 gso_size; 108 u16 gso_segs; 109 u32 reserved; 110 #else 111 u32 reserved; 112 u16 gso_segs; 113 u16 gso_size; 114 #endif 115 } s; 116 }; 117 118 /* Octeon device properties to be used by the NIC module. 119 * Each octeon device in the system will be represented 120 * by this structure in the NIC module. 121 */ 122 123 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 124 #define OCTNIC_GSO_MAX_SIZE \ 125 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 126 127 struct handshake { 128 struct completion init; 129 struct completion started; 130 struct pci_dev *pci_dev; 131 int init_ok; 132 int started_ok; 133 }; 134 135 #ifdef CONFIG_PCI_IOV 136 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); 137 #endif 138 139 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, 140 char *prefix, char *suffix); 141 142 static int octeon_device_init(struct octeon_device *); 143 static int liquidio_stop(struct net_device *netdev); 144 static void liquidio_remove(struct pci_dev *pdev); 145 static int liquidio_probe(struct pci_dev *pdev, 146 const struct pci_device_id *ent); 147 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 148 int linkstate); 149 150 static struct handshake handshake[MAX_OCTEON_DEVICES]; 151 static struct completion first_stage; 152 153 static void octeon_droq_bh(struct tasklet_struct *t) 154 { 155 int q_no; 156 int reschedule = 0; 157 struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t, 158 droq_tasklet); 159 struct octeon_device *oct = oct_priv->dev; 160 161 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { 162 if (!(oct->io_qmask.oq & BIT_ULL(q_no))) 163 continue; 164 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], 165 MAX_PACKET_BUDGET); 166 lio_enable_irq(oct->droq[q_no], NULL); 167 168 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { 169 /* set time and cnt interrupt thresholds for this DROQ 170 * for NAPI 171 */ 172 int adjusted_q_no = q_no + oct->sriov_info.pf_srn; 173 174 octeon_write_csr64( 175 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no), 176 0x5700000040ULL); 177 octeon_write_csr64( 178 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0); 179 } 180 } 181 182 if (reschedule) 183 tasklet_schedule(&oct_priv->droq_tasklet); 184 } 185 186 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 187 { 188 struct octeon_device_priv *oct_priv = oct->priv; 189 int retry = 100, pkt_cnt = 0, pending_pkts = 0; 190 int i; 191 192 do { 193 pending_pkts = 0; 194 195 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 196 if (!(oct->io_qmask.oq & BIT_ULL(i))) 197 continue; 198 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 199 } 200 if (pkt_cnt > 0) { 201 pending_pkts += pkt_cnt; 202 tasklet_schedule(&oct_priv->droq_tasklet); 203 } 204 pkt_cnt = 0; 205 schedule_timeout_uninterruptible(1); 206 207 } while (retry-- && pending_pkts); 208 209 return pkt_cnt; 210 } 211 212 /** 213 * force_io_queues_off - Forces all IO queues off on a given device 214 * @oct: Pointer to Octeon device 215 */ 216 static void force_io_queues_off(struct octeon_device *oct) 217 { 218 if ((oct->chip_id == OCTEON_CN66XX) || 219 (oct->chip_id == OCTEON_CN68XX)) { 220 /* Reset the Enable bits for Input Queues. */ 221 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); 222 223 /* Reset the Enable bits for Output Queues. */ 224 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); 225 } 226 } 227 228 /** 229 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc 230 * @oct: Pointer to Octeon device 231 */ 232 static inline void pcierror_quiesce_device(struct octeon_device *oct) 233 { 234 int i; 235 236 /* Disable the input and output queues now. No more packets will 237 * arrive from Octeon, but we should wait for all packet processing 238 * to finish. 239 */ 240 force_io_queues_off(oct); 241 242 /* To allow for in-flight requests */ 243 schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST); 244 245 if (wait_for_pending_requests(oct)) 246 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 247 248 /* Force all requests waiting to be fetched by OCTEON to complete. */ 249 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 250 struct octeon_instr_queue *iq; 251 252 if (!(oct->io_qmask.iq & BIT_ULL(i))) 253 continue; 254 iq = oct->instr_queue[i]; 255 256 if (atomic_read(&iq->instr_pending)) { 257 spin_lock_bh(&iq->lock); 258 iq->fill_cnt = 0; 259 iq->octeon_read_index = iq->host_write_index; 260 iq->stats.instr_processed += 261 atomic_read(&iq->instr_pending); 262 lio_process_iq_request_list(oct, iq, 0); 263 spin_unlock_bh(&iq->lock); 264 } 265 } 266 267 /* Force all pending ordered list requests to time out. */ 268 lio_process_ordered_list(oct, 1); 269 270 /* We do not need to wait for output queue packets to be processed. */ 271 } 272 273 /** 274 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status 275 * @dev: Pointer to PCI device 276 */ 277 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 278 { 279 int pos = 0x100; 280 u32 status, mask; 281 282 pr_info("%s :\n", __func__); 283 284 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 285 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 286 if (dev->error_state == pci_channel_io_normal) 287 status &= ~mask; /* Clear corresponding nonfatal bits */ 288 else 289 status &= mask; /* Clear corresponding fatal bits */ 290 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 291 } 292 293 /** 294 * stop_pci_io - Stop all PCI IO to a given device 295 * @oct: Pointer to Octeon device 296 */ 297 static void stop_pci_io(struct octeon_device *oct) 298 { 299 /* No more instructions will be forwarded. */ 300 atomic_set(&oct->status, OCT_DEV_IN_RESET); 301 302 pci_disable_device(oct->pci_dev); 303 304 /* Disable interrupts */ 305 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 306 307 pcierror_quiesce_device(oct); 308 309 /* Release the interrupt line */ 310 free_irq(oct->pci_dev->irq, oct); 311 312 if (oct->flags & LIO_FLAG_MSI_ENABLED) 313 pci_disable_msi(oct->pci_dev); 314 315 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 316 lio_get_state_string(&oct->status)); 317 318 /* making it a common function for all OCTEON models */ 319 cleanup_aer_uncorrect_error_status(oct->pci_dev); 320 } 321 322 /** 323 * liquidio_pcie_error_detected - called when PCI error is detected 324 * @pdev: Pointer to PCI device 325 * @state: The current pci connection state 326 * 327 * This function is called after a PCI bus error affecting 328 * this device has been detected. 329 */ 330 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 331 pci_channel_state_t state) 332 { 333 struct octeon_device *oct = pci_get_drvdata(pdev); 334 335 /* Non-correctable Non-fatal errors */ 336 if (state == pci_channel_io_normal) { 337 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 338 cleanup_aer_uncorrect_error_status(oct->pci_dev); 339 return PCI_ERS_RESULT_CAN_RECOVER; 340 } 341 342 /* Non-correctable Fatal errors */ 343 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 344 stop_pci_io(oct); 345 346 /* Always return a DISCONNECT. There is no support for recovery but only 347 * for a clean shutdown. 348 */ 349 return PCI_ERS_RESULT_DISCONNECT; 350 } 351 352 /** 353 * liquidio_pcie_mmio_enabled - mmio handler 354 * @pdev: Pointer to PCI device 355 */ 356 static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev) 357 { 358 /* We should never hit this since we never ask for a reset for a Fatal 359 * Error. We always return DISCONNECT in io_error above. 360 * But play safe and return RECOVERED for now. 361 */ 362 return PCI_ERS_RESULT_RECOVERED; 363 } 364 365 /** 366 * liquidio_pcie_slot_reset - called after the pci bus has been reset. 367 * @pdev: Pointer to PCI device 368 * 369 * Restart the card from scratch, as if from a cold-boot. Implementation 370 * resembles the first-half of the octeon_resume routine. 371 */ 372 static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev) 373 { 374 /* We should never hit this since we never ask for a reset for a Fatal 375 * Error. We always return DISCONNECT in io_error above. 376 * But play safe and return RECOVERED for now. 377 */ 378 return PCI_ERS_RESULT_RECOVERED; 379 } 380 381 /** 382 * liquidio_pcie_resume - called when traffic can start flowing again. 383 * @pdev: Pointer to PCI device 384 * 385 * This callback is called when the error recovery driver tells us that 386 * its OK to resume normal operation. Implementation resembles the 387 * second-half of the octeon_resume routine. 388 */ 389 static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev) 390 { 391 /* Nothing to be done here. */ 392 } 393 394 #define liquidio_suspend NULL 395 #define liquidio_resume NULL 396 397 /* For PCI-E Advanced Error Recovery (AER) Interface */ 398 static const struct pci_error_handlers liquidio_err_handler = { 399 .error_detected = liquidio_pcie_error_detected, 400 .mmio_enabled = liquidio_pcie_mmio_enabled, 401 .slot_reset = liquidio_pcie_slot_reset, 402 .resume = liquidio_pcie_resume, 403 }; 404 405 static const struct pci_device_id liquidio_pci_tbl[] = { 406 { /* 68xx */ 407 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 408 }, 409 { /* 66xx */ 410 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 411 }, 412 { /* 23xx pf */ 413 PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 414 }, 415 { 416 0, 0, 0, 0, 0, 0, 0 417 } 418 }; 419 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); 420 421 static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume); 422 423 static struct pci_driver liquidio_pci_driver = { 424 .name = "LiquidIO", 425 .id_table = liquidio_pci_tbl, 426 .probe = liquidio_probe, 427 .remove = liquidio_remove, 428 .err_handler = &liquidio_err_handler, /* For AER */ 429 .driver.pm = &liquidio_pm_ops, 430 #ifdef CONFIG_PCI_IOV 431 .sriov_configure = liquidio_enable_sriov, 432 #endif 433 }; 434 435 /** 436 * liquidio_init_pci - register PCI driver 437 */ 438 static int liquidio_init_pci(void) 439 { 440 return pci_register_driver(&liquidio_pci_driver); 441 } 442 443 /** 444 * liquidio_deinit_pci - unregister PCI driver 445 */ 446 static void liquidio_deinit_pci(void) 447 { 448 pci_unregister_driver(&liquidio_pci_driver); 449 } 450 451 /** 452 * check_txq_status - Check Tx queue status, and take appropriate action 453 * @lio: per-network private data 454 * Return: 0 if full, number of queues woken up otherwise 455 */ 456 static inline int check_txq_status(struct lio *lio) 457 { 458 int numqs = lio->netdev->real_num_tx_queues; 459 int ret_val = 0; 460 int q, iq; 461 462 /* check each sub-queue state */ 463 for (q = 0; q < numqs; q++) { 464 iq = lio->linfo.txpciq[q % 465 lio->oct_dev->num_iqs].s.q_no; 466 if (octnet_iq_is_full(lio->oct_dev, iq)) 467 continue; 468 if (__netif_subqueue_stopped(lio->netdev, q)) { 469 netif_wake_subqueue(lio->netdev, q); 470 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, 471 tx_restart, 1); 472 ret_val++; 473 } 474 } 475 476 return ret_val; 477 } 478 479 /** 480 * print_link_info - Print link information 481 * @netdev: network device 482 */ 483 static void print_link_info(struct net_device *netdev) 484 { 485 struct lio *lio = GET_LIO(netdev); 486 487 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 488 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 489 struct oct_link_info *linfo = &lio->linfo; 490 491 if (linfo->link.s.link_up) { 492 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 493 linfo->link.s.speed, 494 (linfo->link.s.duplex) ? "Full" : "Half"); 495 } else { 496 netif_info(lio, link, lio->netdev, "Link Down\n"); 497 } 498 } 499 } 500 501 /** 502 * octnet_link_status_change - Routine to notify MTU change 503 * @work: work_struct data structure 504 */ 505 static void octnet_link_status_change(struct work_struct *work) 506 { 507 struct cavium_wk *wk = (struct cavium_wk *)work; 508 struct lio *lio = (struct lio *)wk->ctxptr; 509 510 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. 511 * this API is invoked only when new max-MTU of the interface is 512 * less than current MTU. 513 */ 514 rtnl_lock(); 515 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); 516 rtnl_unlock(); 517 } 518 519 /** 520 * setup_link_status_change_wq - Sets up the mtu status change work 521 * @netdev: network device 522 */ 523 static inline int setup_link_status_change_wq(struct net_device *netdev) 524 { 525 struct lio *lio = GET_LIO(netdev); 526 struct octeon_device *oct = lio->oct_dev; 527 528 lio->link_status_wq.wq = alloc_workqueue("link-status", 529 WQ_MEM_RECLAIM | WQ_PERCPU, 530 0); 531 if (!lio->link_status_wq.wq) { 532 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 533 return -1; 534 } 535 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 536 octnet_link_status_change); 537 lio->link_status_wq.wk.ctxptr = lio; 538 539 return 0; 540 } 541 542 static inline void cleanup_link_status_change_wq(struct net_device *netdev) 543 { 544 struct lio *lio = GET_LIO(netdev); 545 546 if (lio->link_status_wq.wq) { 547 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 548 destroy_workqueue(lio->link_status_wq.wq); 549 } 550 } 551 552 /** 553 * update_link_status - Update link status 554 * @netdev: network device 555 * @ls: link status structure 556 * 557 * Called on receipt of a link status response from the core application to 558 * update each interface's link status. 559 */ 560 static inline void update_link_status(struct net_device *netdev, 561 union oct_link_status *ls) 562 { 563 struct lio *lio = GET_LIO(netdev); 564 int changed = (lio->linfo.link.u64 != ls->u64); 565 int current_max_mtu = lio->linfo.link.s.mtu; 566 struct octeon_device *oct = lio->oct_dev; 567 568 dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n", 569 __func__, lio->linfo.link.u64, ls->u64); 570 lio->linfo.link.u64 = ls->u64; 571 572 if ((lio->intf_open) && (changed)) { 573 print_link_info(netdev); 574 lio->link_changes++; 575 576 if (lio->linfo.link.s.link_up) { 577 dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__); 578 netif_carrier_on(netdev); 579 wake_txqs(netdev); 580 } else { 581 dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__); 582 netif_carrier_off(netdev); 583 stop_txqs(netdev); 584 } 585 if (lio->linfo.link.s.mtu != current_max_mtu) { 586 netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n", 587 current_max_mtu, lio->linfo.link.s.mtu); 588 netdev->max_mtu = lio->linfo.link.s.mtu; 589 } 590 if (lio->linfo.link.s.mtu < netdev->mtu) { 591 dev_warn(&oct->pci_dev->dev, 592 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", 593 netdev->mtu, lio->linfo.link.s.mtu); 594 queue_delayed_work(lio->link_status_wq.wq, 595 &lio->link_status_wq.wk.work, 0); 596 } 597 } 598 } 599 600 /** 601 * lio_sync_octeon_time - send latest localtime to octeon firmware so that 602 * firmware will correct it's time, in case there is a time skew 603 * 604 * @work: work scheduled to send time update to octeon firmware 605 **/ 606 static void lio_sync_octeon_time(struct work_struct *work) 607 { 608 struct cavium_wk *wk = (struct cavium_wk *)work; 609 struct lio *lio = (struct lio *)wk->ctxptr; 610 struct octeon_device *oct = lio->oct_dev; 611 struct octeon_soft_command *sc; 612 struct timespec64 ts; 613 struct lio_time *lt; 614 int ret; 615 616 sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0); 617 if (!sc) { 618 dev_err(&oct->pci_dev->dev, 619 "Failed to sync time to octeon: soft command allocation failed\n"); 620 return; 621 } 622 623 lt = (struct lio_time *)sc->virtdptr; 624 625 /* Get time of the day */ 626 ktime_get_real_ts64(&ts); 627 lt->sec = ts.tv_sec; 628 lt->nsec = ts.tv_nsec; 629 octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); 630 631 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 632 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 633 OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0); 634 635 init_completion(&sc->complete); 636 sc->sc_status = OCTEON_REQUEST_PENDING; 637 638 ret = octeon_send_soft_command(oct, sc); 639 if (ret == IQ_SEND_FAILED) { 640 dev_err(&oct->pci_dev->dev, 641 "Failed to sync time to octeon: failed to send soft command\n"); 642 octeon_free_soft_command(oct, sc); 643 } else { 644 WRITE_ONCE(sc->caller_is_done, true); 645 } 646 647 queue_delayed_work(lio->sync_octeon_time_wq.wq, 648 &lio->sync_octeon_time_wq.wk.work, 649 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); 650 } 651 652 /** 653 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware 654 * 655 * @netdev: network device which should send time update to firmware 656 **/ 657 static inline int setup_sync_octeon_time_wq(struct net_device *netdev) 658 { 659 struct lio *lio = GET_LIO(netdev); 660 struct octeon_device *oct = lio->oct_dev; 661 662 lio->sync_octeon_time_wq.wq = 663 alloc_workqueue("update-octeon-time", 664 WQ_MEM_RECLAIM | WQ_PERCPU, 0); 665 if (!lio->sync_octeon_time_wq.wq) { 666 dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n"); 667 return -1; 668 } 669 INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work, 670 lio_sync_octeon_time); 671 lio->sync_octeon_time_wq.wk.ctxptr = lio; 672 queue_delayed_work(lio->sync_octeon_time_wq.wq, 673 &lio->sync_octeon_time_wq.wk.work, 674 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); 675 676 return 0; 677 } 678 679 /** 680 * cleanup_sync_octeon_time_wq - destroy wq 681 * 682 * @netdev: network device which should send time update to firmware 683 * 684 * Stop scheduling and destroy the work created to periodically update local 685 * time to octeon firmware. 686 **/ 687 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev) 688 { 689 struct lio *lio = GET_LIO(netdev); 690 struct cavium_wq *time_wq = &lio->sync_octeon_time_wq; 691 692 if (time_wq->wq) { 693 cancel_delayed_work_sync(&time_wq->wk.work); 694 destroy_workqueue(time_wq->wq); 695 } 696 } 697 698 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) 699 { 700 struct octeon_device *other_oct; 701 702 other_oct = lio_get_device(oct->octeon_id + 1); 703 704 if (other_oct && other_oct->pci_dev) { 705 int oct_busnum, other_oct_busnum; 706 707 oct_busnum = oct->pci_dev->bus->number; 708 other_oct_busnum = other_oct->pci_dev->bus->number; 709 710 if (oct_busnum == other_oct_busnum) { 711 int oct_slot, other_oct_slot; 712 713 oct_slot = PCI_SLOT(oct->pci_dev->devfn); 714 other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn); 715 716 if (oct_slot == other_oct_slot) 717 return other_oct; 718 } 719 } 720 721 return NULL; 722 } 723 724 static void disable_all_vf_links(struct octeon_device *oct) 725 { 726 struct net_device *netdev; 727 int max_vfs, vf, i; 728 729 if (!oct) 730 return; 731 732 max_vfs = oct->sriov_info.max_vfs; 733 734 for (i = 0; i < oct->ifcount; i++) { 735 netdev = oct->props[i].netdev; 736 if (!netdev) 737 continue; 738 739 for (vf = 0; vf < max_vfs; vf++) 740 liquidio_set_vf_link_state(netdev, vf, 741 IFLA_VF_LINK_STATE_DISABLE); 742 } 743 } 744 745 static int liquidio_watchdog(void *param) 746 { 747 bool err_msg_was_printed[LIO_MAX_CORES]; 748 u16 mask_of_crashed_or_stuck_cores = 0; 749 bool all_vf_links_are_disabled = false; 750 struct octeon_device *oct = param; 751 struct octeon_device *other_oct; 752 #ifdef CONFIG_MODULE_UNLOAD 753 long refcount, vfs_referencing_pf; 754 u64 vfs_mask1, vfs_mask2; 755 #endif 756 int core; 757 758 memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed)); 759 760 while (!kthread_should_stop()) { 761 /* sleep for a couple of seconds so that we don't hog the CPU */ 762 set_current_state(TASK_INTERRUPTIBLE); 763 schedule_timeout(msecs_to_jiffies(2000)); 764 765 mask_of_crashed_or_stuck_cores = 766 (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); 767 768 if (!mask_of_crashed_or_stuck_cores) 769 continue; 770 771 WRITE_ONCE(oct->cores_crashed, true); 772 other_oct = get_other_octeon_device(oct); 773 if (other_oct) 774 WRITE_ONCE(other_oct->cores_crashed, true); 775 776 for (core = 0; core < LIO_MAX_CORES; core++) { 777 bool core_crashed_or_got_stuck; 778 779 core_crashed_or_got_stuck = 780 (mask_of_crashed_or_stuck_cores 781 >> core) & 1; 782 783 if (core_crashed_or_got_stuck && 784 !err_msg_was_printed[core]) { 785 dev_err(&oct->pci_dev->dev, 786 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n", 787 core); 788 err_msg_was_printed[core] = true; 789 } 790 } 791 792 if (all_vf_links_are_disabled) 793 continue; 794 795 disable_all_vf_links(oct); 796 disable_all_vf_links(other_oct); 797 all_vf_links_are_disabled = true; 798 799 #ifdef CONFIG_MODULE_UNLOAD 800 vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask); 801 vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask); 802 803 vfs_referencing_pf = hweight64(vfs_mask1); 804 vfs_referencing_pf += hweight64(vfs_mask2); 805 806 refcount = module_refcount(THIS_MODULE); 807 if (refcount >= vfs_referencing_pf) { 808 while (vfs_referencing_pf) { 809 module_put(THIS_MODULE); 810 vfs_referencing_pf--; 811 } 812 } 813 #endif 814 } 815 816 return 0; 817 } 818 819 /** 820 * liquidio_probe - PCI probe handler 821 * @pdev: PCI device structure 822 * @ent: unused 823 */ 824 static int 825 liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent) 826 { 827 struct octeon_device *oct_dev = NULL; 828 struct handshake *hs; 829 830 oct_dev = octeon_allocate_device(pdev->device, 831 sizeof(struct octeon_device_priv)); 832 if (!oct_dev) { 833 dev_err(&pdev->dev, "Unable to allocate device\n"); 834 return -ENOMEM; 835 } 836 837 if (pdev->device == OCTEON_CN23XX_PF_VID) 838 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 839 840 /* Enable PTP for 6XXX Device */ 841 if (((pdev->device == OCTEON_CN66XX) || 842 (pdev->device == OCTEON_CN68XX))) 843 oct_dev->ptp_enable = true; 844 else 845 oct_dev->ptp_enable = false; 846 847 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 848 (u32)pdev->vendor, (u32)pdev->device); 849 850 /* Assign octeon_device for this device to the private data area. */ 851 pci_set_drvdata(pdev, oct_dev); 852 853 /* set linux specific device pointer */ 854 oct_dev->pci_dev = (void *)pdev; 855 856 oct_dev->subsystem_id = pdev->subsystem_vendor | 857 (pdev->subsystem_device << 16); 858 859 hs = &handshake[oct_dev->octeon_id]; 860 init_completion(&hs->init); 861 init_completion(&hs->started); 862 hs->pci_dev = pdev; 863 864 if (oct_dev->octeon_id == 0) 865 /* first LiquidIO NIC is detected */ 866 complete(&first_stage); 867 868 if (octeon_device_init(oct_dev)) { 869 complete(&hs->init); 870 liquidio_remove(pdev); 871 return -ENOMEM; 872 } 873 874 if (OCTEON_CN23XX_PF(oct_dev)) { 875 u8 bus, device, function; 876 877 if (atomic_read(oct_dev->adapter_refcount) == 1) { 878 /* Each NIC gets one watchdog kernel thread. The first 879 * PF (of each NIC) that gets pci_driver->probe()'d 880 * creates that thread. 881 */ 882 bus = pdev->bus->number; 883 device = PCI_SLOT(pdev->devfn); 884 function = PCI_FUNC(pdev->devfn); 885 oct_dev->watchdog_task = kthread_run(liquidio_watchdog, 886 oct_dev, 887 "liowd/%02hhx:%02hhx.%hhx", 888 bus, device, function); 889 if (IS_ERR(oct_dev->watchdog_task)) { 890 oct_dev->watchdog_task = NULL; 891 dev_err(&oct_dev->pci_dev->dev, 892 "failed to create kernel_thread\n"); 893 liquidio_remove(pdev); 894 return -1; 895 } 896 } 897 } 898 899 oct_dev->rx_pause = 1; 900 oct_dev->tx_pause = 1; 901 902 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 903 904 return 0; 905 } 906 907 static bool fw_type_is_auto(void) 908 { 909 return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO, 910 sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0; 911 } 912 913 /** 914 * octeon_pci_flr - PCI FLR for each Octeon device. 915 * @oct: octeon device 916 */ 917 static void octeon_pci_flr(struct octeon_device *oct) 918 { 919 int rc; 920 921 pci_save_state(oct->pci_dev); 922 923 pci_cfg_access_lock(oct->pci_dev); 924 925 /* Quiesce the device completely */ 926 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 927 PCI_COMMAND_INTX_DISABLE); 928 929 rc = __pci_reset_function_locked(oct->pci_dev); 930 931 if (rc != 0) 932 dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n", 933 rc, oct->pf_num); 934 935 pci_cfg_access_unlock(oct->pci_dev); 936 937 pci_restore_state(oct->pci_dev); 938 } 939 940 /** 941 * octeon_destroy_resources - Destroy resources associated with octeon device 942 * @oct: octeon device 943 */ 944 static void octeon_destroy_resources(struct octeon_device *oct) 945 { 946 int i, refcount; 947 struct msix_entry *msix_entries; 948 struct octeon_device_priv *oct_priv = oct->priv; 949 950 struct handshake *hs; 951 952 switch (atomic_read(&oct->status)) { 953 case OCT_DEV_RUNNING: 954 case OCT_DEV_CORE_OK: 955 956 /* No more instructions will be forwarded. */ 957 atomic_set(&oct->status, OCT_DEV_IN_RESET); 958 959 oct->app_mode = CVM_DRV_INVALID_APP; 960 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 961 lio_get_state_string(&oct->status)); 962 963 schedule_timeout_uninterruptible(HZ / 10); 964 965 fallthrough; 966 case OCT_DEV_HOST_OK: 967 968 case OCT_DEV_CONSOLE_INIT_DONE: 969 /* Remove any consoles */ 970 octeon_remove_consoles(oct); 971 972 fallthrough; 973 case OCT_DEV_IO_QUEUES_DONE: 974 if (lio_wait_for_instr_fetch(oct)) 975 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 976 977 if (wait_for_pending_requests(oct)) 978 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 979 980 /* Disable the input and output queues now. No more packets will 981 * arrive from Octeon, but we should wait for all packet 982 * processing to finish. 983 */ 984 oct->fn_list.disable_io_queues(oct); 985 986 if (lio_wait_for_oq_pkts(oct)) 987 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 988 989 /* Force all requests waiting to be fetched by OCTEON to 990 * complete. 991 */ 992 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 993 struct octeon_instr_queue *iq; 994 995 if (!(oct->io_qmask.iq & BIT_ULL(i))) 996 continue; 997 iq = oct->instr_queue[i]; 998 999 if (atomic_read(&iq->instr_pending)) { 1000 spin_lock_bh(&iq->lock); 1001 iq->fill_cnt = 0; 1002 iq->octeon_read_index = iq->host_write_index; 1003 iq->stats.instr_processed += 1004 atomic_read(&iq->instr_pending); 1005 lio_process_iq_request_list(oct, iq, 0); 1006 spin_unlock_bh(&iq->lock); 1007 } 1008 } 1009 1010 lio_process_ordered_list(oct, 1); 1011 octeon_free_sc_done_list(oct); 1012 octeon_free_sc_zombie_list(oct); 1013 1014 fallthrough; 1015 case OCT_DEV_INTR_SET_DONE: 1016 /* Disable interrupts */ 1017 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 1018 1019 if (oct->msix_on) { 1020 msix_entries = (struct msix_entry *)oct->msix_entries; 1021 for (i = 0; i < oct->num_msix_irqs - 1; i++) { 1022 if (oct->ioq_vector[i].vector) { 1023 /* clear the affinity_cpumask */ 1024 irq_set_affinity_hint( 1025 msix_entries[i].vector, 1026 NULL); 1027 free_irq(msix_entries[i].vector, 1028 &oct->ioq_vector[i]); 1029 oct->ioq_vector[i].vector = 0; 1030 } 1031 } 1032 /* non-iov vector's argument is oct struct */ 1033 free_irq(msix_entries[i].vector, oct); 1034 1035 pci_disable_msix(oct->pci_dev); 1036 kfree(oct->msix_entries); 1037 oct->msix_entries = NULL; 1038 } else { 1039 /* Release the interrupt line */ 1040 free_irq(oct->pci_dev->irq, oct); 1041 1042 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1043 pci_disable_msi(oct->pci_dev); 1044 } 1045 1046 kfree(oct->irq_name_storage); 1047 oct->irq_name_storage = NULL; 1048 1049 fallthrough; 1050 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 1051 if (OCTEON_CN23XX_PF(oct)) 1052 octeon_free_ioq_vector(oct); 1053 1054 fallthrough; 1055 case OCT_DEV_MBOX_SETUP_DONE: 1056 if (OCTEON_CN23XX_PF(oct)) 1057 oct->fn_list.free_mbox(oct); 1058 1059 fallthrough; 1060 case OCT_DEV_IN_RESET: 1061 case OCT_DEV_DROQ_INIT_DONE: 1062 /* Wait for any pending operations */ 1063 mdelay(100); 1064 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 1065 if (!(oct->io_qmask.oq & BIT_ULL(i))) 1066 continue; 1067 octeon_delete_droq(oct, i); 1068 } 1069 1070 /* Force any pending handshakes to complete */ 1071 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 1072 hs = &handshake[i]; 1073 1074 if (hs->pci_dev) { 1075 handshake[oct->octeon_id].init_ok = 0; 1076 complete(&handshake[oct->octeon_id].init); 1077 handshake[oct->octeon_id].started_ok = 0; 1078 complete(&handshake[oct->octeon_id].started); 1079 } 1080 } 1081 1082 fallthrough; 1083 case OCT_DEV_RESP_LIST_INIT_DONE: 1084 octeon_delete_response_list(oct); 1085 1086 fallthrough; 1087 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 1088 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 1089 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1090 continue; 1091 octeon_delete_instr_queue(oct, i); 1092 } 1093 #ifdef CONFIG_PCI_IOV 1094 if (oct->sriov_info.sriov_enabled) 1095 pci_disable_sriov(oct->pci_dev); 1096 #endif 1097 fallthrough; 1098 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 1099 octeon_free_sc_buffer_pool(oct); 1100 1101 fallthrough; 1102 case OCT_DEV_DISPATCH_INIT_DONE: 1103 octeon_delete_dispatch_list(oct); 1104 cancel_delayed_work_sync(&oct->nic_poll_work.work); 1105 1106 fallthrough; 1107 case OCT_DEV_PCI_MAP_DONE: 1108 refcount = octeon_deregister_device(oct); 1109 1110 /* Soft reset the octeon device before exiting. 1111 * However, if fw was loaded from card (i.e. autoboot), 1112 * perform an FLR instead. 1113 * Implementation note: only soft-reset the device 1114 * if it is a CN6XXX OR the LAST CN23XX device. 1115 */ 1116 if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED) 1117 octeon_pci_flr(oct); 1118 else if (OCTEON_CN6XXX(oct) || !refcount) 1119 oct->fn_list.soft_reset(oct); 1120 1121 octeon_unmap_pci_barx(oct, 0); 1122 octeon_unmap_pci_barx(oct, 1); 1123 1124 fallthrough; 1125 case OCT_DEV_PCI_ENABLE_DONE: 1126 /* Disable the device, releasing the PCI INT */ 1127 pci_disable_device(oct->pci_dev); 1128 1129 fallthrough; 1130 case OCT_DEV_BEGIN_STATE: 1131 /* Nothing to be done here either */ 1132 break; 1133 } /* end switch (oct->status) */ 1134 1135 tasklet_kill(&oct_priv->droq_tasklet); 1136 } 1137 1138 /** 1139 * send_rx_ctrl_cmd - Send Rx control command 1140 * @lio: per-network private data 1141 * @start_stop: whether to start or stop 1142 */ 1143 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1144 { 1145 struct octeon_soft_command *sc; 1146 union octnet_cmd *ncmd; 1147 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1148 int retval; 1149 1150 if (oct->props[lio->ifidx].rx_on == start_stop) 1151 return 0; 1152 1153 sc = (struct octeon_soft_command *) 1154 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 1155 16, 0); 1156 if (!sc) { 1157 netif_info(lio, rx_err, lio->netdev, 1158 "Failed to allocate octeon_soft_command struct\n"); 1159 return -ENOMEM; 1160 } 1161 1162 ncmd = (union octnet_cmd *)sc->virtdptr; 1163 1164 ncmd->u64 = 0; 1165 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 1166 ncmd->s.param1 = start_stop; 1167 1168 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 1169 1170 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1171 1172 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1173 OPCODE_NIC_CMD, 0, 0, 0); 1174 1175 init_completion(&sc->complete); 1176 sc->sc_status = OCTEON_REQUEST_PENDING; 1177 1178 retval = octeon_send_soft_command(oct, sc); 1179 if (retval == IQ_SEND_FAILED) { 1180 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1181 octeon_free_soft_command(oct, sc); 1182 } else { 1183 /* Sleep on a wait queue till the cond flag indicates that the 1184 * response arrived or timed-out. 1185 */ 1186 retval = wait_for_sc_completion_timeout(oct, sc, 0); 1187 if (retval) 1188 return retval; 1189 1190 oct->props[lio->ifidx].rx_on = start_stop; 1191 WRITE_ONCE(sc->caller_is_done, true); 1192 } 1193 1194 return retval; 1195 } 1196 1197 /** 1198 * liquidio_destroy_nic_device - Destroy NIC device interface 1199 * @oct: octeon device 1200 * @ifidx: which interface to destroy 1201 * 1202 * Cleanup associated with each interface for an Octeon device when NIC 1203 * module is being unloaded or if initialization fails during load. 1204 */ 1205 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1206 { 1207 struct net_device *netdev = oct->props[ifidx].netdev; 1208 struct octeon_device_priv *oct_priv = oct->priv; 1209 struct napi_struct *napi, *n; 1210 struct lio *lio; 1211 1212 if (!netdev) { 1213 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1214 __func__, ifidx); 1215 return; 1216 } 1217 1218 lio = GET_LIO(netdev); 1219 1220 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1221 1222 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1223 liquidio_stop(netdev); 1224 1225 if (oct->props[lio->ifidx].napi_enabled == 1) { 1226 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1227 napi_disable(napi); 1228 1229 oct->props[lio->ifidx].napi_enabled = 0; 1230 1231 if (OCTEON_CN23XX_PF(oct)) 1232 oct->droq[0]->ops.poll_mode = 0; 1233 } 1234 1235 /* Delete NAPI */ 1236 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1237 netif_napi_del(napi); 1238 1239 tasklet_enable(&oct_priv->droq_tasklet); 1240 1241 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1242 unregister_netdev(netdev); 1243 1244 cleanup_sync_octeon_time_wq(netdev); 1245 cleanup_link_status_change_wq(netdev); 1246 1247 cleanup_rx_oom_poll_fn(netdev); 1248 1249 lio_delete_glists(lio); 1250 1251 free_netdev(netdev); 1252 1253 oct->props[ifidx].gmxport = -1; 1254 1255 oct->props[ifidx].netdev = NULL; 1256 } 1257 1258 /** 1259 * liquidio_stop_nic_module - Stop complete NIC functionality 1260 * @oct: octeon device 1261 */ 1262 static int liquidio_stop_nic_module(struct octeon_device *oct) 1263 { 1264 int i, j; 1265 struct lio *lio; 1266 1267 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1268 device_lock(&oct->pci_dev->dev); 1269 if (oct->devlink) { 1270 devlink_unregister(oct->devlink); 1271 devlink_free(oct->devlink); 1272 oct->devlink = NULL; 1273 } 1274 device_unlock(&oct->pci_dev->dev); 1275 1276 if (!oct->ifcount) { 1277 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1278 return 1; 1279 } 1280 1281 spin_lock_bh(&oct->cmd_resp_wqlock); 1282 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1283 spin_unlock_bh(&oct->cmd_resp_wqlock); 1284 1285 lio_vf_rep_destroy(oct); 1286 1287 for (i = 0; i < oct->ifcount; i++) { 1288 lio = GET_LIO(oct->props[i].netdev); 1289 for (j = 0; j < oct->num_oqs; j++) 1290 octeon_unregister_droq_ops(oct, 1291 lio->linfo.rxpciq[j].s.q_no); 1292 } 1293 1294 for (i = 0; i < oct->ifcount; i++) 1295 liquidio_destroy_nic_device(oct, i); 1296 1297 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1298 return 0; 1299 } 1300 1301 /** 1302 * liquidio_remove - Cleans up resources at unload time 1303 * @pdev: PCI device structure 1304 */ 1305 static void liquidio_remove(struct pci_dev *pdev) 1306 { 1307 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1308 1309 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1310 1311 if (oct_dev->watchdog_task) 1312 kthread_stop(oct_dev->watchdog_task); 1313 1314 if (!oct_dev->octeon_id && 1315 oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) 1316 lio_vf_rep_modexit(); 1317 1318 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) 1319 liquidio_stop_nic_module(oct_dev); 1320 1321 /* Reset the octeon device and cleanup all memory allocated for 1322 * the octeon device by driver. 1323 */ 1324 octeon_destroy_resources(oct_dev); 1325 1326 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1327 1328 /* This octeon device has been removed. Update the global 1329 * data structure to reflect this. Free the device structure. 1330 */ 1331 octeon_free_device_mem(oct_dev); 1332 } 1333 1334 /** 1335 * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space 1336 * @oct: octeon device 1337 */ 1338 static int octeon_chip_specific_setup(struct octeon_device *oct) 1339 { 1340 u32 dev_id, rev_id; 1341 int ret = 1; 1342 1343 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1344 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1345 oct->rev_id = rev_id & 0xff; 1346 1347 switch (dev_id) { 1348 case OCTEON_CN68XX_PCIID: 1349 oct->chip_id = OCTEON_CN68XX; 1350 ret = lio_setup_cn68xx_octeon_device(oct); 1351 break; 1352 1353 case OCTEON_CN66XX_PCIID: 1354 oct->chip_id = OCTEON_CN66XX; 1355 ret = lio_setup_cn66xx_octeon_device(oct); 1356 break; 1357 1358 case OCTEON_CN23XX_PCIID_PF: 1359 oct->chip_id = OCTEON_CN23XX_PF_VID; 1360 ret = setup_cn23xx_octeon_pf_device(oct); 1361 if (ret) 1362 break; 1363 #ifdef CONFIG_PCI_IOV 1364 if (!ret) 1365 pci_sriov_set_totalvfs(oct->pci_dev, 1366 oct->sriov_info.max_vfs); 1367 #endif 1368 break; 1369 1370 default: 1371 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1372 dev_id); 1373 } 1374 1375 return ret; 1376 } 1377 1378 /** 1379 * octeon_pci_os_setup - PCI initialization for each Octeon device. 1380 * @oct: octeon device 1381 */ 1382 static int octeon_pci_os_setup(struct octeon_device *oct) 1383 { 1384 /* setup PCI stuff first */ 1385 if (pci_enable_device(oct->pci_dev)) { 1386 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1387 return 1; 1388 } 1389 1390 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1391 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1392 pci_disable_device(oct->pci_dev); 1393 return 1; 1394 } 1395 1396 /* Enable PCI DMA Master. */ 1397 pci_set_master(oct->pci_dev); 1398 1399 return 0; 1400 } 1401 1402 /** 1403 * free_netbuf - Unmap and free network buffer 1404 * @buf: buffer 1405 */ 1406 static void free_netbuf(void *buf) 1407 { 1408 struct sk_buff *skb; 1409 struct octnet_buf_free_info *finfo; 1410 struct lio *lio; 1411 1412 finfo = (struct octnet_buf_free_info *)buf; 1413 skb = finfo->skb; 1414 lio = finfo->lio; 1415 1416 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1417 DMA_TO_DEVICE); 1418 1419 tx_buffer_free(skb); 1420 } 1421 1422 /** 1423 * free_netsgbuf - Unmap and free gather buffer 1424 * @buf: buffer 1425 */ 1426 static void free_netsgbuf(void *buf) 1427 { 1428 struct octnet_buf_free_info *finfo; 1429 struct sk_buff *skb; 1430 struct lio *lio; 1431 struct octnic_gather *g; 1432 int i, frags, iq; 1433 1434 finfo = (struct octnet_buf_free_info *)buf; 1435 skb = finfo->skb; 1436 lio = finfo->lio; 1437 g = finfo->g; 1438 frags = skb_shinfo(skb)->nr_frags; 1439 1440 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1441 g->sg[0].ptr[0], (skb->len - skb->data_len), 1442 DMA_TO_DEVICE); 1443 1444 i = 1; 1445 while (frags--) { 1446 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 1447 1448 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 1449 g->sg[(i >> 2)].ptr[(i & 3)], 1450 skb_frag_size(frag), DMA_TO_DEVICE); 1451 i++; 1452 } 1453 1454 iq = skb_iq(lio->oct_dev, skb); 1455 spin_lock(&lio->glist_lock[iq]); 1456 list_add_tail(&g->list, &lio->glist[iq]); 1457 spin_unlock(&lio->glist_lock[iq]); 1458 1459 tx_buffer_free(skb); 1460 } 1461 1462 /** 1463 * free_netsgbuf_with_resp - Unmap and free gather buffer with response 1464 * @buf: buffer 1465 */ 1466 static void free_netsgbuf_with_resp(void *buf) 1467 { 1468 struct octeon_soft_command *sc; 1469 struct octnet_buf_free_info *finfo; 1470 struct sk_buff *skb; 1471 struct lio *lio; 1472 struct octnic_gather *g; 1473 int i, frags, iq; 1474 1475 sc = (struct octeon_soft_command *)buf; 1476 skb = (struct sk_buff *)sc->callback_arg; 1477 finfo = (struct octnet_buf_free_info *)&skb->cb; 1478 1479 lio = finfo->lio; 1480 g = finfo->g; 1481 frags = skb_shinfo(skb)->nr_frags; 1482 1483 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1484 g->sg[0].ptr[0], (skb->len - skb->data_len), 1485 DMA_TO_DEVICE); 1486 1487 i = 1; 1488 while (frags--) { 1489 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 1490 1491 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 1492 g->sg[(i >> 2)].ptr[(i & 3)], 1493 skb_frag_size(frag), DMA_TO_DEVICE); 1494 i++; 1495 } 1496 1497 iq = skb_iq(lio->oct_dev, skb); 1498 1499 spin_lock(&lio->glist_lock[iq]); 1500 list_add_tail(&g->list, &lio->glist[iq]); 1501 spin_unlock(&lio->glist_lock[iq]); 1502 1503 /* Don't free the skb yet */ 1504 } 1505 1506 /** 1507 * liquidio_ptp_adjfine - Adjust ptp frequency 1508 * @ptp: PTP clock info 1509 * @scaled_ppm: how much to adjust by, in scaled parts-per-million 1510 * 1511 * Scaled parts per million is ppm with a 16-bit binary fractional field. 1512 */ 1513 static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) 1514 { 1515 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1516 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1517 s32 ppb = scaled_ppm_to_ppb(scaled_ppm); 1518 u64 comp, delta; 1519 unsigned long flags; 1520 bool neg_adj = false; 1521 1522 if (ppb < 0) { 1523 neg_adj = true; 1524 ppb = -ppb; 1525 } 1526 1527 /* The hardware adds the clock compensation value to the 1528 * PTP clock on every coprocessor clock cycle, so we 1529 * compute the delta in terms of coprocessor clocks. 1530 */ 1531 delta = (u64)ppb << 32; 1532 do_div(delta, oct->coproc_clock_rate); 1533 1534 spin_lock_irqsave(&lio->ptp_lock, flags); 1535 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); 1536 if (neg_adj) 1537 comp -= delta; 1538 else 1539 comp += delta; 1540 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1541 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1542 1543 return 0; 1544 } 1545 1546 /** 1547 * liquidio_ptp_adjtime - Adjust ptp time 1548 * @ptp: PTP clock info 1549 * @delta: how much to adjust by, in nanosecs 1550 */ 1551 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 1552 { 1553 unsigned long flags; 1554 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1555 1556 spin_lock_irqsave(&lio->ptp_lock, flags); 1557 lio->ptp_adjust += delta; 1558 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1559 1560 return 0; 1561 } 1562 1563 /** 1564 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment 1565 * @ptp: PTP clock info 1566 * @ts: timespec 1567 */ 1568 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, 1569 struct timespec64 *ts) 1570 { 1571 u64 ns; 1572 unsigned long flags; 1573 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1574 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1575 1576 spin_lock_irqsave(&lio->ptp_lock, flags); 1577 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); 1578 ns += lio->ptp_adjust; 1579 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1580 1581 *ts = ns_to_timespec64(ns); 1582 1583 return 0; 1584 } 1585 1586 /** 1587 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment 1588 * @ptp: PTP clock info 1589 * @ts: timespec 1590 */ 1591 static int liquidio_ptp_settime(struct ptp_clock_info *ptp, 1592 const struct timespec64 *ts) 1593 { 1594 u64 ns; 1595 unsigned long flags; 1596 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1597 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1598 1599 ns = timespec64_to_ns(ts); 1600 1601 spin_lock_irqsave(&lio->ptp_lock, flags); 1602 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1603 lio->ptp_adjust = 0; 1604 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1605 1606 return 0; 1607 } 1608 1609 /** 1610 * liquidio_ptp_enable - Check if PTP is enabled 1611 * @ptp: PTP clock info 1612 * @rq: request 1613 * @on: is it on 1614 */ 1615 static int 1616 liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp, 1617 struct ptp_clock_request __maybe_unused *rq, 1618 int __maybe_unused on) 1619 { 1620 return -EOPNOTSUPP; 1621 } 1622 1623 /** 1624 * oct_ptp_open - Open PTP clock source 1625 * @netdev: network device 1626 */ 1627 static void oct_ptp_open(struct net_device *netdev) 1628 { 1629 struct lio *lio = GET_LIO(netdev); 1630 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1631 1632 spin_lock_init(&lio->ptp_lock); 1633 1634 snprintf(lio->ptp_info.name, 16, "%s", netdev->name); 1635 lio->ptp_info.owner = THIS_MODULE; 1636 lio->ptp_info.max_adj = 250000000; 1637 lio->ptp_info.n_alarm = 0; 1638 lio->ptp_info.n_ext_ts = 0; 1639 lio->ptp_info.n_per_out = 0; 1640 lio->ptp_info.pps = 0; 1641 lio->ptp_info.adjfine = liquidio_ptp_adjfine; 1642 lio->ptp_info.adjtime = liquidio_ptp_adjtime; 1643 lio->ptp_info.gettime64 = liquidio_ptp_gettime; 1644 lio->ptp_info.settime64 = liquidio_ptp_settime; 1645 lio->ptp_info.enable = liquidio_ptp_enable; 1646 1647 lio->ptp_adjust = 0; 1648 1649 lio->ptp_clock = ptp_clock_register(&lio->ptp_info, 1650 &oct->pci_dev->dev); 1651 1652 if (IS_ERR(lio->ptp_clock)) 1653 lio->ptp_clock = NULL; 1654 } 1655 1656 /** 1657 * liquidio_ptp_init - Init PTP clock 1658 * @oct: octeon device 1659 */ 1660 static void liquidio_ptp_init(struct octeon_device *oct) 1661 { 1662 u64 clock_comp, cfg; 1663 1664 clock_comp = (u64)NSEC_PER_SEC << 32; 1665 do_div(clock_comp, oct->coproc_clock_rate); 1666 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1667 1668 /* Enable */ 1669 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); 1670 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); 1671 } 1672 1673 /** 1674 * load_firmware - Load firmware to device 1675 * @oct: octeon device 1676 * 1677 * Maps device to firmware filename, requests firmware, and downloads it 1678 */ 1679 static int load_firmware(struct octeon_device *oct) 1680 { 1681 int ret = 0; 1682 const struct firmware *fw; 1683 char fw_name[LIO_MAX_FW_FILENAME_LEN]; 1684 char *tmp_fw_type; 1685 1686 if (fw_type_is_auto()) { 1687 tmp_fw_type = LIO_FW_NAME_TYPE_NIC; 1688 strscpy_pad(fw_type, tmp_fw_type, sizeof(fw_type)); 1689 } else { 1690 tmp_fw_type = fw_type; 1691 } 1692 1693 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, 1694 octeon_get_conf(oct)->card_name, tmp_fw_type, 1695 LIO_FW_NAME_SUFFIX); 1696 1697 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); 1698 if (ret) { 1699 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n", 1700 fw_name); 1701 release_firmware(fw); 1702 return ret; 1703 } 1704 1705 ret = octeon_download_firmware(oct, fw->data, fw->size); 1706 1707 release_firmware(fw); 1708 1709 return ret; 1710 } 1711 1712 /** 1713 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status 1714 * @work: work_struct data structure 1715 */ 1716 static void octnet_poll_check_txq_status(struct work_struct *work) 1717 { 1718 struct cavium_wk *wk = (struct cavium_wk *)work; 1719 struct lio *lio = (struct lio *)wk->ctxptr; 1720 1721 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) 1722 return; 1723 1724 check_txq_status(lio); 1725 queue_delayed_work(lio->txq_status_wq.wq, 1726 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 1727 } 1728 1729 /** 1730 * setup_tx_poll_fn - Sets up the txq poll check 1731 * @netdev: network device 1732 */ 1733 static inline int setup_tx_poll_fn(struct net_device *netdev) 1734 { 1735 struct lio *lio = GET_LIO(netdev); 1736 struct octeon_device *oct = lio->oct_dev; 1737 1738 lio->txq_status_wq.wq = alloc_workqueue("txq-status", 1739 WQ_MEM_RECLAIM | WQ_PERCPU, 0); 1740 if (!lio->txq_status_wq.wq) { 1741 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); 1742 return -1; 1743 } 1744 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, 1745 octnet_poll_check_txq_status); 1746 lio->txq_status_wq.wk.ctxptr = lio; 1747 queue_delayed_work(lio->txq_status_wq.wq, 1748 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 1749 return 0; 1750 } 1751 1752 static inline void cleanup_tx_poll_fn(struct net_device *netdev) 1753 { 1754 struct lio *lio = GET_LIO(netdev); 1755 1756 if (lio->txq_status_wq.wq) { 1757 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); 1758 destroy_workqueue(lio->txq_status_wq.wq); 1759 } 1760 } 1761 1762 /** 1763 * liquidio_open - Net device open for LiquidIO 1764 * @netdev: network device 1765 */ 1766 static int liquidio_open(struct net_device *netdev) 1767 { 1768 struct lio *lio = GET_LIO(netdev); 1769 struct octeon_device *oct = lio->oct_dev; 1770 struct octeon_device_priv *oct_priv = oct->priv; 1771 struct napi_struct *napi, *n; 1772 int ret = 0; 1773 1774 if (oct->props[lio->ifidx].napi_enabled == 0) { 1775 tasklet_disable(&oct_priv->droq_tasklet); 1776 1777 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1778 napi_enable(napi); 1779 1780 oct->props[lio->ifidx].napi_enabled = 1; 1781 1782 if (OCTEON_CN23XX_PF(oct)) 1783 oct->droq[0]->ops.poll_mode = 1; 1784 } 1785 1786 if (oct->ptp_enable) 1787 oct_ptp_open(netdev); 1788 1789 ifstate_set(lio, LIO_IFSTATE_RUNNING); 1790 1791 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { 1792 ret = setup_tx_poll_fn(netdev); 1793 if (ret) 1794 goto err_poll; 1795 } 1796 1797 netif_tx_start_all_queues(netdev); 1798 1799 /* Ready for link status updates */ 1800 lio->intf_open = 1; 1801 1802 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 1803 1804 /* tell Octeon to start forwarding packets to host */ 1805 ret = send_rx_ctrl_cmd(lio, 1); 1806 if (ret) 1807 goto err_rx_ctrl; 1808 1809 /* start periodical statistics fetch */ 1810 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); 1811 lio->stats_wk.ctxptr = lio; 1812 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies 1813 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); 1814 1815 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", 1816 netdev->name); 1817 1818 return 0; 1819 1820 err_rx_ctrl: 1821 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) 1822 cleanup_tx_poll_fn(netdev); 1823 err_poll: 1824 if (lio->ptp_clock) { 1825 ptp_clock_unregister(lio->ptp_clock); 1826 lio->ptp_clock = NULL; 1827 } 1828 1829 if (oct->props[lio->ifidx].napi_enabled == 1) { 1830 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1831 napi_disable(napi); 1832 1833 oct->props[lio->ifidx].napi_enabled = 0; 1834 1835 if (OCTEON_CN23XX_PF(oct)) 1836 oct->droq[0]->ops.poll_mode = 0; 1837 } 1838 1839 return ret; 1840 } 1841 1842 /** 1843 * liquidio_stop - Net device stop for LiquidIO 1844 * @netdev: network device 1845 */ 1846 static int liquidio_stop(struct net_device *netdev) 1847 { 1848 struct lio *lio = GET_LIO(netdev); 1849 struct octeon_device *oct = lio->oct_dev; 1850 struct octeon_device_priv *oct_priv = oct->priv; 1851 struct napi_struct *napi, *n; 1852 int ret = 0; 1853 1854 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 1855 1856 /* Stop any link updates */ 1857 lio->intf_open = 0; 1858 1859 stop_txqs(netdev); 1860 1861 /* Inform that netif carrier is down */ 1862 netif_carrier_off(netdev); 1863 netif_tx_disable(netdev); 1864 1865 lio->linfo.link.s.link_up = 0; 1866 lio->link_changes++; 1867 1868 /* Tell Octeon that nic interface is down. */ 1869 ret = send_rx_ctrl_cmd(lio, 0); 1870 if (ret) 1871 return ret; 1872 1873 if (OCTEON_CN23XX_PF(oct)) { 1874 if (!oct->msix_on) 1875 cleanup_tx_poll_fn(netdev); 1876 } else { 1877 cleanup_tx_poll_fn(netdev); 1878 } 1879 1880 cancel_delayed_work_sync(&lio->stats_wk.work); 1881 1882 if (lio->ptp_clock) { 1883 ptp_clock_unregister(lio->ptp_clock); 1884 lio->ptp_clock = NULL; 1885 } 1886 1887 /* Wait for any pending Rx descriptors */ 1888 if (lio_wait_for_clean_oq(oct)) 1889 netif_info(lio, rx_err, lio->netdev, 1890 "Proceeding with stop interface after partial RX desc processing\n"); 1891 1892 if (oct->props[lio->ifidx].napi_enabled == 1) { 1893 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1894 napi_disable(napi); 1895 1896 oct->props[lio->ifidx].napi_enabled = 0; 1897 1898 if (OCTEON_CN23XX_PF(oct)) 1899 oct->droq[0]->ops.poll_mode = 0; 1900 1901 tasklet_enable(&oct_priv->droq_tasklet); 1902 } 1903 1904 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 1905 1906 return ret; 1907 } 1908 1909 /** 1910 * get_new_flags - Converts a mask based on net device flags 1911 * @netdev: network device 1912 * 1913 * This routine generates a octnet_ifflags mask from the net device flags 1914 * received from the OS. 1915 */ 1916 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) 1917 { 1918 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1919 1920 if (netdev->flags & IFF_PROMISC) 1921 f |= OCTNET_IFFLAG_PROMISC; 1922 1923 if (netdev->flags & IFF_ALLMULTI) 1924 f |= OCTNET_IFFLAG_ALLMULTI; 1925 1926 if (netdev->flags & IFF_MULTICAST) { 1927 f |= OCTNET_IFFLAG_MULTICAST; 1928 1929 /* Accept all multicast addresses if there are more than we 1930 * can handle 1931 */ 1932 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1933 f |= OCTNET_IFFLAG_ALLMULTI; 1934 } 1935 1936 if (netdev->flags & IFF_BROADCAST) 1937 f |= OCTNET_IFFLAG_BROADCAST; 1938 1939 return f; 1940 } 1941 1942 /** 1943 * liquidio_set_mcast_list - Net device set_multicast_list 1944 * @netdev: network device 1945 */ 1946 static void liquidio_set_mcast_list(struct net_device *netdev) 1947 { 1948 struct lio *lio = GET_LIO(netdev); 1949 struct octeon_device *oct = lio->oct_dev; 1950 struct octnic_ctrl_pkt nctrl; 1951 struct netdev_hw_addr *ha; 1952 u64 *mc; 1953 int ret; 1954 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1955 1956 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1957 1958 /* Create a ctrl pkt command to be sent to core app. */ 1959 nctrl.ncmd.u64 = 0; 1960 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1961 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1962 nctrl.ncmd.s.param2 = mc_count; 1963 nctrl.ncmd.s.more = mc_count; 1964 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1965 nctrl.netpndev = (u64)netdev; 1966 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1967 1968 /* copy all the addresses into the udd */ 1969 mc = &nctrl.udd[0]; 1970 netdev_for_each_mc_addr(ha, netdev) { 1971 *mc = 0; 1972 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); 1973 /* no need to swap bytes */ 1974 1975 if (++mc > &nctrl.udd[mc_count]) 1976 break; 1977 } 1978 1979 /* Apparently, any activity in this call from the kernel has to 1980 * be atomic. So we won't wait for response. 1981 */ 1982 1983 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1984 if (ret) { 1985 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1986 ret); 1987 } 1988 } 1989 1990 /** 1991 * liquidio_set_mac - Net device set_mac_address 1992 * @netdev: network device 1993 * @p: pointer to sockaddr 1994 */ 1995 static int liquidio_set_mac(struct net_device *netdev, void *p) 1996 { 1997 int ret = 0; 1998 struct lio *lio = GET_LIO(netdev); 1999 struct octeon_device *oct = lio->oct_dev; 2000 struct sockaddr *addr = (struct sockaddr *)p; 2001 struct octnic_ctrl_pkt nctrl; 2002 2003 if (!is_valid_ether_addr(addr->sa_data)) 2004 return -EADDRNOTAVAIL; 2005 2006 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2007 2008 nctrl.ncmd.u64 = 0; 2009 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2010 nctrl.ncmd.s.param1 = 0; 2011 nctrl.ncmd.s.more = 1; 2012 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2013 nctrl.netpndev = (u64)netdev; 2014 2015 nctrl.udd[0] = 0; 2016 /* The MAC Address is presented in network byte order. */ 2017 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); 2018 2019 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2020 if (ret < 0) { 2021 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 2022 return -ENOMEM; 2023 } 2024 2025 if (nctrl.sc_status) { 2026 dev_err(&oct->pci_dev->dev, 2027 "%s: MAC Address change failed. sc return=%x\n", 2028 __func__, nctrl.sc_status); 2029 return -EIO; 2030 } 2031 2032 eth_hw_addr_set(netdev, addr->sa_data); 2033 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); 2034 2035 return 0; 2036 } 2037 2038 static void 2039 liquidio_get_stats64(struct net_device *netdev, 2040 struct rtnl_link_stats64 *lstats) 2041 { 2042 struct lio *lio = GET_LIO(netdev); 2043 struct octeon_device *oct; 2044 u64 pkts = 0, drop = 0, bytes = 0; 2045 struct oct_droq_stats *oq_stats; 2046 struct oct_iq_stats *iq_stats; 2047 int i, iq_no, oq_no; 2048 2049 oct = lio->oct_dev; 2050 2051 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 2052 return; 2053 2054 for (i = 0; i < oct->num_iqs; i++) { 2055 iq_no = lio->linfo.txpciq[i].s.q_no; 2056 iq_stats = &oct->instr_queue[iq_no]->stats; 2057 pkts += iq_stats->tx_done; 2058 drop += iq_stats->tx_dropped; 2059 bytes += iq_stats->tx_tot_bytes; 2060 } 2061 2062 lstats->tx_packets = pkts; 2063 lstats->tx_bytes = bytes; 2064 lstats->tx_dropped = drop; 2065 2066 pkts = 0; 2067 drop = 0; 2068 bytes = 0; 2069 2070 for (i = 0; i < oct->num_oqs; i++) { 2071 oq_no = lio->linfo.rxpciq[i].s.q_no; 2072 oq_stats = &oct->droq[oq_no]->stats; 2073 pkts += oq_stats->rx_pkts_received; 2074 drop += (oq_stats->rx_dropped + 2075 oq_stats->dropped_nodispatch + 2076 oq_stats->dropped_toomany + 2077 oq_stats->dropped_nomem); 2078 bytes += oq_stats->rx_bytes_received; 2079 } 2080 2081 lstats->rx_bytes = bytes; 2082 lstats->rx_packets = pkts; 2083 lstats->rx_dropped = drop; 2084 2085 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 2086 lstats->collisions = oct->link_stats.fromhost.total_collisions; 2087 2088 /* detailed rx_errors: */ 2089 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 2090 /* recved pkt with crc error */ 2091 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 2092 /* recv'd frame alignment error */ 2093 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 2094 /* recv'r fifo overrun */ 2095 lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err; 2096 2097 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + 2098 lstats->rx_frame_errors + lstats->rx_fifo_errors; 2099 2100 /* detailed tx_errors */ 2101 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 2102 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 2103 lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err; 2104 2105 lstats->tx_errors = lstats->tx_aborted_errors + 2106 lstats->tx_carrier_errors + 2107 lstats->tx_fifo_errors; 2108 } 2109 2110 /** 2111 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl 2112 * @netdev: network device 2113 * @ifr: interface request 2114 */ 2115 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2116 { 2117 struct hwtstamp_config conf; 2118 struct lio *lio = GET_LIO(netdev); 2119 2120 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2121 return -EFAULT; 2122 2123 switch (conf.tx_type) { 2124 case HWTSTAMP_TX_ON: 2125 case HWTSTAMP_TX_OFF: 2126 break; 2127 default: 2128 return -ERANGE; 2129 } 2130 2131 switch (conf.rx_filter) { 2132 case HWTSTAMP_FILTER_NONE: 2133 break; 2134 case HWTSTAMP_FILTER_ALL: 2135 case HWTSTAMP_FILTER_SOME: 2136 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2137 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2138 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2139 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2140 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2141 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2142 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2143 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2144 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2145 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2146 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2147 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2148 case HWTSTAMP_FILTER_NTP_ALL: 2149 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2150 break; 2151 default: 2152 return -ERANGE; 2153 } 2154 2155 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2156 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2157 2158 else 2159 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2160 2161 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2162 } 2163 2164 /** 2165 * liquidio_ioctl - ioctl handler 2166 * @netdev: network device 2167 * @ifr: interface request 2168 * @cmd: command 2169 */ 2170 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2171 { 2172 struct lio *lio = GET_LIO(netdev); 2173 2174 switch (cmd) { 2175 case SIOCSHWTSTAMP: 2176 if (lio->oct_dev->ptp_enable) 2177 return hwtstamp_ioctl(netdev, ifr); 2178 fallthrough; 2179 default: 2180 return -EOPNOTSUPP; 2181 } 2182 } 2183 2184 /** 2185 * handle_timestamp - handle a Tx timestamp response 2186 * @oct: octeon device 2187 * @status: response status 2188 * @buf: pointer to skb 2189 */ 2190 static void handle_timestamp(struct octeon_device *oct, 2191 u32 status, 2192 void *buf) 2193 { 2194 struct octnet_buf_free_info *finfo; 2195 struct octeon_soft_command *sc; 2196 struct oct_timestamp_resp *resp; 2197 struct lio *lio; 2198 struct sk_buff *skb = (struct sk_buff *)buf; 2199 2200 finfo = (struct octnet_buf_free_info *)skb->cb; 2201 lio = finfo->lio; 2202 sc = finfo->sc; 2203 oct = lio->oct_dev; 2204 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2205 2206 if (status != OCTEON_REQUEST_DONE) { 2207 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n", 2208 CVM_CAST64(status)); 2209 resp->timestamp = 0; 2210 } 2211 2212 octeon_swap_8B_data(&resp->timestamp, 1); 2213 2214 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { 2215 struct skb_shared_hwtstamps ts; 2216 u64 ns = resp->timestamp; 2217 2218 netif_info(lio, tx_done, lio->netdev, 2219 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2220 skb, (unsigned long long)ns); 2221 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2222 skb_tstamp_tx(skb, &ts); 2223 } 2224 2225 octeon_free_soft_command(oct, sc); 2226 tx_buffer_free(skb); 2227 } 2228 2229 /** 2230 * send_nic_timestamp_pkt - Send a data packet that will be timestamped 2231 * @oct: octeon device 2232 * @ndata: pointer to network data 2233 * @finfo: pointer to private network data 2234 * @xmit_more: more is coming 2235 */ 2236 static inline int send_nic_timestamp_pkt(struct octeon_device *oct, 2237 struct octnic_data_pkt *ndata, 2238 struct octnet_buf_free_info *finfo, 2239 int xmit_more) 2240 { 2241 int retval; 2242 struct octeon_soft_command *sc; 2243 struct lio *lio; 2244 int ring_doorbell; 2245 u32 len; 2246 2247 lio = finfo->lio; 2248 2249 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2250 sizeof(struct oct_timestamp_resp)); 2251 finfo->sc = sc; 2252 2253 if (!sc) { 2254 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2255 return IQ_SEND_FAILED; 2256 } 2257 2258 if (ndata->reqtype == REQTYPE_NORESP_NET) 2259 ndata->reqtype = REQTYPE_RESP_NET; 2260 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2261 ndata->reqtype = REQTYPE_RESP_NET_SG; 2262 2263 sc->callback = handle_timestamp; 2264 sc->callback_arg = finfo->skb; 2265 sc->iq_no = ndata->q_no; 2266 2267 if (OCTEON_CN23XX_PF(oct)) 2268 len = (u32)((struct octeon_instr_ih3 *) 2269 (&sc->cmd.cmd3.ih3))->dlengsz; 2270 else 2271 len = (u32)((struct octeon_instr_ih2 *) 2272 (&sc->cmd.cmd2.ih2))->dlengsz; 2273 2274 ring_doorbell = !xmit_more; 2275 2276 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2277 sc, len, ndata->reqtype); 2278 2279 if (retval == IQ_SEND_FAILED) { 2280 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2281 retval); 2282 octeon_free_soft_command(oct, sc); 2283 } else { 2284 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2285 } 2286 2287 return retval; 2288 } 2289 2290 /** 2291 * liquidio_xmit - Transmit networks packets to the Octeon interface 2292 * @skb: skbuff struct to be passed to network layer. 2293 * @netdev: pointer to network device 2294 * 2295 * Return: whether the packet was transmitted to the device okay or not 2296 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2297 */ 2298 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2299 { 2300 struct lio *lio; 2301 struct octnet_buf_free_info *finfo; 2302 union octnic_cmd_setup cmdsetup; 2303 struct octnic_data_pkt ndata; 2304 struct octeon_device *oct; 2305 struct oct_iq_stats *stats; 2306 struct octeon_instr_irh *irh; 2307 union tx_info *tx_info; 2308 int status = 0; 2309 int q_idx = 0, iq_no = 0; 2310 int j, xmit_more = 0; 2311 u64 dptr = 0; 2312 u32 tag = 0; 2313 2314 lio = GET_LIO(netdev); 2315 oct = lio->oct_dev; 2316 2317 q_idx = skb_iq(oct, skb); 2318 tag = q_idx; 2319 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2320 2321 stats = &oct->instr_queue[iq_no]->stats; 2322 2323 /* Check for all conditions in which the current packet cannot be 2324 * transmitted. 2325 */ 2326 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2327 (!lio->linfo.link.s.link_up) || 2328 (skb->len <= 0)) { 2329 netif_info(lio, tx_err, lio->netdev, 2330 "Transmit failed link_status : %d\n", 2331 lio->linfo.link.s.link_up); 2332 goto lio_xmit_failed; 2333 } 2334 2335 /* Use space in skb->cb to store info used to unmap and 2336 * free the buffers. 2337 */ 2338 finfo = (struct octnet_buf_free_info *)skb->cb; 2339 finfo->lio = lio; 2340 finfo->skb = skb; 2341 finfo->sc = NULL; 2342 2343 /* Prepare the attributes for the data to be passed to OSI. */ 2344 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2345 2346 ndata.buf = (void *)finfo; 2347 2348 ndata.q_no = iq_no; 2349 2350 if (octnet_iq_is_full(oct, ndata.q_no)) { 2351 /* defer sending if queue is full */ 2352 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2353 ndata.q_no); 2354 stats->tx_iq_busy++; 2355 return NETDEV_TX_BUSY; 2356 } 2357 2358 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2359 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); 2360 */ 2361 2362 ndata.datasize = skb->len; 2363 2364 cmdsetup.u64 = 0; 2365 cmdsetup.s.iq_no = iq_no; 2366 2367 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2368 if (skb->encapsulation) { 2369 cmdsetup.s.tnl_csum = 1; 2370 stats->tx_vxlan++; 2371 } else { 2372 cmdsetup.s.transport_csum = 1; 2373 } 2374 } 2375 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2376 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2377 cmdsetup.s.timestamp = 1; 2378 } 2379 2380 if (skb_shinfo(skb)->nr_frags == 0) { 2381 cmdsetup.s.u.datasize = skb->len; 2382 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2383 2384 /* Offload checksum calculation for TCP/UDP packets */ 2385 dptr = dma_map_single(&oct->pci_dev->dev, 2386 skb->data, 2387 skb->len, 2388 DMA_TO_DEVICE); 2389 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2390 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2391 __func__); 2392 stats->tx_dmamap_fail++; 2393 return NETDEV_TX_BUSY; 2394 } 2395 2396 if (OCTEON_CN23XX_PF(oct)) 2397 ndata.cmd.cmd3.dptr = dptr; 2398 else 2399 ndata.cmd.cmd2.dptr = dptr; 2400 finfo->dptr = dptr; 2401 ndata.reqtype = REQTYPE_NORESP_NET; 2402 2403 } else { 2404 int i, frags; 2405 skb_frag_t *frag; 2406 struct octnic_gather *g; 2407 2408 spin_lock(&lio->glist_lock[q_idx]); 2409 g = (struct octnic_gather *) 2410 lio_list_delete_head(&lio->glist[q_idx]); 2411 spin_unlock(&lio->glist_lock[q_idx]); 2412 2413 if (!g) { 2414 netif_info(lio, tx_err, lio->netdev, 2415 "Transmit scatter gather: glist null!\n"); 2416 goto lio_xmit_failed; 2417 } 2418 2419 cmdsetup.s.gather = 1; 2420 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2421 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2422 2423 memset(g->sg, 0, g->sg_size); 2424 2425 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2426 skb->data, 2427 (skb->len - skb->data_len), 2428 DMA_TO_DEVICE); 2429 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2430 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2431 __func__); 2432 stats->tx_dmamap_fail++; 2433 return NETDEV_TX_BUSY; 2434 } 2435 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2436 2437 frags = skb_shinfo(skb)->nr_frags; 2438 i = 1; 2439 while (frags--) { 2440 frag = &skb_shinfo(skb)->frags[i - 1]; 2441 2442 g->sg[(i >> 2)].ptr[(i & 3)] = 2443 skb_frag_dma_map(&oct->pci_dev->dev, 2444 frag, 0, skb_frag_size(frag), 2445 DMA_TO_DEVICE); 2446 2447 if (dma_mapping_error(&oct->pci_dev->dev, 2448 g->sg[i >> 2].ptr[i & 3])) { 2449 dma_unmap_single(&oct->pci_dev->dev, 2450 g->sg[0].ptr[0], 2451 skb->len - skb->data_len, 2452 DMA_TO_DEVICE); 2453 for (j = 1; j < i; j++) { 2454 frag = &skb_shinfo(skb)->frags[j - 1]; 2455 dma_unmap_page(&oct->pci_dev->dev, 2456 g->sg[j >> 2].ptr[j & 3], 2457 skb_frag_size(frag), 2458 DMA_TO_DEVICE); 2459 } 2460 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2461 __func__); 2462 return NETDEV_TX_BUSY; 2463 } 2464 2465 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 2466 (i & 3)); 2467 i++; 2468 } 2469 2470 dptr = g->sg_dma_ptr; 2471 2472 if (OCTEON_CN23XX_PF(oct)) 2473 ndata.cmd.cmd3.dptr = dptr; 2474 else 2475 ndata.cmd.cmd2.dptr = dptr; 2476 finfo->dptr = dptr; 2477 finfo->g = g; 2478 2479 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2480 } 2481 2482 if (OCTEON_CN23XX_PF(oct)) { 2483 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2484 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2485 } else { 2486 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; 2487 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; 2488 } 2489 2490 if (skb_shinfo(skb)->gso_size) { 2491 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2492 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2493 stats->tx_gso++; 2494 } 2495 2496 /* HW insert VLAN tag */ 2497 if (skb_vlan_tag_present(skb)) { 2498 irh->priority = skb_vlan_tag_get(skb) >> 13; 2499 irh->vlan = skb_vlan_tag_get(skb) & 0xfff; 2500 } 2501 2502 xmit_more = netdev_xmit_more(); 2503 2504 if (unlikely(cmdsetup.s.timestamp)) 2505 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 2506 else 2507 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 2508 if (status == IQ_SEND_FAILED) 2509 goto lio_xmit_failed; 2510 2511 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2512 2513 if (status == IQ_SEND_STOP) 2514 netif_stop_subqueue(netdev, q_idx); 2515 2516 netif_trans_update(netdev); 2517 2518 if (tx_info->s.gso_segs) 2519 stats->tx_done += tx_info->s.gso_segs; 2520 else 2521 stats->tx_done++; 2522 stats->tx_tot_bytes += ndata.datasize; 2523 2524 return NETDEV_TX_OK; 2525 2526 lio_xmit_failed: 2527 stats->tx_dropped++; 2528 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2529 iq_no, stats->tx_dropped); 2530 if (dptr) 2531 dma_unmap_single(&oct->pci_dev->dev, dptr, 2532 ndata.datasize, DMA_TO_DEVICE); 2533 2534 octeon_ring_doorbell_locked(oct, iq_no); 2535 2536 tx_buffer_free(skb); 2537 return NETDEV_TX_OK; 2538 } 2539 2540 /** 2541 * liquidio_tx_timeout - Network device Tx timeout 2542 * @netdev: pointer to network device 2543 * @txqueue: index of the hung transmit queue 2544 */ 2545 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 2546 { 2547 struct lio *lio; 2548 2549 lio = GET_LIO(netdev); 2550 2551 netif_info(lio, tx_err, lio->netdev, 2552 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2553 netdev->stats.tx_dropped); 2554 netif_trans_update(netdev); 2555 wake_txqs(netdev); 2556 } 2557 2558 static int liquidio_vlan_rx_add_vid(struct net_device *netdev, 2559 __be16 proto __attribute__((unused)), 2560 u16 vid) 2561 { 2562 struct lio *lio = GET_LIO(netdev); 2563 struct octeon_device *oct = lio->oct_dev; 2564 struct octnic_ctrl_pkt nctrl; 2565 int ret = 0; 2566 2567 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2568 2569 nctrl.ncmd.u64 = 0; 2570 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2571 nctrl.ncmd.s.param1 = vid; 2572 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2573 nctrl.netpndev = (u64)netdev; 2574 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2575 2576 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2577 if (ret) { 2578 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2579 ret); 2580 if (ret > 0) 2581 ret = -EIO; 2582 } 2583 2584 return ret; 2585 } 2586 2587 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, 2588 __be16 proto __attribute__((unused)), 2589 u16 vid) 2590 { 2591 struct lio *lio = GET_LIO(netdev); 2592 struct octeon_device *oct = lio->oct_dev; 2593 struct octnic_ctrl_pkt nctrl; 2594 int ret = 0; 2595 2596 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2597 2598 nctrl.ncmd.u64 = 0; 2599 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2600 nctrl.ncmd.s.param1 = vid; 2601 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2602 nctrl.netpndev = (u64)netdev; 2603 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2604 2605 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2606 if (ret) { 2607 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 2608 ret); 2609 if (ret > 0) 2610 ret = -EIO; 2611 } 2612 return ret; 2613 } 2614 2615 /** 2616 * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload 2617 * @netdev: pointer to network device 2618 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL 2619 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE 2620 * Returns: SUCCESS or FAILURE 2621 */ 2622 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 2623 u8 rx_cmd) 2624 { 2625 struct lio *lio = GET_LIO(netdev); 2626 struct octeon_device *oct = lio->oct_dev; 2627 struct octnic_ctrl_pkt nctrl; 2628 int ret = 0; 2629 2630 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2631 2632 nctrl.ncmd.u64 = 0; 2633 nctrl.ncmd.s.cmd = command; 2634 nctrl.ncmd.s.param1 = rx_cmd; 2635 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2636 nctrl.netpndev = (u64)netdev; 2637 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2638 2639 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2640 if (ret) { 2641 dev_err(&oct->pci_dev->dev, 2642 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", 2643 ret); 2644 if (ret > 0) 2645 ret = -EIO; 2646 } 2647 return ret; 2648 } 2649 2650 /** 2651 * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware 2652 * @netdev: pointer to network device 2653 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG 2654 * @vxlan_port: VxLAN port to be added or deleted 2655 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD, 2656 * OCTNET_CMD_VXLAN_PORT_DEL 2657 * Return: SUCCESS or FAILURE 2658 */ 2659 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 2660 u16 vxlan_port, u8 vxlan_cmd_bit) 2661 { 2662 struct lio *lio = GET_LIO(netdev); 2663 struct octeon_device *oct = lio->oct_dev; 2664 struct octnic_ctrl_pkt nctrl; 2665 int ret = 0; 2666 2667 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2668 2669 nctrl.ncmd.u64 = 0; 2670 nctrl.ncmd.s.cmd = command; 2671 nctrl.ncmd.s.more = vxlan_cmd_bit; 2672 nctrl.ncmd.s.param1 = vxlan_port; 2673 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2674 nctrl.netpndev = (u64)netdev; 2675 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2676 2677 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2678 if (ret) { 2679 dev_err(&oct->pci_dev->dev, 2680 "VxLAN port add/delete failed in core (ret:0x%x)\n", 2681 ret); 2682 if (ret > 0) 2683 ret = -EIO; 2684 } 2685 return ret; 2686 } 2687 2688 static int liquidio_udp_tunnel_set_port(struct net_device *netdev, 2689 unsigned int table, unsigned int entry, 2690 struct udp_tunnel_info *ti) 2691 { 2692 return liquidio_vxlan_port_command(netdev, 2693 OCTNET_CMD_VXLAN_PORT_CONFIG, 2694 htons(ti->port), 2695 OCTNET_CMD_VXLAN_PORT_ADD); 2696 } 2697 2698 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, 2699 unsigned int table, 2700 unsigned int entry, 2701 struct udp_tunnel_info *ti) 2702 { 2703 return liquidio_vxlan_port_command(netdev, 2704 OCTNET_CMD_VXLAN_PORT_CONFIG, 2705 htons(ti->port), 2706 OCTNET_CMD_VXLAN_PORT_DEL); 2707 } 2708 2709 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { 2710 .set_port = liquidio_udp_tunnel_set_port, 2711 .unset_port = liquidio_udp_tunnel_unset_port, 2712 .tables = { 2713 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 2714 }, 2715 }; 2716 2717 /** 2718 * liquidio_fix_features - Net device fix features 2719 * @netdev: pointer to network device 2720 * @request: features requested 2721 * Return: updated features list 2722 */ 2723 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2724 netdev_features_t request) 2725 { 2726 struct lio *lio = netdev_priv(netdev); 2727 2728 if ((request & NETIF_F_RXCSUM) && 2729 !(lio->dev_capability & NETIF_F_RXCSUM)) 2730 request &= ~NETIF_F_RXCSUM; 2731 2732 if ((request & NETIF_F_HW_CSUM) && 2733 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2734 request &= ~NETIF_F_HW_CSUM; 2735 2736 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2737 request &= ~NETIF_F_TSO; 2738 2739 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2740 request &= ~NETIF_F_TSO6; 2741 2742 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2743 request &= ~NETIF_F_LRO; 2744 2745 /*Disable LRO if RXCSUM is off */ 2746 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2747 (lio->dev_capability & NETIF_F_LRO)) 2748 request &= ~NETIF_F_LRO; 2749 2750 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) && 2751 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER)) 2752 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 2753 2754 return request; 2755 } 2756 2757 /** 2758 * liquidio_set_features - Net device set features 2759 * @netdev: pointer to network device 2760 * @features: features to enable/disable 2761 */ 2762 static int liquidio_set_features(struct net_device *netdev, 2763 netdev_features_t features) 2764 { 2765 struct lio *lio = netdev_priv(netdev); 2766 2767 if ((features & NETIF_F_LRO) && 2768 (lio->dev_capability & NETIF_F_LRO) && 2769 !(netdev->features & NETIF_F_LRO)) 2770 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2771 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2772 else if (!(features & NETIF_F_LRO) && 2773 (lio->dev_capability & NETIF_F_LRO) && 2774 (netdev->features & NETIF_F_LRO)) 2775 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 2776 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2777 2778 /* Sending command to firmware to enable/disable RX checksum 2779 * offload settings using ethtool 2780 */ 2781 if (!(netdev->features & NETIF_F_RXCSUM) && 2782 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2783 (features & NETIF_F_RXCSUM)) 2784 liquidio_set_rxcsum_command(netdev, 2785 OCTNET_CMD_TNL_RX_CSUM_CTL, 2786 OCTNET_CMD_RXCSUM_ENABLE); 2787 else if ((netdev->features & NETIF_F_RXCSUM) && 2788 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2789 !(features & NETIF_F_RXCSUM)) 2790 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2791 OCTNET_CMD_RXCSUM_DISABLE); 2792 2793 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2794 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2795 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2796 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2797 OCTNET_CMD_VLAN_FILTER_ENABLE); 2798 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2799 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2800 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2801 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2802 OCTNET_CMD_VLAN_FILTER_DISABLE); 2803 2804 return 0; 2805 } 2806 2807 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, 2808 u8 *mac, bool is_admin_assigned) 2809 { 2810 struct lio *lio = GET_LIO(netdev); 2811 struct octeon_device *oct = lio->oct_dev; 2812 struct octnic_ctrl_pkt nctrl; 2813 int ret = 0; 2814 2815 if (!is_valid_ether_addr(mac)) 2816 return -EINVAL; 2817 2818 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) 2819 return -EINVAL; 2820 2821 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2822 2823 nctrl.ncmd.u64 = 0; 2824 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2825 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 2826 nctrl.ncmd.s.param1 = vfidx + 1; 2827 nctrl.ncmd.s.more = 1; 2828 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2829 nctrl.netpndev = (u64)netdev; 2830 if (is_admin_assigned) { 2831 nctrl.ncmd.s.param2 = true; 2832 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2833 } 2834 2835 nctrl.udd[0] = 0; 2836 /* The MAC Address is presented in network byte order. */ 2837 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); 2838 2839 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; 2840 2841 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2842 if (ret > 0) 2843 ret = -EIO; 2844 2845 return ret; 2846 } 2847 2848 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) 2849 { 2850 struct lio *lio = GET_LIO(netdev); 2851 struct octeon_device *oct = lio->oct_dev; 2852 int retval; 2853 2854 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2855 return -EINVAL; 2856 2857 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); 2858 if (!retval) 2859 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); 2860 2861 return retval; 2862 } 2863 2864 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, 2865 bool enable) 2866 { 2867 struct lio *lio = GET_LIO(netdev); 2868 struct octeon_device *oct = lio->oct_dev; 2869 struct octnic_ctrl_pkt nctrl; 2870 int retval; 2871 2872 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { 2873 netif_info(lio, drv, lio->netdev, 2874 "firmware does not support spoofchk\n"); 2875 return -EOPNOTSUPP; 2876 } 2877 2878 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 2879 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 2880 return -EINVAL; 2881 } 2882 2883 if (enable) { 2884 if (oct->sriov_info.vf_spoofchk[vfidx]) 2885 return 0; 2886 } else { 2887 /* Clear */ 2888 if (!oct->sriov_info.vf_spoofchk[vfidx]) 2889 return 0; 2890 } 2891 2892 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2893 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; 2894 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; 2895 nctrl.ncmd.s.param1 = 2896 vfidx + 1; /* vfidx is 0 based, 2897 * but vf_num (param1) is 1 based 2898 */ 2899 nctrl.ncmd.s.param2 = enable; 2900 nctrl.ncmd.s.more = 0; 2901 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2902 nctrl.cb_fn = NULL; 2903 2904 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2905 2906 if (retval) { 2907 netif_info(lio, drv, lio->netdev, 2908 "Failed to set VF %d spoofchk %s\n", vfidx, 2909 enable ? "on" : "off"); 2910 return -1; 2911 } 2912 2913 oct->sriov_info.vf_spoofchk[vfidx] = enable; 2914 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, 2915 enable ? "on" : "off"); 2916 2917 return 0; 2918 } 2919 2920 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, 2921 u16 vlan, u8 qos, __be16 vlan_proto) 2922 { 2923 struct lio *lio = GET_LIO(netdev); 2924 struct octeon_device *oct = lio->oct_dev; 2925 struct octnic_ctrl_pkt nctrl; 2926 u16 vlantci; 2927 int ret = 0; 2928 2929 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2930 return -EINVAL; 2931 2932 if (vlan_proto != htons(ETH_P_8021Q)) 2933 return -EPROTONOSUPPORT; 2934 2935 if (vlan >= VLAN_N_VID || qos > 7) 2936 return -EINVAL; 2937 2938 if (vlan) 2939 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; 2940 else 2941 vlantci = 0; 2942 2943 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) 2944 return 0; 2945 2946 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2947 2948 if (vlan) 2949 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2950 else 2951 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2952 2953 nctrl.ncmd.s.param1 = vlantci; 2954 nctrl.ncmd.s.param2 = 2955 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ 2956 nctrl.ncmd.s.more = 0; 2957 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2958 nctrl.cb_fn = NULL; 2959 2960 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2961 if (ret) { 2962 if (ret > 0) 2963 ret = -EIO; 2964 return ret; 2965 } 2966 2967 oct->sriov_info.vf_vlantci[vfidx] = vlantci; 2968 2969 return ret; 2970 } 2971 2972 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, 2973 struct ifla_vf_info *ivi) 2974 { 2975 struct lio *lio = GET_LIO(netdev); 2976 struct octeon_device *oct = lio->oct_dev; 2977 u8 *macaddr; 2978 2979 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2980 return -EINVAL; 2981 2982 memset(ivi, 0, sizeof(struct ifla_vf_info)); 2983 2984 ivi->vf = vfidx; 2985 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; 2986 ether_addr_copy(&ivi->mac[0], macaddr); 2987 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; 2988 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; 2989 if (oct->sriov_info.trusted_vf.active && 2990 oct->sriov_info.trusted_vf.id == vfidx) 2991 ivi->trusted = true; 2992 else 2993 ivi->trusted = false; 2994 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; 2995 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; 2996 ivi->max_tx_rate = lio->linfo.link.s.speed; 2997 ivi->min_tx_rate = 0; 2998 2999 return 0; 3000 } 3001 3002 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) 3003 { 3004 struct octeon_device *oct = lio->oct_dev; 3005 struct octeon_soft_command *sc; 3006 int retval; 3007 3008 sc = octeon_alloc_soft_command(oct, 0, 16, 0); 3009 if (!sc) 3010 return -ENOMEM; 3011 3012 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 3013 3014 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3015 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 3016 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, 3017 trusted); 3018 3019 init_completion(&sc->complete); 3020 sc->sc_status = OCTEON_REQUEST_PENDING; 3021 3022 retval = octeon_send_soft_command(oct, sc); 3023 if (retval == IQ_SEND_FAILED) { 3024 octeon_free_soft_command(oct, sc); 3025 retval = -1; 3026 } else { 3027 /* Wait for response or timeout */ 3028 retval = wait_for_sc_completion_timeout(oct, sc, 0); 3029 if (retval) 3030 return (retval); 3031 3032 WRITE_ONCE(sc->caller_is_done, true); 3033 } 3034 3035 return retval; 3036 } 3037 3038 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx, 3039 bool setting) 3040 { 3041 struct lio *lio = GET_LIO(netdev); 3042 struct octeon_device *oct = lio->oct_dev; 3043 3044 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) { 3045 /* trusted vf is not supported by firmware older than 1.7.1 */ 3046 return -EOPNOTSUPP; 3047 } 3048 3049 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 3050 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 3051 return -EINVAL; 3052 } 3053 3054 if (setting) { 3055 /* Set */ 3056 3057 if (oct->sriov_info.trusted_vf.active && 3058 oct->sriov_info.trusted_vf.id == vfidx) 3059 return 0; 3060 3061 if (oct->sriov_info.trusted_vf.active) { 3062 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n"); 3063 return -EPERM; 3064 } 3065 } else { 3066 /* Clear */ 3067 3068 if (!oct->sriov_info.trusted_vf.active) 3069 return 0; 3070 } 3071 3072 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) { 3073 if (setting) { 3074 oct->sriov_info.trusted_vf.id = vfidx; 3075 oct->sriov_info.trusted_vf.active = true; 3076 } else { 3077 oct->sriov_info.trusted_vf.active = false; 3078 } 3079 3080 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx, 3081 setting ? "" : "not "); 3082 } else { 3083 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n"); 3084 return -1; 3085 } 3086 3087 return 0; 3088 } 3089 3090 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 3091 int linkstate) 3092 { 3093 struct lio *lio = GET_LIO(netdev); 3094 struct octeon_device *oct = lio->oct_dev; 3095 struct octnic_ctrl_pkt nctrl; 3096 int ret = 0; 3097 3098 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3099 return -EINVAL; 3100 3101 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) 3102 return 0; 3103 3104 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3105 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; 3106 nctrl.ncmd.s.param1 = 3107 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3108 nctrl.ncmd.s.param2 = linkstate; 3109 nctrl.ncmd.s.more = 0; 3110 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3111 nctrl.cb_fn = NULL; 3112 3113 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 3114 3115 if (!ret) 3116 oct->sriov_info.vf_linkstate[vfidx] = linkstate; 3117 else if (ret > 0) 3118 ret = -EIO; 3119 3120 return ret; 3121 } 3122 3123 static int 3124 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) 3125 { 3126 struct lio_devlink_priv *priv; 3127 struct octeon_device *oct; 3128 3129 priv = devlink_priv(devlink); 3130 oct = priv->oct; 3131 3132 *mode = oct->eswitch_mode; 3133 3134 return 0; 3135 } 3136 3137 static int 3138 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, 3139 struct netlink_ext_ack *extack) 3140 { 3141 struct lio_devlink_priv *priv; 3142 struct octeon_device *oct; 3143 int ret = 0; 3144 3145 priv = devlink_priv(devlink); 3146 oct = priv->oct; 3147 3148 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)) 3149 return -EINVAL; 3150 3151 if (oct->eswitch_mode == mode) 3152 return 0; 3153 3154 switch (mode) { 3155 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 3156 oct->eswitch_mode = mode; 3157 ret = lio_vf_rep_create(oct); 3158 break; 3159 3160 case DEVLINK_ESWITCH_MODE_LEGACY: 3161 lio_vf_rep_destroy(oct); 3162 oct->eswitch_mode = mode; 3163 break; 3164 3165 default: 3166 ret = -EINVAL; 3167 } 3168 3169 return ret; 3170 } 3171 3172 static const struct devlink_ops liquidio_devlink_ops = { 3173 .eswitch_mode_get = liquidio_eswitch_mode_get, 3174 .eswitch_mode_set = liquidio_eswitch_mode_set, 3175 }; 3176 3177 static int 3178 liquidio_get_port_parent_id(struct net_device *dev, 3179 struct netdev_phys_item_id *ppid) 3180 { 3181 struct lio *lio = GET_LIO(dev); 3182 struct octeon_device *oct = lio->oct_dev; 3183 3184 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 3185 return -EOPNOTSUPP; 3186 3187 ppid->id_len = ETH_ALEN; 3188 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); 3189 3190 return 0; 3191 } 3192 3193 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, 3194 struct ifla_vf_stats *vf_stats) 3195 { 3196 struct lio *lio = GET_LIO(netdev); 3197 struct octeon_device *oct = lio->oct_dev; 3198 struct oct_vf_stats stats; 3199 int ret; 3200 3201 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3202 return -EINVAL; 3203 3204 memset(&stats, 0, sizeof(struct oct_vf_stats)); 3205 ret = cn23xx_get_vf_stats(oct, vfidx, &stats); 3206 if (!ret) { 3207 vf_stats->rx_packets = stats.rx_packets; 3208 vf_stats->tx_packets = stats.tx_packets; 3209 vf_stats->rx_bytes = stats.rx_bytes; 3210 vf_stats->tx_bytes = stats.tx_bytes; 3211 vf_stats->broadcast = stats.broadcast; 3212 vf_stats->multicast = stats.multicast; 3213 } 3214 3215 return ret; 3216 } 3217 3218 static const struct net_device_ops lionetdevops = { 3219 .ndo_open = liquidio_open, 3220 .ndo_stop = liquidio_stop, 3221 .ndo_start_xmit = liquidio_xmit, 3222 .ndo_get_stats64 = liquidio_get_stats64, 3223 .ndo_set_mac_address = liquidio_set_mac, 3224 .ndo_set_rx_mode = liquidio_set_mcast_list, 3225 .ndo_tx_timeout = liquidio_tx_timeout, 3226 3227 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 3228 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 3229 .ndo_change_mtu = liquidio_change_mtu, 3230 .ndo_eth_ioctl = liquidio_ioctl, 3231 .ndo_fix_features = liquidio_fix_features, 3232 .ndo_set_features = liquidio_set_features, 3233 .ndo_set_vf_mac = liquidio_set_vf_mac, 3234 .ndo_set_vf_vlan = liquidio_set_vf_vlan, 3235 .ndo_get_vf_config = liquidio_get_vf_config, 3236 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, 3237 .ndo_set_vf_trust = liquidio_set_vf_trust, 3238 .ndo_set_vf_link_state = liquidio_set_vf_link_state, 3239 .ndo_get_vf_stats = liquidio_get_vf_stats, 3240 .ndo_get_port_parent_id = liquidio_get_port_parent_id, 3241 }; 3242 3243 /** 3244 * liquidio_init - Entry point for the liquidio module 3245 */ 3246 static int __init liquidio_init(void) 3247 { 3248 int i; 3249 struct handshake *hs; 3250 3251 init_completion(&first_stage); 3252 3253 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); 3254 3255 if (liquidio_init_pci()) 3256 return -EINVAL; 3257 3258 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3259 3260 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3261 hs = &handshake[i]; 3262 if (hs->pci_dev) { 3263 wait_for_completion(&hs->init); 3264 if (!hs->init_ok) { 3265 /* init handshake failed */ 3266 dev_err(&hs->pci_dev->dev, 3267 "Failed to init device\n"); 3268 liquidio_deinit_pci(); 3269 return -EIO; 3270 } 3271 } 3272 } 3273 3274 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3275 hs = &handshake[i]; 3276 if (hs->pci_dev) { 3277 wait_for_completion_timeout(&hs->started, 3278 msecs_to_jiffies(30000)); 3279 if (!hs->started_ok) { 3280 /* starter handshake failed */ 3281 dev_err(&hs->pci_dev->dev, 3282 "Firmware failed to start\n"); 3283 liquidio_deinit_pci(); 3284 return -EIO; 3285 } 3286 } 3287 } 3288 3289 return 0; 3290 } 3291 3292 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3293 { 3294 struct octeon_device *oct = (struct octeon_device *)buf; 3295 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3296 int gmxport = 0; 3297 union oct_link_status *ls; 3298 int i; 3299 3300 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 3301 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3302 recv_pkt->buffer_size[0], 3303 recv_pkt->rh.r_nic_info.gmxport); 3304 goto nic_info_err; 3305 } 3306 3307 gmxport = recv_pkt->rh.r_nic_info.gmxport; 3308 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 3309 OCT_DROQ_INFO_SIZE); 3310 3311 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3312 for (i = 0; i < oct->ifcount; i++) { 3313 if (oct->props[i].gmxport == gmxport) { 3314 update_link_status(oct->props[i].netdev, ls); 3315 break; 3316 } 3317 } 3318 3319 nic_info_err: 3320 for (i = 0; i < recv_pkt->buffer_count; i++) 3321 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3322 octeon_free_recv_info(recv_info); 3323 return 0; 3324 } 3325 3326 /** 3327 * setup_nic_devices - Setup network interfaces 3328 * @octeon_dev: octeon device 3329 * 3330 * Called during init time for each device. It assumes the NIC 3331 * is already up and running. The link information for each 3332 * interface is passed in link_info. 3333 */ 3334 static int setup_nic_devices(struct octeon_device *octeon_dev) 3335 { 3336 struct lio *lio = NULL; 3337 struct net_device *netdev; 3338 u8 mac[6], i, j, *fw_ver, *micro_ver; 3339 unsigned long micro; 3340 u32 cur_ver; 3341 struct octeon_soft_command *sc; 3342 struct liquidio_if_cfg_resp *resp; 3343 struct octdev_props *props; 3344 int retval, num_iqueues, num_oqueues; 3345 int max_num_queues = 0; 3346 union oct_nic_if_cfg if_cfg; 3347 unsigned int base_queue; 3348 unsigned int gmx_port_id; 3349 u32 resp_size, data_size; 3350 u32 ifidx_or_pfnum; 3351 struct lio_version *vdata; 3352 struct devlink *devlink; 3353 struct lio_devlink_priv *lio_devlink; 3354 3355 /* This is to handle link status changes */ 3356 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3357 OPCODE_NIC_INFO, 3358 lio_nic_info, octeon_dev); 3359 3360 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 3361 * They are handled directly. 3362 */ 3363 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 3364 free_netbuf); 3365 3366 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 3367 free_netsgbuf); 3368 3369 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 3370 free_netsgbuf_with_resp); 3371 3372 for (i = 0; i < octeon_dev->ifcount; i++) { 3373 resp_size = sizeof(struct liquidio_if_cfg_resp); 3374 data_size = sizeof(struct lio_version); 3375 sc = (struct octeon_soft_command *) 3376 octeon_alloc_soft_command(octeon_dev, data_size, 3377 resp_size, 0); 3378 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 3379 vdata = (struct lio_version *)sc->virtdptr; 3380 3381 *((u64 *)vdata) = 0; 3382 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 3383 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 3384 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 3385 3386 if (OCTEON_CN23XX_PF(octeon_dev)) { 3387 num_iqueues = octeon_dev->sriov_info.num_pf_rings; 3388 num_oqueues = octeon_dev->sriov_info.num_pf_rings; 3389 base_queue = octeon_dev->sriov_info.pf_srn; 3390 3391 gmx_port_id = octeon_dev->pf_num; 3392 ifidx_or_pfnum = octeon_dev->pf_num; 3393 } else { 3394 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF( 3395 octeon_get_conf(octeon_dev), i); 3396 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF( 3397 octeon_get_conf(octeon_dev), i); 3398 base_queue = CFG_GET_BASE_QUE_NIC_IF( 3399 octeon_get_conf(octeon_dev), i); 3400 gmx_port_id = CFG_GET_GMXID_NIC_IF( 3401 octeon_get_conf(octeon_dev), i); 3402 ifidx_or_pfnum = i; 3403 } 3404 3405 dev_dbg(&octeon_dev->pci_dev->dev, 3406 "requesting config for interface %d, iqs %d, oqs %d\n", 3407 ifidx_or_pfnum, num_iqueues, num_oqueues); 3408 3409 if_cfg.u64 = 0; 3410 if_cfg.s.num_iqueues = num_iqueues; 3411 if_cfg.s.num_oqueues = num_oqueues; 3412 if_cfg.s.base_queue = base_queue; 3413 if_cfg.s.gmx_port_id = gmx_port_id; 3414 3415 sc->iq_no = 0; 3416 3417 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 3418 OPCODE_NIC_IF_CFG, 0, 3419 if_cfg.u64, 0); 3420 3421 init_completion(&sc->complete); 3422 sc->sc_status = OCTEON_REQUEST_PENDING; 3423 3424 retval = octeon_send_soft_command(octeon_dev, sc); 3425 if (retval == IQ_SEND_FAILED) { 3426 dev_err(&octeon_dev->pci_dev->dev, 3427 "iq/oq config failed status: %x\n", 3428 retval); 3429 /* Soft instr is freed by driver in case of failure. */ 3430 octeon_free_soft_command(octeon_dev, sc); 3431 return(-EIO); 3432 } 3433 3434 /* Sleep on a wait queue till the cond flag indicates that the 3435 * response arrived or timed-out. 3436 */ 3437 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); 3438 if (retval) 3439 return retval; 3440 3441 retval = resp->status; 3442 if (retval) { 3443 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 3444 WRITE_ONCE(sc->caller_is_done, true); 3445 goto setup_nic_dev_done; 3446 } 3447 snprintf(octeon_dev->fw_info.liquidio_firmware_version, 3448 32, "%s", 3449 resp->cfg_info.liquidio_firmware_version); 3450 3451 /* Verify f/w version (in case of 'auto' loading from flash) */ 3452 fw_ver = octeon_dev->fw_info.liquidio_firmware_version; 3453 if (memcmp(LIQUIDIO_BASE_VERSION, 3454 fw_ver, 3455 strlen(LIQUIDIO_BASE_VERSION))) { 3456 dev_err(&octeon_dev->pci_dev->dev, 3457 "Unmatched firmware version. Expected %s.x, got %s.\n", 3458 LIQUIDIO_BASE_VERSION, fw_ver); 3459 WRITE_ONCE(sc->caller_is_done, true); 3460 goto setup_nic_dev_done; 3461 } else if (atomic_read(octeon_dev->adapter_fw_state) == 3462 FW_IS_PRELOADED) { 3463 dev_info(&octeon_dev->pci_dev->dev, 3464 "Using auto-loaded firmware version %s.\n", 3465 fw_ver); 3466 } 3467 3468 /* extract micro version field; point past '<maj>.<min>.' */ 3469 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; 3470 if (kstrtoul(micro_ver, 10, µ) != 0) 3471 micro = 0; 3472 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; 3473 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; 3474 octeon_dev->fw_info.ver.rev = micro; 3475 3476 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 3477 (sizeof(struct liquidio_if_cfg_info)) >> 3); 3478 3479 num_iqueues = hweight64(resp->cfg_info.iqmask); 3480 num_oqueues = hweight64(resp->cfg_info.oqmask); 3481 3482 if (!(num_iqueues) || !(num_oqueues)) { 3483 dev_err(&octeon_dev->pci_dev->dev, 3484 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 3485 resp->cfg_info.iqmask, 3486 resp->cfg_info.oqmask); 3487 WRITE_ONCE(sc->caller_is_done, true); 3488 goto setup_nic_dev_done; 3489 } 3490 3491 if (OCTEON_CN6XXX(octeon_dev)) { 3492 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3493 cn6xxx)); 3494 } else if (OCTEON_CN23XX_PF(octeon_dev)) { 3495 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3496 cn23xx_pf)); 3497 } 3498 3499 dev_dbg(&octeon_dev->pci_dev->dev, 3500 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n", 3501 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3502 num_iqueues, num_oqueues, max_num_queues); 3503 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); 3504 3505 if (!netdev) { 3506 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3507 WRITE_ONCE(sc->caller_is_done, true); 3508 goto setup_nic_dev_done; 3509 } 3510 3511 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 3512 3513 /* Associate the routines that will handle different 3514 * netdev tasks. 3515 */ 3516 netdev->netdev_ops = &lionetdevops; 3517 3518 retval = netif_set_real_num_rx_queues(netdev, num_oqueues); 3519 if (retval) { 3520 dev_err(&octeon_dev->pci_dev->dev, 3521 "setting real number rx failed\n"); 3522 WRITE_ONCE(sc->caller_is_done, true); 3523 goto setup_nic_dev_free; 3524 } 3525 3526 retval = netif_set_real_num_tx_queues(netdev, num_iqueues); 3527 if (retval) { 3528 dev_err(&octeon_dev->pci_dev->dev, 3529 "setting real number tx failed\n"); 3530 WRITE_ONCE(sc->caller_is_done, true); 3531 goto setup_nic_dev_free; 3532 } 3533 3534 lio = GET_LIO(netdev); 3535 3536 memset(lio, 0, sizeof(struct lio)); 3537 3538 lio->ifidx = ifidx_or_pfnum; 3539 3540 props = &octeon_dev->props[i]; 3541 props->gmxport = resp->cfg_info.linfo.gmxport; 3542 props->netdev = netdev; 3543 3544 lio->linfo.num_rxpciq = num_oqueues; 3545 lio->linfo.num_txpciq = num_iqueues; 3546 for (j = 0; j < num_oqueues; j++) { 3547 lio->linfo.rxpciq[j].u64 = 3548 resp->cfg_info.linfo.rxpciq[j].u64; 3549 } 3550 for (j = 0; j < num_iqueues; j++) { 3551 lio->linfo.txpciq[j].u64 = 3552 resp->cfg_info.linfo.txpciq[j].u64; 3553 } 3554 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 3555 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 3556 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 3557 3558 WRITE_ONCE(sc->caller_is_done, true); 3559 3560 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3561 3562 if (OCTEON_CN23XX_PF(octeon_dev) || 3563 OCTEON_CN6XXX(octeon_dev)) { 3564 lio->dev_capability = NETIF_F_HIGHDMA 3565 | NETIF_F_IP_CSUM 3566 | NETIF_F_IPV6_CSUM 3567 | NETIF_F_SG | NETIF_F_RXCSUM 3568 | NETIF_F_GRO 3569 | NETIF_F_TSO | NETIF_F_TSO6 3570 | NETIF_F_LRO; 3571 } 3572 netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 3573 3574 /* Copy of transmit encapsulation capabilities: 3575 * TSO, TSO6, Checksums for this device 3576 */ 3577 lio->enc_dev_capability = NETIF_F_IP_CSUM 3578 | NETIF_F_IPV6_CSUM 3579 | NETIF_F_GSO_UDP_TUNNEL 3580 | NETIF_F_HW_CSUM | NETIF_F_SG 3581 | NETIF_F_RXCSUM 3582 | NETIF_F_TSO | NETIF_F_TSO6 3583 | NETIF_F_LRO; 3584 3585 netdev->hw_enc_features = (lio->enc_dev_capability & 3586 ~NETIF_F_LRO); 3587 3588 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; 3589 3590 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; 3591 3592 netdev->vlan_features = lio->dev_capability; 3593 /* Add any unchangeable hw features */ 3594 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 3595 NETIF_F_HW_VLAN_CTAG_RX | 3596 NETIF_F_HW_VLAN_CTAG_TX; 3597 3598 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 3599 3600 netdev->hw_features = lio->dev_capability; 3601 /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/ 3602 netdev->hw_features = netdev->hw_features & 3603 ~NETIF_F_HW_VLAN_CTAG_RX; 3604 3605 /* MTU range: 68 - 16000 */ 3606 netdev->min_mtu = LIO_MIN_MTU_SIZE; 3607 netdev->max_mtu = LIO_MAX_MTU_SIZE; 3608 3609 /* Point to the properties for octeon device to which this 3610 * interface belongs. 3611 */ 3612 lio->oct_dev = octeon_dev; 3613 lio->octprops = props; 3614 lio->netdev = netdev; 3615 3616 dev_dbg(&octeon_dev->pci_dev->dev, 3617 "if%d gmx: %d hw_addr: 0x%llx\n", i, 3618 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 3619 3620 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { 3621 u8 vfmac[ETH_ALEN]; 3622 3623 eth_random_addr(vfmac); 3624 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { 3625 dev_err(&octeon_dev->pci_dev->dev, 3626 "Error setting VF%d MAC address\n", 3627 j); 3628 goto setup_nic_dev_free; 3629 } 3630 } 3631 3632 /* 64-bit swap required on LE machines */ 3633 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 3634 for (j = 0; j < 6; j++) 3635 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 3636 3637 /* Copy MAC Address to OS network device structure */ 3638 3639 eth_hw_addr_set(netdev, mac); 3640 3641 /* By default all interfaces on a single Octeon uses the same 3642 * tx and rx queues 3643 */ 3644 lio->txq = lio->linfo.txpciq[0].s.q_no; 3645 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 3646 if (liquidio_setup_io_queues(octeon_dev, i, 3647 lio->linfo.num_txpciq, 3648 lio->linfo.num_rxpciq)) { 3649 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 3650 goto setup_nic_dev_free; 3651 } 3652 3653 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 3654 3655 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 3656 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 3657 3658 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { 3659 dev_err(&octeon_dev->pci_dev->dev, 3660 "Gather list allocation failed\n"); 3661 goto setup_nic_dev_free; 3662 } 3663 3664 /* Register ethtool support */ 3665 liquidio_set_ethtool_ops(netdev); 3666 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID) 3667 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 3668 else 3669 octeon_dev->priv_flags = 0x0; 3670 3671 if (netdev->features & NETIF_F_LRO) 3672 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3673 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3674 3675 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 3676 OCTNET_CMD_VLAN_FILTER_ENABLE); 3677 3678 if ((debug != -1) && (debug & NETIF_MSG_HW)) 3679 liquidio_set_feature(netdev, 3680 OCTNET_CMD_VERBOSE_ENABLE, 0); 3681 3682 if (setup_link_status_change_wq(netdev)) 3683 goto setup_nic_dev_free; 3684 3685 if ((octeon_dev->fw_info.app_cap_flags & 3686 LIQUIDIO_TIME_SYNC_CAP) && 3687 setup_sync_octeon_time_wq(netdev)) 3688 goto setup_nic_dev_free; 3689 3690 if (setup_rx_oom_poll_fn(netdev)) 3691 goto setup_nic_dev_free; 3692 3693 /* Register the network device with the OS */ 3694 if (register_netdev(netdev)) { 3695 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3696 goto setup_nic_dev_free; 3697 } 3698 3699 dev_dbg(&octeon_dev->pci_dev->dev, 3700 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3701 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3702 netif_carrier_off(netdev); 3703 lio->link_changes++; 3704 3705 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3706 3707 /* Sending command to firmware to enable Rx checksum offload 3708 * by default at the time of setup of Liquidio driver for 3709 * this device 3710 */ 3711 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3712 OCTNET_CMD_RXCSUM_ENABLE); 3713 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 3714 OCTNET_CMD_TXCSUM_ENABLE); 3715 3716 dev_dbg(&octeon_dev->pci_dev->dev, 3717 "NIC ifidx:%d Setup successful\n", i); 3718 3719 if (octeon_dev->subsystem_id == 3720 OCTEON_CN2350_25GB_SUBSYS_ID || 3721 octeon_dev->subsystem_id == 3722 OCTEON_CN2360_25GB_SUBSYS_ID) { 3723 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, 3724 octeon_dev->fw_info.ver.min, 3725 octeon_dev->fw_info.ver.rev); 3726 3727 /* speed control unsupported in f/w older than 1.7.2 */ 3728 if (cur_ver < OCT_FW_VER(1, 7, 2)) { 3729 dev_info(&octeon_dev->pci_dev->dev, 3730 "speed setting not supported by f/w."); 3731 octeon_dev->speed_setting = 25; 3732 octeon_dev->no_speed_setting = 1; 3733 } else { 3734 liquidio_get_speed(lio); 3735 } 3736 3737 if (octeon_dev->speed_setting == 0) { 3738 octeon_dev->speed_setting = 25; 3739 octeon_dev->no_speed_setting = 1; 3740 } 3741 } else { 3742 octeon_dev->no_speed_setting = 1; 3743 octeon_dev->speed_setting = 10; 3744 } 3745 octeon_dev->speed_boot = octeon_dev->speed_setting; 3746 3747 /* don't read FEC setting if unsupported by f/w (see above) */ 3748 if (octeon_dev->speed_boot == 25 && 3749 !octeon_dev->no_speed_setting) { 3750 liquidio_get_fec(lio); 3751 octeon_dev->props[lio->ifidx].fec_boot = 3752 octeon_dev->props[lio->ifidx].fec; 3753 } 3754 } 3755 3756 device_lock(&octeon_dev->pci_dev->dev); 3757 devlink = devlink_alloc(&liquidio_devlink_ops, 3758 sizeof(struct lio_devlink_priv), 3759 &octeon_dev->pci_dev->dev); 3760 if (!devlink) { 3761 device_unlock(&octeon_dev->pci_dev->dev); 3762 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); 3763 goto setup_nic_dev_free; 3764 } 3765 3766 lio_devlink = devlink_priv(devlink); 3767 lio_devlink->oct = octeon_dev; 3768 3769 octeon_dev->devlink = devlink; 3770 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; 3771 devlink_register(devlink); 3772 device_unlock(&octeon_dev->pci_dev->dev); 3773 3774 return 0; 3775 3776 setup_nic_dev_free: 3777 3778 while (i--) { 3779 dev_err(&octeon_dev->pci_dev->dev, 3780 "NIC ifidx:%d Setup failed\n", i); 3781 liquidio_destroy_nic_device(octeon_dev, i); 3782 } 3783 3784 setup_nic_dev_done: 3785 3786 return -ENODEV; 3787 } 3788 3789 #ifdef CONFIG_PCI_IOV 3790 static int octeon_enable_sriov(struct octeon_device *oct) 3791 { 3792 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; 3793 struct pci_dev *vfdev; 3794 int err; 3795 u32 u; 3796 3797 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { 3798 err = pci_enable_sriov(oct->pci_dev, 3799 oct->sriov_info.num_vfs_alloced); 3800 if (err) { 3801 dev_err(&oct->pci_dev->dev, 3802 "OCTEON: Failed to enable PCI sriov: %d\n", 3803 err); 3804 oct->sriov_info.num_vfs_alloced = 0; 3805 return err; 3806 } 3807 oct->sriov_info.sriov_enabled = 1; 3808 3809 /* init lookup table that maps DPI ring number to VF pci_dev 3810 * struct pointer 3811 */ 3812 u = 0; 3813 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3814 OCTEON_CN23XX_VF_VID, NULL); 3815 while (vfdev) { 3816 if (vfdev->is_virtfn && 3817 (vfdev->physfn == oct->pci_dev)) { 3818 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = 3819 vfdev; 3820 u += oct->sriov_info.rings_per_vf; 3821 } 3822 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3823 OCTEON_CN23XX_VF_VID, vfdev); 3824 } 3825 } 3826 3827 return num_vfs_alloced; 3828 } 3829 3830 static int lio_pci_sriov_disable(struct octeon_device *oct) 3831 { 3832 int u; 3833 3834 if (pci_vfs_assigned(oct->pci_dev)) { 3835 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n"); 3836 return -EPERM; 3837 } 3838 3839 pci_disable_sriov(oct->pci_dev); 3840 3841 u = 0; 3842 while (u < MAX_POSSIBLE_VFS) { 3843 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; 3844 u += oct->sriov_info.rings_per_vf; 3845 } 3846 3847 oct->sriov_info.num_vfs_alloced = 0; 3848 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n", 3849 oct->pf_num); 3850 3851 return 0; 3852 } 3853 3854 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) 3855 { 3856 struct octeon_device *oct = pci_get_drvdata(dev); 3857 int ret = 0; 3858 3859 if ((num_vfs == oct->sriov_info.num_vfs_alloced) && 3860 (oct->sriov_info.sriov_enabled)) { 3861 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n", 3862 oct->pf_num, num_vfs); 3863 return 0; 3864 } 3865 3866 if (!num_vfs) { 3867 lio_vf_rep_destroy(oct); 3868 ret = lio_pci_sriov_disable(oct); 3869 } else if (num_vfs > oct->sriov_info.max_vfs) { 3870 dev_err(&oct->pci_dev->dev, 3871 "OCTEON: Max allowed VFs:%d user requested:%d", 3872 oct->sriov_info.max_vfs, num_vfs); 3873 ret = -EPERM; 3874 } else { 3875 oct->sriov_info.num_vfs_alloced = num_vfs; 3876 ret = octeon_enable_sriov(oct); 3877 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", 3878 oct->pf_num, num_vfs); 3879 ret = lio_vf_rep_create(oct); 3880 if (ret) 3881 dev_info(&oct->pci_dev->dev, 3882 "vf representor create failed"); 3883 } 3884 3885 return ret; 3886 } 3887 #endif 3888 3889 /** 3890 * liquidio_init_nic_module - initialize the NIC 3891 * @oct: octeon device 3892 * 3893 * This initialization routine is called once the Octeon device application is 3894 * up and running 3895 */ 3896 static int liquidio_init_nic_module(struct octeon_device *oct) 3897 { 3898 int i, retval = 0; 3899 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); 3900 3901 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3902 3903 /* only default iq and oq were initialized 3904 * initialize the rest as well 3905 */ 3906 /* run port_config command for each port */ 3907 oct->ifcount = num_nic_ports; 3908 3909 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); 3910 3911 for (i = 0; i < MAX_OCTEON_LINKS; i++) 3912 oct->props[i].gmxport = -1; 3913 3914 retval = setup_nic_devices(oct); 3915 if (retval) { 3916 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3917 goto octnet_init_failure; 3918 } 3919 3920 /* Call vf_rep_modinit if the firmware is switchdev capable 3921 * and do it from the first liquidio function probed. 3922 */ 3923 if (!oct->octeon_id && 3924 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { 3925 retval = lio_vf_rep_modinit(); 3926 if (retval) { 3927 liquidio_stop_nic_module(oct); 3928 goto octnet_init_failure; 3929 } 3930 } 3931 3932 liquidio_ptp_init(oct); 3933 3934 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3935 3936 return retval; 3937 3938 octnet_init_failure: 3939 3940 oct->ifcount = 0; 3941 3942 return retval; 3943 } 3944 3945 /** 3946 * nic_starter - finish init 3947 * @work: work struct work_struct 3948 * 3949 * starter callback that invokes the remaining initialization work after the NIC is up and running. 3950 */ 3951 static void nic_starter(struct work_struct *work) 3952 { 3953 struct octeon_device *oct; 3954 struct cavium_wk *wk = (struct cavium_wk *)work; 3955 3956 oct = (struct octeon_device *)wk->ctxptr; 3957 3958 if (atomic_read(&oct->status) == OCT_DEV_RUNNING) 3959 return; 3960 3961 /* If the status of the device is CORE_OK, the core 3962 * application has reported its application type. Call 3963 * any registered handlers now and move to the RUNNING 3964 * state. 3965 */ 3966 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) { 3967 schedule_delayed_work(&oct->nic_poll_work.work, 3968 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 3969 return; 3970 } 3971 3972 atomic_set(&oct->status, OCT_DEV_RUNNING); 3973 3974 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) { 3975 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n"); 3976 3977 if (liquidio_init_nic_module(oct)) 3978 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n"); 3979 else 3980 handshake[oct->octeon_id].started_ok = 1; 3981 } else { 3982 dev_err(&oct->pci_dev->dev, 3983 "Unexpected application running on NIC (%d). Check firmware.\n", 3984 oct->app_mode); 3985 } 3986 3987 complete(&handshake[oct->octeon_id].started); 3988 } 3989 3990 static int 3991 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) 3992 { 3993 struct octeon_device *oct = (struct octeon_device *)buf; 3994 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3995 int i, notice, vf_idx; 3996 bool cores_crashed; 3997 u64 *data, vf_num; 3998 3999 notice = recv_pkt->rh.r.ossp; 4000 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE); 4001 4002 /* the first 64-bit word of data is the vf_num */ 4003 vf_num = data[0]; 4004 octeon_swap_8B_data(&vf_num, 1); 4005 vf_idx = (int)vf_num - 1; 4006 4007 cores_crashed = READ_ONCE(oct->cores_crashed); 4008 4009 if (notice == VF_DRV_LOADED) { 4010 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) { 4011 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx); 4012 dev_info(&oct->pci_dev->dev, 4013 "driver for VF%d was loaded\n", vf_idx); 4014 if (!cores_crashed) 4015 try_module_get(THIS_MODULE); 4016 } 4017 } else if (notice == VF_DRV_REMOVED) { 4018 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) { 4019 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx); 4020 dev_info(&oct->pci_dev->dev, 4021 "driver for VF%d was removed\n", vf_idx); 4022 if (!cores_crashed) 4023 module_put(THIS_MODULE); 4024 } 4025 } else if (notice == VF_DRV_MACADDR_CHANGED) { 4026 u8 *b = (u8 *)&data[1]; 4027 4028 oct->sriov_info.vf_macaddr[vf_idx] = data[1]; 4029 dev_info(&oct->pci_dev->dev, 4030 "VF driver changed VF%d's MAC address to %pM\n", 4031 vf_idx, b + 2); 4032 } 4033 4034 for (i = 0; i < recv_pkt->buffer_count; i++) 4035 recv_buffer_free(recv_pkt->buffer_ptr[i]); 4036 octeon_free_recv_info(recv_info); 4037 4038 return 0; 4039 } 4040 4041 /** 4042 * octeon_device_init - Device initialization for each Octeon device that is probed 4043 * @octeon_dev: octeon device 4044 */ 4045 static int octeon_device_init(struct octeon_device *octeon_dev) 4046 { 4047 int j, ret; 4048 char bootcmd[] = "\n"; 4049 char *dbg_enb = NULL; 4050 enum lio_fw_state fw_state; 4051 struct octeon_device_priv *oct_priv = octeon_dev->priv; 4052 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); 4053 4054 /* Enable access to the octeon device and make its DMA capability 4055 * known to the OS. 4056 */ 4057 if (octeon_pci_os_setup(octeon_dev)) 4058 return 1; 4059 4060 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE); 4061 4062 /* Identify the Octeon type and map the BAR address space. */ 4063 if (octeon_chip_specific_setup(octeon_dev)) { 4064 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n"); 4065 return 1; 4066 } 4067 4068 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE); 4069 4070 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE', 4071 * since that is what is required for the reference to be removed 4072 * during de-initialization (see 'octeon_destroy_resources'). 4073 */ 4074 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number, 4075 PCI_SLOT(octeon_dev->pci_dev->devfn), 4076 PCI_FUNC(octeon_dev->pci_dev->devfn), 4077 true); 4078 4079 octeon_dev->app_mode = CVM_DRV_INVALID_APP; 4080 4081 /* CN23XX supports preloaded firmware if the following is true: 4082 * 4083 * The adapter indicates that firmware is currently running AND 4084 * 'fw_type' is 'auto'. 4085 * 4086 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate). 4087 */ 4088 if (OCTEON_CN23XX_PF(octeon_dev) && 4089 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) { 4090 atomic_cmpxchg(octeon_dev->adapter_fw_state, 4091 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED); 4092 } 4093 4094 /* If loading firmware, only first device of adapter needs to do so. */ 4095 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state, 4096 FW_NEEDS_TO_BE_LOADED, 4097 FW_IS_BEING_LOADED); 4098 4099 /* Here, [local variable] 'fw_state' is set to one of: 4100 * 4101 * FW_IS_PRELOADED: No firmware is to be loaded (see above) 4102 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load 4103 * firmware to the adapter. 4104 * FW_IS_BEING_LOADED: The driver's second instance will not load 4105 * firmware to the adapter. 4106 */ 4107 4108 /* Prior to f/w load, perform a soft reset of the Octeon device; 4109 * if error resetting, return w/error. 4110 */ 4111 if (fw_state == FW_NEEDS_TO_BE_LOADED) 4112 if (octeon_dev->fn_list.soft_reset(octeon_dev)) 4113 return 1; 4114 4115 /* Initialize the dispatch mechanism used to push packets arriving on 4116 * Octeon Output queues. 4117 */ 4118 if (octeon_init_dispatch_list(octeon_dev)) 4119 return 1; 4120 4121 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 4122 OPCODE_NIC_CORE_DRV_ACTIVE, 4123 octeon_core_drv_init, 4124 octeon_dev); 4125 4126 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 4127 OPCODE_NIC_VF_DRV_NOTICE, 4128 octeon_recv_vf_drv_notice, octeon_dev); 4129 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter); 4130 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev; 4131 schedule_delayed_work(&octeon_dev->nic_poll_work.work, 4132 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 4133 4134 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE); 4135 4136 if (octeon_set_io_queues_off(octeon_dev)) { 4137 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n"); 4138 return 1; 4139 } 4140 4141 if (OCTEON_CN23XX_PF(octeon_dev)) { 4142 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 4143 if (ret) { 4144 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n"); 4145 return ret; 4146 } 4147 } 4148 4149 /* Initialize soft command buffer pool 4150 */ 4151 if (octeon_setup_sc_buffer_pool(octeon_dev)) { 4152 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n"); 4153 return 1; 4154 } 4155 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 4156 4157 /* Setup the data structures that manage this Octeon's Input queues. */ 4158 if (octeon_setup_instr_queues(octeon_dev)) { 4159 dev_err(&octeon_dev->pci_dev->dev, 4160 "instruction queue initialization failed\n"); 4161 return 1; 4162 } 4163 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 4164 4165 /* Initialize lists to manage the requests of different types that 4166 * arrive from user & kernel applications for this octeon device. 4167 */ 4168 if (octeon_setup_response_list(octeon_dev)) { 4169 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n"); 4170 return 1; 4171 } 4172 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE); 4173 4174 if (octeon_setup_output_queues(octeon_dev)) { 4175 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n"); 4176 return 1; 4177 } 4178 4179 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); 4180 4181 if (OCTEON_CN23XX_PF(octeon_dev)) { 4182 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) { 4183 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n"); 4184 return 1; 4185 } 4186 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); 4187 4188 if (octeon_allocate_ioq_vector 4189 (octeon_dev, 4190 octeon_dev->sriov_info.num_pf_rings)) { 4191 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); 4192 return 1; 4193 } 4194 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 4195 4196 } else { 4197 /* The input and output queue registers were setup earlier (the 4198 * queues were not enabled). Any additional registers 4199 * that need to be programmed should be done now. 4200 */ 4201 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 4202 if (ret) { 4203 dev_err(&octeon_dev->pci_dev->dev, 4204 "Failed to configure device registers\n"); 4205 return ret; 4206 } 4207 } 4208 4209 /* Initialize the tasklet that handles output queue packet processing.*/ 4210 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n"); 4211 tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh); 4212 4213 /* Setup the interrupt handler and record the INT SUM register address 4214 */ 4215 if (octeon_setup_interrupt(octeon_dev, 4216 octeon_dev->sriov_info.num_pf_rings)) 4217 return 1; 4218 4219 /* Enable Octeon device interrupts */ 4220 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); 4221 4222 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE); 4223 4224 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE 4225 * the output queue is enabled. 4226 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in 4227 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. 4228 * Otherwise, it is possible that the DRV_ACTIVE message will be sent 4229 * before any credits have been issued, causing the ring to be reset 4230 * (and the f/w appear to never have started). 4231 */ 4232 for (j = 0; j < octeon_dev->num_oqs; j++) 4233 writel(octeon_dev->droq[j]->max_count, 4234 octeon_dev->droq[j]->pkts_credit_reg); 4235 4236 /* Enable the input and output queues for this Octeon device */ 4237 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); 4238 if (ret) { 4239 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues"); 4240 return ret; 4241 } 4242 4243 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); 4244 4245 if (fw_state == FW_NEEDS_TO_BE_LOADED) { 4246 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); 4247 if (!ddr_timeout) { 4248 dev_info(&octeon_dev->pci_dev->dev, 4249 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); 4250 } 4251 4252 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); 4253 4254 /* Wait for the octeon to initialize DDR after the soft-reset.*/ 4255 while (!ddr_timeout) { 4256 set_current_state(TASK_INTERRUPTIBLE); 4257 if (schedule_timeout(HZ / 10)) { 4258 /* user probably pressed Control-C */ 4259 return 1; 4260 } 4261 } 4262 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); 4263 if (ret) { 4264 dev_err(&octeon_dev->pci_dev->dev, 4265 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", 4266 ret); 4267 return 1; 4268 } 4269 4270 if (octeon_wait_for_bootloader(octeon_dev, 1000)) { 4271 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n"); 4272 return 1; 4273 } 4274 4275 /* Divert uboot to take commands from host instead. */ 4276 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50); 4277 4278 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); 4279 ret = octeon_init_consoles(octeon_dev); 4280 if (ret) { 4281 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); 4282 return 1; 4283 } 4284 /* If console debug enabled, specify empty string to use default 4285 * enablement ELSE specify NULL string for 'disabled'. 4286 */ 4287 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL; 4288 ret = octeon_add_console(octeon_dev, 0, dbg_enb); 4289 if (ret) { 4290 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); 4291 return 1; 4292 } else if (octeon_console_debug_enabled(0)) { 4293 /* If console was added AND we're logging console output 4294 * then set our console print function. 4295 */ 4296 octeon_dev->console[0].print = octeon_dbg_console_print; 4297 } 4298 4299 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); 4300 4301 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n"); 4302 ret = load_firmware(octeon_dev); 4303 if (ret) { 4304 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); 4305 return 1; 4306 } 4307 4308 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED); 4309 } 4310 4311 handshake[octeon_dev->octeon_id].init_ok = 1; 4312 complete(&handshake[octeon_dev->octeon_id].init); 4313 4314 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); 4315 oct_priv->dev = octeon_dev; 4316 4317 return 0; 4318 } 4319 4320 /** 4321 * octeon_dbg_console_print - Debug console print function 4322 * @oct: octeon device 4323 * @console_num: console number 4324 * @prefix: first portion of line to display 4325 * @suffix: second portion of line to display 4326 * 4327 * The OCTEON debug console outputs entire lines (excluding '\n'). 4328 * Normally, the line will be passed in the 'prefix' parameter. 4329 * However, due to buffering, it is possible for a line to be split into two 4330 * parts, in which case they will be passed as the 'prefix' parameter and 4331 * 'suffix' parameter. 4332 */ 4333 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, 4334 char *prefix, char *suffix) 4335 { 4336 if (prefix && suffix) 4337 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix, 4338 suffix); 4339 else if (prefix) 4340 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix); 4341 else if (suffix) 4342 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix); 4343 4344 return 0; 4345 } 4346 4347 /** 4348 * liquidio_exit - Exits the module 4349 */ 4350 static void __exit liquidio_exit(void) 4351 { 4352 liquidio_deinit_pci(); 4353 4354 pr_info("LiquidIO network module is now unloaded\n"); 4355 } 4356 4357 module_init(liquidio_init); 4358 module_exit(liquidio_exit); 4359