1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <linux/firmware.h> 22 #include <net/vxlan.h> 23 #include <linux/kthread.h> 24 #include "liquidio_common.h" 25 #include "octeon_droq.h" 26 #include "octeon_iq.h" 27 #include "response_manager.h" 28 #include "octeon_device.h" 29 #include "octeon_nic.h" 30 #include "octeon_main.h" 31 #include "octeon_network.h" 32 #include "cn66xx_regs.h" 33 #include "cn66xx_device.h" 34 #include "cn68xx_device.h" 35 #include "cn23xx_pf_device.h" 36 #include "liquidio_image.h" 37 38 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 39 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); 40 MODULE_LICENSE("GPL"); 41 MODULE_VERSION(LIQUIDIO_VERSION); 42 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME 43 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME 45 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME 47 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME 49 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 50 51 static int ddr_timeout = 10000; 52 module_param(ddr_timeout, int, 0644); 53 MODULE_PARM_DESC(ddr_timeout, 54 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check"); 55 56 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 57 58 static int debug = -1; 59 module_param(debug, int, 0644); 60 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 61 62 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO; 63 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); 64 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\"."); 65 66 static u32 console_bitmask; 67 module_param(console_bitmask, int, 0644); 68 MODULE_PARM_DESC(console_bitmask, 69 "Bitmask indicating which consoles have debug output redirected to syslog."); 70 71 /** 72 * \brief determines if a given console has debug enabled. 73 * @param console console to check 74 * @returns 1 = enabled. 0 otherwise 75 */ 76 static int octeon_console_debug_enabled(u32 console) 77 { 78 return (console_bitmask >> (console)) & 0x1; 79 } 80 81 /* Polling interval for determining when NIC application is alive */ 82 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 83 84 /* runtime link query interval */ 85 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 86 /* update localtime to octeon firmware every 60 seconds. 
87 * This keeps the firmware on the same time reference as the host, making it 88 * easy to correlate firmware-logged events/errors with host events when debugging. 89 */ 90 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 91 92 struct liquidio_if_cfg_context { 93 int octeon_id; 94 95 wait_queue_head_t wc; 96 97 int cond; 98 }; 99 100 struct liquidio_if_cfg_resp { 101 u64 rh; 102 struct liquidio_if_cfg_info cfg_info; 103 u64 status; 104 }; 105 106 struct liquidio_rx_ctl_context { 107 int octeon_id; 108 109 wait_queue_head_t wc; 110 111 int cond; 112 }; 113 114 struct oct_link_status_resp { 115 u64 rh; 116 struct oct_link_info link_info; 117 u64 status; 118 }; 119 120 struct oct_timestamp_resp { 121 u64 rh; 122 u64 timestamp; 123 u64 status; 124 }; 125 126 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) 127 128 union tx_info { 129 u64 u64; 130 struct { 131 #ifdef __BIG_ENDIAN_BITFIELD 132 u16 gso_size; 133 u16 gso_segs; 134 u32 reserved; 135 #else 136 u32 reserved; 137 u16 gso_segs; 138 u16 gso_size; 139 #endif 140 } s; 141 }; 142 143 /** Octeon device properties to be used by the NIC module. 144 * Each octeon device in the system will be represented 145 * by this structure in the NIC module. 146 */ 147 148 #define OCTNIC_MAX_SG (MAX_SKB_FRAGS) 149 150 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 151 #define OCTNIC_GSO_MAX_SIZE \ 152 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 153 154 /** Structure of a node in list of gather components maintained by 155 * NIC driver for each network device. 156 */ 157 struct octnic_gather { 158 /** List manipulation. Next and prev pointers. */ 159 struct list_head list; 160 161 /** Size of the gather component at sg in bytes. */ 162 int sg_size; 163 164 /** Number of bytes that sg was adjusted to make it 8B-aligned. */ 165 int adjust; 166 167 /** Gather component that can accommodate max sized fragment list 168 * received from the IP layer.
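 * Points into the per-queue DMA-coherent region allocated in setup_glists().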
169 */ 170 struct octeon_sg_entry *sg; 171 172 dma_addr_t sg_dma_ptr; 173 }; 174 175 struct handshake { 176 struct completion init; 177 struct completion started; 178 struct pci_dev *pci_dev; 179 int init_ok; 180 int started_ok; 181 }; 182 183 #ifdef CONFIG_PCI_IOV 184 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); 185 #endif 186 187 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, 188 char *prefix, char *suffix); 189 190 static int octeon_device_init(struct octeon_device *); 191 static int liquidio_stop(struct net_device *netdev); 192 static void liquidio_remove(struct pci_dev *pdev); 193 static int liquidio_probe(struct pci_dev *pdev, 194 const struct pci_device_id *ent); 195 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 196 int linkstate); 197 198 static struct handshake handshake[MAX_OCTEON_DEVICES]; 199 static struct completion first_stage; 200 201 static void octeon_droq_bh(unsigned long pdev) 202 { 203 int q_no; 204 int reschedule = 0; 205 struct octeon_device *oct = (struct octeon_device *)pdev; 206 struct octeon_device_priv *oct_priv = 207 (struct octeon_device_priv *)oct->priv; 208 209 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { 210 if (!(oct->io_qmask.oq & BIT_ULL(q_no))) 211 continue; 212 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], 213 MAX_PACKET_BUDGET); 214 lio_enable_irq(oct->droq[q_no], NULL); 215 216 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { 217 /* set time and cnt interrupt thresholds for this DROQ 218 * for NAPI 219 */ 220 int adjusted_q_no = q_no + oct->sriov_info.pf_srn; 221 222 octeon_write_csr64( 223 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no), 224 0x5700000040ULL); 225 octeon_write_csr64( 226 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0); 227 } 228 } 229 230 if (reschedule) 231 tasklet_schedule(&oct_priv->droq_tasklet); 232 } 233 234 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 235 { 236 struct octeon_device_priv *oct_priv = 237 (struct octeon_device_priv *)oct->priv; 238 int retry = 100, pkt_cnt = 0, pending_pkts = 0; 239 int i; 240 241 do { 242 pending_pkts = 0; 243 244 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 245 if (!(oct->io_qmask.oq & BIT_ULL(i))) 246 continue; 247 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 248 } 249 if (pkt_cnt > 0) { 250 pending_pkts += pkt_cnt; 251 tasklet_schedule(&oct_priv->droq_tasklet); 252 } 253 pkt_cnt = 0; 254 schedule_timeout_uninterruptible(1); 255 256 } while (retry-- && pending_pkts); 257 258 return pkt_cnt; 259 } 260 261 /** 262 * \brief Forces all IO queues off on a given device 263 * @param oct Pointer to Octeon device 264 */ 265 static void force_io_queues_off(struct octeon_device *oct) 266 { 267 if ((oct->chip_id == OCTEON_CN66XX) || 268 (oct->chip_id == OCTEON_CN68XX)) { 269 /* Reset the Enable bits for Input Queues. */ 270 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); 271 272 /* Reset the Enable bits for Output Queues. */ 273 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); 274 } 275 } 276 277 /** 278 * \brief Cause device to go quiet so it can be safely removed/reset/etc 279 * @param oct Pointer to Octeon device 280 */ 281 static inline void pcierror_quiesce_device(struct octeon_device *oct) 282 { 283 int i; 284 285 /* Disable the input and output queues now. No more packets will 286 * arrive from Octeon, but we should wait for all packet processing 287 * to finish. 
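 * Requests still queued in the instruction queues are then force-completed, and pending ordered-list requests are forced to time out.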
288 */ 289 force_io_queues_off(oct); 290 291 /* To allow for in-flight requests */ 292 schedule_timeout_uninterruptible(100); 293 294 if (wait_for_pending_requests(oct)) 295 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 296 297 /* Force all requests waiting to be fetched by OCTEON to complete. */ 298 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 299 struct octeon_instr_queue *iq; 300 301 if (!(oct->io_qmask.iq & BIT_ULL(i))) 302 continue; 303 iq = oct->instr_queue[i]; 304 305 if (atomic_read(&iq->instr_pending)) { 306 spin_lock_bh(&iq->lock); 307 iq->fill_cnt = 0; 308 iq->octeon_read_index = iq->host_write_index; 309 iq->stats.instr_processed += 310 atomic_read(&iq->instr_pending); 311 lio_process_iq_request_list(oct, iq, 0); 312 spin_unlock_bh(&iq->lock); 313 } 314 } 315 316 /* Force all pending ordered list requests to time out. */ 317 lio_process_ordered_list(oct, 1); 318 319 /* We do not need to wait for output queue packets to be processed. */ 320 } 321 322 /** 323 * \brief Cleanup PCI AER uncorrectable error status 324 * @param dev Pointer to PCI device 325 */ 326 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 327 { 328 int pos = 0x100; 329 u32 status, mask; 330 331 pr_info("%s :\n", __func__); 332 333 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 334 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 335 if (dev->error_state == pci_channel_io_normal) 336 status &= ~mask; /* Clear corresponding nonfatal bits */ 337 else 338 status &= mask; /* Clear corresponding fatal bits */ 339 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 340 } 341 342 /** 343 * \brief Stop all PCI IO to a given device 344 * @param dev Pointer to Octeon device 345 */ 346 static void stop_pci_io(struct octeon_device *oct) 347 { 348 /* No more instructions will be forwarded. */ 349 atomic_set(&oct->status, OCT_DEV_IN_RESET); 350 351 pci_disable_device(oct->pci_dev); 352 353 /* Disable interrupts */ 354 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 355 356 pcierror_quiesce_device(oct); 357 358 /* Release the interrupt line */ 359 free_irq(oct->pci_dev->irq, oct); 360 361 if (oct->flags & LIO_FLAG_MSI_ENABLED) 362 pci_disable_msi(oct->pci_dev); 363 364 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 365 lio_get_state_string(&oct->status)); 366 367 /* making it a common function for all OCTEON models */ 368 cleanup_aer_uncorrect_error_status(oct->pci_dev); 369 } 370 371 /** 372 * \brief called when PCI error is detected 373 * @param pdev Pointer to PCI device 374 * @param state The current pci connection state 375 * 376 * This function is called after a PCI bus error affecting 377 * this device has been detected. 378 */ 379 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 380 pci_channel_state_t state) 381 { 382 struct octeon_device *oct = pci_get_drvdata(pdev); 383 384 /* Non-correctable Non-fatal errors */ 385 if (state == pci_channel_io_normal) { 386 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 387 cleanup_aer_uncorrect_error_status(oct->pci_dev); 388 return PCI_ERS_RESULT_CAN_RECOVER; 389 } 390 391 /* Non-correctable Fatal errors */ 392 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 393 stop_pci_io(oct); 394 395 /* Always return a DISCONNECT. There is no support for recovery but only 396 * for a clean shutdown. 
397 */ 398 return PCI_ERS_RESULT_DISCONNECT; 399 } 400 401 /** 402 * \brief mmio handler 403 * @param pdev Pointer to PCI device 404 */ 405 static pci_ers_result_t liquidio_pcie_mmio_enabled( 406 struct pci_dev *pdev __attribute__((unused))) 407 { 408 /* We should never hit this since we never ask for a reset for a Fatal 409 * Error. We always return DISCONNECT in io_error above. 410 * But play safe and return RECOVERED for now. 411 */ 412 return PCI_ERS_RESULT_RECOVERED; 413 } 414 415 /** 416 * \brief called after the pci bus has been reset. 417 * @param pdev Pointer to PCI device 418 * 419 * Restart the card from scratch, as if from a cold-boot. Implementation 420 * resembles the first-half of the octeon_resume routine. 421 */ 422 static pci_ers_result_t liquidio_pcie_slot_reset( 423 struct pci_dev *pdev __attribute__((unused))) 424 { 425 /* We should never hit this since we never ask for a reset for a Fatal 426 * Error. We always return DISCONNECT in io_error above. 427 * But play safe and return RECOVERED for now. 428 */ 429 return PCI_ERS_RESULT_RECOVERED; 430 } 431 432 /** 433 * \brief called when traffic can start flowing again. 434 * @param pdev Pointer to PCI device 435 * 436 * This callback is called when the error recovery driver tells us that 437 * its OK to resume normal operation. Implementation resembles the 438 * second-half of the octeon_resume routine. 439 */ 440 static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused))) 441 { 442 /* Nothing to be done here. */ 443 } 444 445 #ifdef CONFIG_PM 446 /** 447 * \brief called when suspending 448 * @param pdev Pointer to PCI device 449 * @param state state to suspend to 450 */ 451 static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)), 452 pm_message_t state __attribute__((unused))) 453 { 454 return 0; 455 } 456 457 /** 458 * \brief called when resuming 459 * @param pdev Pointer to PCI device 460 */ 461 static int liquidio_resume(struct pci_dev *pdev __attribute__((unused))) 462 { 463 return 0; 464 } 465 #endif 466 467 /* For PCI-E Advanced Error Recovery (AER) Interface */ 468 static const struct pci_error_handlers liquidio_err_handler = { 469 .error_detected = liquidio_pcie_error_detected, 470 .mmio_enabled = liquidio_pcie_mmio_enabled, 471 .slot_reset = liquidio_pcie_slot_reset, 472 .resume = liquidio_pcie_resume, 473 }; 474 475 static const struct pci_device_id liquidio_pci_tbl[] = { 476 { /* 68xx */ 477 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 478 }, 479 { /* 66xx */ 480 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 481 }, 482 { /* 23xx pf */ 483 PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 484 }, 485 { 486 0, 0, 0, 0, 0, 0, 0 487 } 488 }; 489 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); 490 491 static struct pci_driver liquidio_pci_driver = { 492 .name = "LiquidIO", 493 .id_table = liquidio_pci_tbl, 494 .probe = liquidio_probe, 495 .remove = liquidio_remove, 496 .err_handler = &liquidio_err_handler, /* For AER */ 497 498 #ifdef CONFIG_PM 499 .suspend = liquidio_suspend, 500 .resume = liquidio_resume, 501 #endif 502 #ifdef CONFIG_PCI_IOV 503 .sriov_configure = liquidio_enable_sriov, 504 #endif 505 }; 506 507 /** 508 * \brief register PCI driver 509 */ 510 static int liquidio_init_pci(void) 511 { 512 return pci_register_driver(&liquidio_pci_driver); 513 } 514 515 /** 516 * \brief unregister PCI driver 517 */ 518 static void liquidio_deinit_pci(void) 519 { 520 pci_unregister_driver(&liquidio_pci_driver); 521 } 522 523 /** 524 * \brief Stop 
Tx queues 525 * @param netdev network device 526 */ 527 static inline void txqs_stop(struct net_device *netdev) 528 { 529 if (netif_is_multiqueue(netdev)) { 530 int i; 531 532 for (i = 0; i < netdev->num_tx_queues; i++) 533 netif_stop_subqueue(netdev, i); 534 } else { 535 netif_stop_queue(netdev); 536 } 537 } 538 539 /** 540 * \brief Start Tx queues 541 * @param netdev network device 542 */ 543 static inline void txqs_start(struct net_device *netdev) 544 { 545 if (netif_is_multiqueue(netdev)) { 546 int i; 547 548 for (i = 0; i < netdev->num_tx_queues; i++) 549 netif_start_subqueue(netdev, i); 550 } else { 551 netif_start_queue(netdev); 552 } 553 } 554 555 /** 556 * \brief Wake Tx queues 557 * @param netdev network device 558 */ 559 static inline void txqs_wake(struct net_device *netdev) 560 { 561 struct lio *lio = GET_LIO(netdev); 562 563 if (netif_is_multiqueue(netdev)) { 564 int i; 565 566 for (i = 0; i < netdev->num_tx_queues; i++) { 567 int qno = lio->linfo.txpciq[i % 568 lio->oct_dev->num_iqs].s.q_no; 569 570 if (__netif_subqueue_stopped(netdev, i)) { 571 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, 572 tx_restart, 1); 573 netif_wake_subqueue(netdev, i); 574 } 575 } 576 } else { 577 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, 578 tx_restart, 1); 579 netif_wake_queue(netdev); 580 } 581 } 582 583 /** 584 * \brief Stop Tx queue 585 * @param netdev network device 586 */ 587 static void stop_txq(struct net_device *netdev) 588 { 589 txqs_stop(netdev); 590 } 591 592 /** 593 * \brief Start Tx queue 594 * @param netdev network device 595 */ 596 static void start_txq(struct net_device *netdev) 597 { 598 struct lio *lio = GET_LIO(netdev); 599 600 if (lio->linfo.link.s.link_up) { 601 txqs_start(netdev); 602 return; 603 } 604 } 605 606 /** 607 * \brief Wake a queue 608 * @param netdev network device 609 * @param q which queue to wake 610 */ 611 static inline void wake_q(struct net_device *netdev, int q) 612 { 613 if (netif_is_multiqueue(netdev)) 614 netif_wake_subqueue(netdev, q); 615 else 616 netif_wake_queue(netdev); 617 } 618 619 /** 620 * \brief Stop a queue 621 * @param netdev network device 622 * @param q which queue to stop 623 */ 624 static inline void stop_q(struct net_device *netdev, int q) 625 { 626 if (netif_is_multiqueue(netdev)) 627 netif_stop_subqueue(netdev, q); 628 else 629 netif_stop_queue(netdev); 630 } 631 632 /** 633 * \brief Check Tx queue status, and take appropriate action 634 * @param lio per-network private data 635 * @returns 0 if full, number of queues woken up otherwise 636 */ 637 static inline int check_txq_status(struct lio *lio) 638 { 639 int ret_val = 0; 640 641 if (netif_is_multiqueue(lio->netdev)) { 642 int numqs = lio->netdev->num_tx_queues; 643 int q, iq = 0; 644 645 /* check each sub-queue state */ 646 for (q = 0; q < numqs; q++) { 647 iq = lio->linfo.txpciq[q % 648 lio->oct_dev->num_iqs].s.q_no; 649 if (octnet_iq_is_full(lio->oct_dev, iq)) 650 continue; 651 if (__netif_subqueue_stopped(lio->netdev, q)) { 652 wake_q(lio->netdev, q); 653 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, 654 tx_restart, 1); 655 ret_val++; 656 } 657 } 658 } else { 659 if (octnet_iq_is_full(lio->oct_dev, lio->txq)) 660 return 0; 661 wake_q(lio->netdev, lio->txq); 662 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, 663 tx_restart, 1); 664 ret_val = 1; 665 } 666 return ret_val; 667 } 668 669 /** 670 * Remove the node at the head of the list. The list would be empty at 671 * the end of this call if there are no more nodes in the list. 
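 * Returns NULL if the list is already empty.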
672 */ 673 static inline struct list_head *list_delete_head(struct list_head *root) 674 { 675 struct list_head *node; 676 677 if ((root->prev == root) && (root->next == root)) 678 node = NULL; 679 else 680 node = root->next; 681 682 if (node) 683 list_del(node); 684 685 return node; 686 } 687 688 /** 689 * \brief Delete gather lists 690 * @param lio per-network private data 691 */ 692 static void delete_glists(struct lio *lio) 693 { 694 struct octnic_gather *g; 695 int i; 696 697 kfree(lio->glist_lock); 698 lio->glist_lock = NULL; 699 700 if (!lio->glist) 701 return; 702 703 for (i = 0; i < lio->linfo.num_txpciq; i++) { 704 do { 705 g = (struct octnic_gather *) 706 list_delete_head(&lio->glist[i]); 707 if (g) 708 kfree(g); 709 } while (g); 710 711 if (lio->glists_virt_base && lio->glists_virt_base[i] && 712 lio->glists_dma_base && lio->glists_dma_base[i]) { 713 lio_dma_free(lio->oct_dev, 714 lio->glist_entry_size * lio->tx_qsize, 715 lio->glists_virt_base[i], 716 lio->glists_dma_base[i]); 717 } 718 } 719 720 kfree(lio->glists_virt_base); 721 lio->glists_virt_base = NULL; 722 723 kfree(lio->glists_dma_base); 724 lio->glists_dma_base = NULL; 725 726 kfree(lio->glist); 727 lio->glist = NULL; 728 } 729 730 /** 731 * \brief Setup gather lists 732 * @param lio per-network private data 733 */ 734 static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) 735 { 736 int i, j; 737 struct octnic_gather *g; 738 739 lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock), 740 GFP_KERNEL); 741 if (!lio->glist_lock) 742 return -ENOMEM; 743 744 lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), 745 GFP_KERNEL); 746 if (!lio->glist) { 747 kfree(lio->glist_lock); 748 lio->glist_lock = NULL; 749 return -ENOMEM; 750 } 751 752 lio->glist_entry_size = 753 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); 754 755 /* allocate memory to store virtual and dma base address of 756 * per glist consistent memory 757 */ 758 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), 759 GFP_KERNEL); 760 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), 761 GFP_KERNEL); 762 763 if (!lio->glists_virt_base || !lio->glists_dma_base) { 764 delete_glists(lio); 765 return -ENOMEM; 766 } 767 768 for (i = 0; i < num_iqs; i++) { 769 int numa_node = dev_to_node(&oct->pci_dev->dev); 770 771 spin_lock_init(&lio->glist_lock[i]); 772 773 INIT_LIST_HEAD(&lio->glist[i]); 774 775 lio->glists_virt_base[i] = 776 lio_dma_alloc(oct, 777 lio->glist_entry_size * lio->tx_qsize, 778 &lio->glists_dma_base[i]); 779 780 if (!lio->glists_virt_base[i]) { 781 delete_glists(lio); 782 return -ENOMEM; 783 } 784 785 for (j = 0; j < lio->tx_qsize; j++) { 786 g = kzalloc_node(sizeof(*g), GFP_KERNEL, 787 numa_node); 788 if (!g) 789 g = kzalloc(sizeof(*g), GFP_KERNEL); 790 if (!g) 791 break; 792 793 g->sg = lio->glists_virt_base[i] + 794 (j * lio->glist_entry_size); 795 796 g->sg_dma_ptr = lio->glists_dma_base[i] + 797 (j * lio->glist_entry_size); 798 799 list_add_tail(&g->list, &lio->glist[i]); 800 } 801 802 if (j != lio->tx_qsize) { 803 delete_glists(lio); 804 return -ENOMEM; 805 } 806 } 807 808 return 0; 809 } 810 811 /** 812 * \brief Print link information 813 * @param netdev network device 814 */ 815 static void print_link_info(struct net_device *netdev) 816 { 817 struct lio *lio = GET_LIO(netdev); 818 819 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 820 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 821 struct oct_link_info *linfo = &lio->linfo; 822 823 if 
(linfo->link.s.link_up) { 824 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 825 linfo->link.s.speed, 826 (linfo->link.s.duplex) ? "Full" : "Half"); 827 } else { 828 netif_info(lio, link, lio->netdev, "Link Down\n"); 829 } 830 } 831 } 832 833 /** 834 * \brief Routine to notify MTU change 835 * @param work work_struct data structure 836 */ 837 static void octnet_link_status_change(struct work_struct *work) 838 { 839 struct cavium_wk *wk = (struct cavium_wk *)work; 840 struct lio *lio = (struct lio *)wk->ctxptr; 841 842 rtnl_lock(); 843 call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev); 844 rtnl_unlock(); 845 } 846 847 /** 848 * \brief Sets up the mtu status change work 849 * @param netdev network device 850 */ 851 static inline int setup_link_status_change_wq(struct net_device *netdev) 852 { 853 struct lio *lio = GET_LIO(netdev); 854 struct octeon_device *oct = lio->oct_dev; 855 856 lio->link_status_wq.wq = alloc_workqueue("link-status", 857 WQ_MEM_RECLAIM, 0); 858 if (!lio->link_status_wq.wq) { 859 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 860 return -1; 861 } 862 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 863 octnet_link_status_change); 864 lio->link_status_wq.wk.ctxptr = lio; 865 866 return 0; 867 } 868 869 static inline void cleanup_link_status_change_wq(struct net_device *netdev) 870 { 871 struct lio *lio = GET_LIO(netdev); 872 873 if (lio->link_status_wq.wq) { 874 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 875 destroy_workqueue(lio->link_status_wq.wq); 876 } 877 } 878 879 /** 880 * \brief Update link status 881 * @param netdev network device 882 * @param ls link status structure 883 * 884 * Called on receipt of a link status response from the core application to 885 * update each interface's link status. 
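 * If the state changed while the interface is open, the carrier state and Tx queues are brought in line with the new link state.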
886 */ 887 static inline void update_link_status(struct net_device *netdev, 888 union oct_link_status *ls) 889 { 890 struct lio *lio = GET_LIO(netdev); 891 int changed = (lio->linfo.link.u64 != ls->u64); 892 893 lio->linfo.link.u64 = ls->u64; 894 895 if ((lio->intf_open) && (changed)) { 896 print_link_info(netdev); 897 lio->link_changes++; 898 899 if (lio->linfo.link.s.link_up) { 900 netif_carrier_on(netdev); 901 txqs_wake(netdev); 902 } else { 903 netif_carrier_off(netdev); 904 stop_txq(netdev); 905 } 906 } 907 } 908 909 /** 910 * lio_sync_octeon_time_cb - callback that is invoked when soft command 911 * sent by lio_sync_octeon_time() has completed successfully or failed 912 * 913 * @oct - octeon device structure 914 * @status - indicates success or failure 915 * @buf - pointer to the command that was sent to firmware 916 **/ 917 static void lio_sync_octeon_time_cb(struct octeon_device *oct, 918 u32 status, void *buf) 919 { 920 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 921 922 if (status) 923 dev_err(&oct->pci_dev->dev, 924 "Failed to sync time to octeon; error=%d\n", status); 925 926 octeon_free_soft_command(oct, sc); 927 } 928 929 /** 930 * lio_sync_octeon_time - send latest localtime to octeon firmware so that 931 * firmware will correct it's time, in case there is a time skew 932 * 933 * @work: work scheduled to send time update to octeon firmware 934 **/ 935 static void lio_sync_octeon_time(struct work_struct *work) 936 { 937 struct cavium_wk *wk = (struct cavium_wk *)work; 938 struct lio *lio = (struct lio *)wk->ctxptr; 939 struct octeon_device *oct = lio->oct_dev; 940 struct octeon_soft_command *sc; 941 struct timespec64 ts; 942 struct lio_time *lt; 943 int ret; 944 945 sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0); 946 if (!sc) { 947 dev_err(&oct->pci_dev->dev, 948 "Failed to sync time to octeon: soft command allocation failed\n"); 949 return; 950 } 951 952 lt = (struct lio_time *)sc->virtdptr; 953 954 /* Get time of the day */ 955 getnstimeofday64(&ts); 956 lt->sec = ts.tv_sec; 957 lt->nsec = ts.tv_nsec; 958 octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); 959 960 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 961 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 962 OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0); 963 964 sc->callback = lio_sync_octeon_time_cb; 965 sc->callback_arg = sc; 966 sc->wait_time = 1000; 967 968 ret = octeon_send_soft_command(oct, sc); 969 if (ret == IQ_SEND_FAILED) { 970 dev_err(&oct->pci_dev->dev, 971 "Failed to sync time to octeon: failed to send soft command\n"); 972 octeon_free_soft_command(oct, sc); 973 } 974 975 queue_delayed_work(lio->sync_octeon_time_wq.wq, 976 &lio->sync_octeon_time_wq.wk.work, 977 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); 978 } 979 980 /** 981 * setup_sync_octeon_time_wq - Sets up the work to periodically update 982 * local time to octeon firmware 983 * 984 * @netdev - network device which should send time update to firmware 985 **/ 986 static inline int setup_sync_octeon_time_wq(struct net_device *netdev) 987 { 988 struct lio *lio = GET_LIO(netdev); 989 struct octeon_device *oct = lio->oct_dev; 990 991 lio->sync_octeon_time_wq.wq = 992 alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0); 993 if (!lio->sync_octeon_time_wq.wq) { 994 dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n"); 995 return -1; 996 } 997 INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work, 998 lio_sync_octeon_time); 999 lio->sync_octeon_time_wq.wk.ctxptr = lio; 1000 
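/* Schedule the first sync; lio_sync_octeon_time() re-queues itself at the same interval. */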
queue_delayed_work(lio->sync_octeon_time_wq.wq, 1001 &lio->sync_octeon_time_wq.wk.work, 1002 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); 1003 1004 return 0; 1005 } 1006 1007 /** 1008 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created 1009 * to periodically update local time to octeon firmware 1010 * 1011 * @netdev - network device which should send time update to firmware 1012 **/ 1013 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev) 1014 { 1015 struct lio *lio = GET_LIO(netdev); 1016 struct cavium_wq *time_wq = &lio->sync_octeon_time_wq; 1017 1018 if (time_wq->wq) { 1019 cancel_delayed_work_sync(&time_wq->wk.work); 1020 destroy_workqueue(time_wq->wq); 1021 } 1022 } 1023 1024 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) 1025 { 1026 struct octeon_device *other_oct; 1027 1028 other_oct = lio_get_device(oct->octeon_id + 1); 1029 1030 if (other_oct && other_oct->pci_dev) { 1031 int oct_busnum, other_oct_busnum; 1032 1033 oct_busnum = oct->pci_dev->bus->number; 1034 other_oct_busnum = other_oct->pci_dev->bus->number; 1035 1036 if (oct_busnum == other_oct_busnum) { 1037 int oct_slot, other_oct_slot; 1038 1039 oct_slot = PCI_SLOT(oct->pci_dev->devfn); 1040 other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn); 1041 1042 if (oct_slot == other_oct_slot) 1043 return other_oct; 1044 } 1045 } 1046 1047 return NULL; 1048 } 1049 1050 static void disable_all_vf_links(struct octeon_device *oct) 1051 { 1052 struct net_device *netdev; 1053 int max_vfs, vf, i; 1054 1055 if (!oct) 1056 return; 1057 1058 max_vfs = oct->sriov_info.max_vfs; 1059 1060 for (i = 0; i < oct->ifcount; i++) { 1061 netdev = oct->props[i].netdev; 1062 if (!netdev) 1063 continue; 1064 1065 for (vf = 0; vf < max_vfs; vf++) 1066 liquidio_set_vf_link_state(netdev, vf, 1067 IFLA_VF_LINK_STATE_DISABLE); 1068 } 1069 } 1070 1071 static int liquidio_watchdog(void *param) 1072 { 1073 bool err_msg_was_printed[LIO_MAX_CORES]; 1074 u16 mask_of_crashed_or_stuck_cores = 0; 1075 bool all_vf_links_are_disabled = false; 1076 struct octeon_device *oct = param; 1077 struct octeon_device *other_oct; 1078 #ifdef CONFIG_MODULE_UNLOAD 1079 long refcount, vfs_referencing_pf; 1080 u64 vfs_mask1, vfs_mask2; 1081 #endif 1082 int core; 1083 1084 memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed)); 1085 1086 while (!kthread_should_stop()) { 1087 /* sleep for a couple of seconds so that we don't hog the CPU */ 1088 set_current_state(TASK_INTERRUPTIBLE); 1089 schedule_timeout(msecs_to_jiffies(2000)); 1090 1091 mask_of_crashed_or_stuck_cores = 1092 (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); 1093 1094 if (!mask_of_crashed_or_stuck_cores) 1095 continue; 1096 1097 WRITE_ONCE(oct->cores_crashed, true); 1098 other_oct = get_other_octeon_device(oct); 1099 if (other_oct) 1100 WRITE_ONCE(other_oct->cores_crashed, true); 1101 1102 for (core = 0; core < LIO_MAX_CORES; core++) { 1103 bool core_crashed_or_got_stuck; 1104 1105 core_crashed_or_got_stuck = 1106 (mask_of_crashed_or_stuck_cores 1107 >> core) & 1; 1108 1109 if (core_crashed_or_got_stuck && 1110 !err_msg_was_printed[core]) { 1111 dev_err(&oct->pci_dev->dev, 1112 "ERROR: Octeon core %d crashed or got stuck! 
See oct-fwdump for details.\n", 1113 core); 1114 err_msg_was_printed[core] = true; 1115 } 1116 } 1117 1118 if (all_vf_links_are_disabled) 1119 continue; 1120 1121 disable_all_vf_links(oct); 1122 disable_all_vf_links(other_oct); 1123 all_vf_links_are_disabled = true; 1124 1125 #ifdef CONFIG_MODULE_UNLOAD 1126 vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask); 1127 vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask); 1128 1129 vfs_referencing_pf = hweight64(vfs_mask1); 1130 vfs_referencing_pf += hweight64(vfs_mask2); 1131 1132 refcount = module_refcount(THIS_MODULE); 1133 if (refcount >= vfs_referencing_pf) { 1134 while (vfs_referencing_pf) { 1135 module_put(THIS_MODULE); 1136 vfs_referencing_pf--; 1137 } 1138 } 1139 #endif 1140 } 1141 1142 return 0; 1143 } 1144 1145 /** 1146 * \brief PCI probe handler 1147 * @param pdev PCI device structure 1148 * @param ent unused 1149 */ 1150 static int 1151 liquidio_probe(struct pci_dev *pdev, 1152 const struct pci_device_id *ent __attribute__((unused))) 1153 { 1154 struct octeon_device *oct_dev = NULL; 1155 struct handshake *hs; 1156 1157 oct_dev = octeon_allocate_device(pdev->device, 1158 sizeof(struct octeon_device_priv)); 1159 if (!oct_dev) { 1160 dev_err(&pdev->dev, "Unable to allocate device\n"); 1161 return -ENOMEM; 1162 } 1163 1164 if (pdev->device == OCTEON_CN23XX_PF_VID) 1165 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 1166 1167 /* Enable PTP for 6XXX Device */ 1168 if (((pdev->device == OCTEON_CN66XX) || 1169 (pdev->device == OCTEON_CN68XX))) 1170 oct_dev->ptp_enable = true; 1171 else 1172 oct_dev->ptp_enable = false; 1173 1174 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 1175 (u32)pdev->vendor, (u32)pdev->device); 1176 1177 /* Assign octeon_device for this device to the private data area. */ 1178 pci_set_drvdata(pdev, oct_dev); 1179 1180 /* set linux specific device pointer */ 1181 oct_dev->pci_dev = (void *)pdev; 1182 1183 hs = &handshake[oct_dev->octeon_id]; 1184 init_completion(&hs->init); 1185 init_completion(&hs->started); 1186 hs->pci_dev = pdev; 1187 1188 if (oct_dev->octeon_id == 0) 1189 /* first LiquidIO NIC is detected */ 1190 complete(&first_stage); 1191 1192 if (octeon_device_init(oct_dev)) { 1193 complete(&hs->init); 1194 liquidio_remove(pdev); 1195 return -ENOMEM; 1196 } 1197 1198 if (OCTEON_CN23XX_PF(oct_dev)) { 1199 u8 bus, device, function; 1200 1201 if (atomic_read(oct_dev->adapter_refcount) == 1) { 1202 /* Each NIC gets one watchdog kernel thread. The first 1203 * PF (of each NIC) that gets pci_driver->probe()'d 1204 * creates that thread. 1205 */ 1206 bus = pdev->bus->number; 1207 device = PCI_SLOT(pdev->devfn); 1208 function = PCI_FUNC(pdev->devfn); 1209 oct_dev->watchdog_task = kthread_create( 1210 liquidio_watchdog, oct_dev, 1211 "liowd/%02hhx:%02hhx.%hhx", bus, device, function); 1212 if (!IS_ERR(oct_dev->watchdog_task)) { 1213 wake_up_process(oct_dev->watchdog_task); 1214 } else { 1215 oct_dev->watchdog_task = NULL; 1216 dev_err(&oct_dev->pci_dev->dev, 1217 "failed to create kernel_thread\n"); 1218 liquidio_remove(pdev); 1219 return -1; 1220 } 1221 } 1222 } 1223 1224 oct_dev->rx_pause = 1; 1225 oct_dev->tx_pause = 1; 1226 1227 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 1228 1229 return 0; 1230 } 1231 1232 static bool fw_type_is_auto(void) 1233 { 1234 return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO, 1235 sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0; 1236 } 1237 1238 /** 1239 * \brief PCI FLR for each Octeon device. 
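 * Used at teardown in place of a soft reset when the firmware was preloaded from flash (autoboot).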
1240 * @param oct octeon device 1241 */ 1242 static void octeon_pci_flr(struct octeon_device *oct) 1243 { 1244 int rc; 1245 1246 pci_save_state(oct->pci_dev); 1247 1248 pci_cfg_access_lock(oct->pci_dev); 1249 1250 /* Quiesce the device completely */ 1251 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 1252 PCI_COMMAND_INTX_DISABLE); 1253 1254 rc = __pci_reset_function_locked(oct->pci_dev); 1255 1256 if (rc != 0) 1257 dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n", 1258 rc, oct->pf_num); 1259 1260 pci_cfg_access_unlock(oct->pci_dev); 1261 1262 pci_restore_state(oct->pci_dev); 1263 } 1264 1265 /** 1266 * \brief Destroy resources associated with octeon device 1267 * @param oct octeon device 1268 * 1269 */ 1270 static void octeon_destroy_resources(struct octeon_device *oct) 1271 { 1272 int i, refcount; 1273 struct msix_entry *msix_entries; 1274 struct octeon_device_priv *oct_priv = 1275 (struct octeon_device_priv *)oct->priv; 1276 1277 struct handshake *hs; 1278 1279 switch (atomic_read(&oct->status)) { 1280 case OCT_DEV_RUNNING: 1281 case OCT_DEV_CORE_OK: 1282 1283 /* No more instructions will be forwarded. */ 1284 atomic_set(&oct->status, OCT_DEV_IN_RESET); 1285 1286 oct->app_mode = CVM_DRV_INVALID_APP; 1287 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 1288 lio_get_state_string(&oct->status)); 1289 1290 schedule_timeout_uninterruptible(HZ / 10); 1291 1292 /* fallthrough */ 1293 case OCT_DEV_HOST_OK: 1294 1295 /* fallthrough */ 1296 case OCT_DEV_CONSOLE_INIT_DONE: 1297 /* Remove any consoles */ 1298 octeon_remove_consoles(oct); 1299 1300 /* fallthrough */ 1301 case OCT_DEV_IO_QUEUES_DONE: 1302 if (wait_for_pending_requests(oct)) 1303 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 1304 1305 if (lio_wait_for_instr_fetch(oct)) 1306 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 1307 1308 /* Disable the input and output queues now. No more packets will 1309 * arrive from Octeon, but we should wait for all packet 1310 * processing to finish.
1311 */ 1312 oct->fn_list.disable_io_queues(oct); 1313 1314 if (lio_wait_for_oq_pkts(oct)) 1315 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 1316 1317 /* fallthrough */ 1318 case OCT_DEV_INTR_SET_DONE: 1319 /* Disable interrupts */ 1320 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 1321 1322 if (oct->msix_on) { 1323 msix_entries = (struct msix_entry *)oct->msix_entries; 1324 for (i = 0; i < oct->num_msix_irqs - 1; i++) { 1325 if (oct->ioq_vector[i].vector) { 1326 /* clear the affinity_cpumask */ 1327 irq_set_affinity_hint( 1328 msix_entries[i].vector, 1329 NULL); 1330 free_irq(msix_entries[i].vector, 1331 &oct->ioq_vector[i]); 1332 oct->ioq_vector[i].vector = 0; 1333 } 1334 } 1335 /* non-iov vector's argument is oct struct */ 1336 free_irq(msix_entries[i].vector, oct); 1337 1338 pci_disable_msix(oct->pci_dev); 1339 kfree(oct->msix_entries); 1340 oct->msix_entries = NULL; 1341 } else { 1342 /* Release the interrupt line */ 1343 free_irq(oct->pci_dev->irq, oct); 1344 1345 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1346 pci_disable_msi(oct->pci_dev); 1347 } 1348 1349 kfree(oct->irq_name_storage); 1350 oct->irq_name_storage = NULL; 1351 1352 /* fallthrough */ 1353 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 1354 if (OCTEON_CN23XX_PF(oct)) 1355 octeon_free_ioq_vector(oct); 1356 1357 /* fallthrough */ 1358 case OCT_DEV_MBOX_SETUP_DONE: 1359 if (OCTEON_CN23XX_PF(oct)) 1360 oct->fn_list.free_mbox(oct); 1361 1362 /* fallthrough */ 1363 case OCT_DEV_IN_RESET: 1364 case OCT_DEV_DROQ_INIT_DONE: 1365 /* Wait for any pending operations */ 1366 mdelay(100); 1367 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 1368 if (!(oct->io_qmask.oq & BIT_ULL(i))) 1369 continue; 1370 octeon_delete_droq(oct, i); 1371 } 1372 1373 /* Force any pending handshakes to complete */ 1374 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 1375 hs = &handshake[i]; 1376 1377 if (hs->pci_dev) { 1378 handshake[oct->octeon_id].init_ok = 0; 1379 complete(&handshake[oct->octeon_id].init); 1380 handshake[oct->octeon_id].started_ok = 0; 1381 complete(&handshake[oct->octeon_id].started); 1382 } 1383 } 1384 1385 /* fallthrough */ 1386 case OCT_DEV_RESP_LIST_INIT_DONE: 1387 octeon_delete_response_list(oct); 1388 1389 /* fallthrough */ 1390 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 1391 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 1392 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1393 continue; 1394 octeon_delete_instr_queue(oct, i); 1395 } 1396 #ifdef CONFIG_PCI_IOV 1397 if (oct->sriov_info.sriov_enabled) 1398 pci_disable_sriov(oct->pci_dev); 1399 #endif 1400 /* fallthrough */ 1401 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 1402 octeon_free_sc_buffer_pool(oct); 1403 1404 /* fallthrough */ 1405 case OCT_DEV_DISPATCH_INIT_DONE: 1406 octeon_delete_dispatch_list(oct); 1407 cancel_delayed_work_sync(&oct->nic_poll_work.work); 1408 1409 /* fallthrough */ 1410 case OCT_DEV_PCI_MAP_DONE: 1411 refcount = octeon_deregister_device(oct); 1412 1413 /* Soft reset the octeon device before exiting. 1414 * However, if fw was loaded from card (i.e. autoboot), 1415 * perform an FLR instead. 1416 * Implementation note: only soft-reset the device 1417 * if it is a CN6XXX OR the LAST CN23XX device. 
*/ 1419 if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED) 1420 octeon_pci_flr(oct); 1421 else if (OCTEON_CN6XXX(oct) || !refcount) 1422 oct->fn_list.soft_reset(oct); 1423 1424 octeon_unmap_pci_barx(oct, 0); 1425 octeon_unmap_pci_barx(oct, 1); 1426 1427 /* fallthrough */ 1428 case OCT_DEV_PCI_ENABLE_DONE: 1429 pci_clear_master(oct->pci_dev); 1430 /* Disable the device, releasing the PCI INT */ 1431 pci_disable_device(oct->pci_dev); 1432 1433 /* fallthrough */ 1434 case OCT_DEV_BEGIN_STATE: 1435 /* Nothing to be done here either */ 1436 break; 1437 } /* end switch (oct->status) */ 1438 1439 tasklet_kill(&oct_priv->droq_tasklet); 1440 } 1441 1442 /** 1443 * \brief Callback for rx ctrl 1444 * @param status status of request 1445 * @param buf pointer to resp structure 1446 */ 1447 static void rx_ctl_callback(struct octeon_device *oct, 1448 u32 status, 1449 void *buf) 1450 { 1451 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1452 struct liquidio_rx_ctl_context *ctx; 1453 1454 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 1455 1456 oct = lio_get_device(ctx->octeon_id); 1457 if (status) 1458 dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n", 1459 CVM_CAST64(status)); 1460 WRITE_ONCE(ctx->cond, 1); 1461 1462 /* This barrier is required to be sure that the response has been 1463 * written fully before waking up the handler 1464 */ 1465 wmb(); 1466 1467 wake_up_interruptible(&ctx->wc); 1468 } 1469 1470 /** 1471 * \brief Send Rx control command 1472 * @param lio per-network private data 1473 * @param start_stop whether to start or stop 1474 */ 1475 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1476 { 1477 struct octeon_soft_command *sc; 1478 struct liquidio_rx_ctl_context *ctx; 1479 union octnet_cmd *ncmd; 1480 int ctx_size = sizeof(struct liquidio_rx_ctl_context); 1481 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1482 int retval; 1483 1484 if (oct->props[lio->ifidx].rx_on == start_stop) 1485 return; 1486 1487 sc = (struct octeon_soft_command *) 1488 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 1489 16, ctx_size); 1490 if (!sc) { netif_info(lio, rx_err, lio->netdev, "Failed to allocate octeon_soft_command\n"); return; } 1491 ncmd = (union octnet_cmd *)sc->virtdptr; 1492 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 1493 1494 WRITE_ONCE(ctx->cond, 0); 1495 ctx->octeon_id = lio_get_device_id(oct); 1496 init_waitqueue_head(&ctx->wc); 1497 1498 ncmd->u64 = 0; 1499 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 1500 ncmd->s.param1 = start_stop; 1501 1502 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 1503 1504 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1505 1506 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1507 OPCODE_NIC_CMD, 0, 0, 0); 1508 1509 sc->callback = rx_ctl_callback; 1510 sc->callback_arg = sc; 1511 sc->wait_time = 5000; 1512 1513 retval = octeon_send_soft_command(oct, sc); 1514 if (retval == IQ_SEND_FAILED) { 1515 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1516 } else { 1517 /* Sleep on a wait queue till the cond flag indicates that the 1518 * response arrived or timed-out. 1519 */ 1520 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) 1521 return; 1522 oct->props[lio->ifidx].rx_on = start_stop; 1523 } 1524 1525 octeon_free_soft_command(oct, sc); 1526 } 1527 1528 /** 1529 * \brief Destroy NIC device interface 1530 * @param oct octeon device 1531 * @param ifidx which interface to destroy 1532 * 1533 * Cleanup associated with each interface for an Octeon device when NIC 1534 * module is being unloaded or if initialization fails during load.
1535 */ 1536 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1537 { 1538 struct net_device *netdev = oct->props[ifidx].netdev; 1539 struct lio *lio; 1540 struct napi_struct *napi, *n; 1541 1542 if (!netdev) { 1543 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1544 __func__, ifidx); 1545 return; 1546 } 1547 1548 lio = GET_LIO(netdev); 1549 1550 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1551 1552 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1553 liquidio_stop(netdev); 1554 1555 if (oct->props[lio->ifidx].napi_enabled == 1) { 1556 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1557 napi_disable(napi); 1558 1559 oct->props[lio->ifidx].napi_enabled = 0; 1560 1561 if (OCTEON_CN23XX_PF(oct)) 1562 oct->droq[0]->ops.poll_mode = 0; 1563 } 1564 1565 /* Delete NAPI */ 1566 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1567 netif_napi_del(napi); 1568 1569 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1570 unregister_netdev(netdev); 1571 1572 cleanup_sync_octeon_time_wq(netdev); 1573 cleanup_link_status_change_wq(netdev); 1574 1575 cleanup_rx_oom_poll_fn(netdev); 1576 1577 delete_glists(lio); 1578 1579 free_netdev(netdev); 1580 1581 oct->props[ifidx].gmxport = -1; 1582 1583 oct->props[ifidx].netdev = NULL; 1584 } 1585 1586 /** 1587 * \brief Stop complete NIC functionality 1588 * @param oct octeon device 1589 */ 1590 static int liquidio_stop_nic_module(struct octeon_device *oct) 1591 { 1592 int i, j; 1593 struct lio *lio; 1594 1595 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1596 if (!oct->ifcount) { 1597 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1598 return 1; 1599 } 1600 1601 spin_lock_bh(&oct->cmd_resp_wqlock); 1602 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1603 spin_unlock_bh(&oct->cmd_resp_wqlock); 1604 1605 for (i = 0; i < oct->ifcount; i++) { 1606 lio = GET_LIO(oct->props[i].netdev); 1607 for (j = 0; j < oct->num_oqs; j++) 1608 octeon_unregister_droq_ops(oct, 1609 lio->linfo.rxpciq[j].s.q_no); 1610 } 1611 1612 for (i = 0; i < oct->ifcount; i++) 1613 liquidio_destroy_nic_device(oct, i); 1614 1615 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1616 return 0; 1617 } 1618 1619 /** 1620 * \brief Cleans up resources at unload time 1621 * @param pdev PCI device structure 1622 */ 1623 static void liquidio_remove(struct pci_dev *pdev) 1624 { 1625 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1626 1627 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1628 1629 if (oct_dev->watchdog_task) 1630 kthread_stop(oct_dev->watchdog_task); 1631 1632 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) 1633 liquidio_stop_nic_module(oct_dev); 1634 1635 /* Reset the octeon device and cleanup all memory allocated for 1636 * the octeon device by driver. 1637 */ 1638 octeon_destroy_resources(oct_dev); 1639 1640 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1641 1642 /* This octeon device has been removed. Update the global 1643 * data structure to reflect this. Free the device structure. 
1644 */ 1645 octeon_free_device_mem(oct_dev); 1646 } 1647 1648 /** 1649 * \brief Identify the Octeon device and to map the BAR address space 1650 * @param oct octeon device 1651 */ 1652 static int octeon_chip_specific_setup(struct octeon_device *oct) 1653 { 1654 u32 dev_id, rev_id; 1655 int ret = 1; 1656 char *s; 1657 1658 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1659 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1660 oct->rev_id = rev_id & 0xff; 1661 1662 switch (dev_id) { 1663 case OCTEON_CN68XX_PCIID: 1664 oct->chip_id = OCTEON_CN68XX; 1665 ret = lio_setup_cn68xx_octeon_device(oct); 1666 s = "CN68XX"; 1667 break; 1668 1669 case OCTEON_CN66XX_PCIID: 1670 oct->chip_id = OCTEON_CN66XX; 1671 ret = lio_setup_cn66xx_octeon_device(oct); 1672 s = "CN66XX"; 1673 break; 1674 1675 case OCTEON_CN23XX_PCIID_PF: 1676 oct->chip_id = OCTEON_CN23XX_PF_VID; 1677 ret = setup_cn23xx_octeon_pf_device(oct); 1678 if (ret) 1679 break; 1680 #ifdef CONFIG_PCI_IOV 1681 if (!ret) 1682 pci_sriov_set_totalvfs(oct->pci_dev, 1683 oct->sriov_info.max_vfs); 1684 #endif 1685 s = "CN23XX"; 1686 break; 1687 1688 default: 1689 s = "?"; 1690 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1691 dev_id); 1692 } 1693 1694 if (!ret) 1695 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s, 1696 OCTEON_MAJOR_REV(oct), 1697 OCTEON_MINOR_REV(oct), 1698 octeon_get_conf(oct)->card_name, 1699 LIQUIDIO_VERSION); 1700 1701 return ret; 1702 } 1703 1704 /** 1705 * \brief PCI initialization for each Octeon device. 1706 * @param oct octeon device 1707 */ 1708 static int octeon_pci_os_setup(struct octeon_device *oct) 1709 { 1710 /* setup PCI stuff first */ 1711 if (pci_enable_device(oct->pci_dev)) { 1712 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1713 return 1; 1714 } 1715 1716 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1717 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1718 pci_disable_device(oct->pci_dev); 1719 return 1; 1720 } 1721 1722 /* Enable PCI DMA Master. 
*/ 1723 pci_set_master(oct->pci_dev); 1724 1725 return 0; 1726 } 1727 1728 static inline int skb_iq(struct lio *lio, struct sk_buff *skb) 1729 { 1730 int q = 0; 1731 1732 if (netif_is_multiqueue(lio->netdev)) 1733 q = skb->queue_mapping % lio->linfo.num_txpciq; 1734 1735 return q; 1736 } 1737 1738 /** 1739 * \brief Check Tx queue state for a given network buffer 1740 * @param lio per-network private data 1741 * @param skb network buffer 1742 */ 1743 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) 1744 { 1745 int q = 0, iq = 0; 1746 1747 if (netif_is_multiqueue(lio->netdev)) { 1748 q = skb->queue_mapping; 1749 iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no; 1750 } else { 1751 iq = lio->txq; 1752 q = iq; 1753 } 1754 1755 if (octnet_iq_is_full(lio->oct_dev, iq)) 1756 return 0; 1757 1758 if (__netif_subqueue_stopped(lio->netdev, q)) { 1759 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); 1760 wake_q(lio->netdev, q); 1761 } 1762 return 1; 1763 } 1764 1765 /** 1766 * \brief Unmap and free network buffer 1767 * @param buf buffer 1768 */ 1769 static void free_netbuf(void *buf) 1770 { 1771 struct sk_buff *skb; 1772 struct octnet_buf_free_info *finfo; 1773 struct lio *lio; 1774 1775 finfo = (struct octnet_buf_free_info *)buf; 1776 skb = finfo->skb; 1777 lio = finfo->lio; 1778 1779 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1780 DMA_TO_DEVICE); 1781 1782 check_txq_state(lio, skb); 1783 1784 tx_buffer_free(skb); 1785 } 1786 1787 /** 1788 * \brief Unmap and free gather buffer 1789 * @param buf buffer 1790 */ 1791 static void free_netsgbuf(void *buf) 1792 { 1793 struct octnet_buf_free_info *finfo; 1794 struct sk_buff *skb; 1795 struct lio *lio; 1796 struct octnic_gather *g; 1797 int i, frags, iq; 1798 1799 finfo = (struct octnet_buf_free_info *)buf; 1800 skb = finfo->skb; 1801 lio = finfo->lio; 1802 g = finfo->g; 1803 frags = skb_shinfo(skb)->nr_frags; 1804 1805 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1806 g->sg[0].ptr[0], (skb->len - skb->data_len), 1807 DMA_TO_DEVICE); 1808 1809 i = 1; 1810 while (frags--) { 1811 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1812 1813 pci_unmap_page((lio->oct_dev)->pci_dev, 1814 g->sg[(i >> 2)].ptr[(i & 3)], 1815 frag->size, DMA_TO_DEVICE); 1816 i++; 1817 } 1818 1819 iq = skb_iq(lio, skb); 1820 spin_lock(&lio->glist_lock[iq]); 1821 list_add_tail(&g->list, &lio->glist[iq]); 1822 spin_unlock(&lio->glist_lock[iq]); 1823 1824 check_txq_state(lio, skb); /* mq support: sub-queue state check */ 1825 1826 tx_buffer_free(skb); 1827 } 1828 1829 /** 1830 * \brief Unmap and free gather buffer with response 1831 * @param buf buffer 1832 */ 1833 static void free_netsgbuf_with_resp(void *buf) 1834 { 1835 struct octeon_soft_command *sc; 1836 struct octnet_buf_free_info *finfo; 1837 struct sk_buff *skb; 1838 struct lio *lio; 1839 struct octnic_gather *g; 1840 int i, frags, iq; 1841 1842 sc = (struct octeon_soft_command *)buf; 1843 skb = (struct sk_buff *)sc->callback_arg; 1844 finfo = (struct octnet_buf_free_info *)&skb->cb; 1845 1846 lio = finfo->lio; 1847 g = finfo->g; 1848 frags = skb_shinfo(skb)->nr_frags; 1849 1850 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1851 g->sg[0].ptr[0], (skb->len - skb->data_len), 1852 DMA_TO_DEVICE); 1853 1854 i = 1; 1855 while (frags--) { 1856 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1857 1858 pci_unmap_page((lio->oct_dev)->pci_dev, 1859 g->sg[(i >> 2)].ptr[(i & 3)], 1860 frag->size, DMA_TO_DEVICE); 1861 i++; 1862 } 1863 1864 iq = 
skb_iq(lio, skb); 1865 1866 spin_lock(&lio->glist_lock[iq]); 1867 list_add_tail(&g->list, &lio->glist[iq]); 1868 spin_unlock(&lio->glist_lock[iq]); 1869 1870 /* Don't free the skb yet */ 1871 1872 check_txq_state(lio, skb); 1873 } 1874 1875 /** 1876 * \brief Adjust ptp frequency 1877 * @param ptp PTP clock info 1878 * @param ppb how much to adjust by, in parts-per-billion 1879 */ 1880 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 1881 { 1882 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1883 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1884 u64 comp, delta; 1885 unsigned long flags; 1886 bool neg_adj = false; 1887 1888 if (ppb < 0) { 1889 neg_adj = true; 1890 ppb = -ppb; 1891 } 1892 1893 /* The hardware adds the clock compensation value to the 1894 * PTP clock on every coprocessor clock cycle, so we 1895 * compute the delta in terms of coprocessor clocks. 1896 */ 1897 delta = (u64)ppb << 32; 1898 do_div(delta, oct->coproc_clock_rate); 1899 1900 spin_lock_irqsave(&lio->ptp_lock, flags); 1901 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); 1902 if (neg_adj) 1903 comp -= delta; 1904 else 1905 comp += delta; 1906 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1907 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1908 1909 return 0; 1910 } 1911 1912 /** 1913 * \brief Adjust ptp time 1914 * @param ptp PTP clock info 1915 * @param delta how much to adjust by, in nanosecs 1916 */ 1917 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 1918 { 1919 unsigned long flags; 1920 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1921 1922 spin_lock_irqsave(&lio->ptp_lock, flags); 1923 lio->ptp_adjust += delta; 1924 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1925 1926 return 0; 1927 } 1928 1929 /** 1930 * \brief Get hardware clock time, including any adjustment 1931 * @param ptp PTP clock info 1932 * @param ts timespec 1933 */ 1934 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, 1935 struct timespec64 *ts) 1936 { 1937 u64 ns; 1938 unsigned long flags; 1939 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1940 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1941 1942 spin_lock_irqsave(&lio->ptp_lock, flags); 1943 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); 1944 ns += lio->ptp_adjust; 1945 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1946 1947 *ts = ns_to_timespec64(ns); 1948 1949 return 0; 1950 } 1951 1952 /** 1953 * \brief Set hardware clock time. 
Reset adjustment 1954 * @param ptp PTP clock info 1955 * @param ts timespec 1956 */ 1957 static int liquidio_ptp_settime(struct ptp_clock_info *ptp, 1958 const struct timespec64 *ts) 1959 { 1960 u64 ns; 1961 unsigned long flags; 1962 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1963 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1964 1965 ns = timespec64_to_ns(ts); 1966 1967 spin_lock_irqsave(&lio->ptp_lock, flags); 1968 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1969 lio->ptp_adjust = 0; 1970 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1971 1972 return 0; 1973 } 1974 1975 /** 1976 * \brief Check if PTP is enabled 1977 * @param ptp PTP clock info 1978 * @param rq request 1979 * @param on is it on 1980 */ 1981 static int 1982 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)), 1983 struct ptp_clock_request *rq __attribute__((unused)), 1984 int on __attribute__((unused))) 1985 { 1986 return -EOPNOTSUPP; 1987 } 1988 1989 /** 1990 * \brief Open PTP clock source 1991 * @param netdev network device 1992 */ 1993 static void oct_ptp_open(struct net_device *netdev) 1994 { 1995 struct lio *lio = GET_LIO(netdev); 1996 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1997 1998 spin_lock_init(&lio->ptp_lock); 1999 2000 snprintf(lio->ptp_info.name, 16, "%s", netdev->name); 2001 lio->ptp_info.owner = THIS_MODULE; 2002 lio->ptp_info.max_adj = 250000000; 2003 lio->ptp_info.n_alarm = 0; 2004 lio->ptp_info.n_ext_ts = 0; 2005 lio->ptp_info.n_per_out = 0; 2006 lio->ptp_info.pps = 0; 2007 lio->ptp_info.adjfreq = liquidio_ptp_adjfreq; 2008 lio->ptp_info.adjtime = liquidio_ptp_adjtime; 2009 lio->ptp_info.gettime64 = liquidio_ptp_gettime; 2010 lio->ptp_info.settime64 = liquidio_ptp_settime; 2011 lio->ptp_info.enable = liquidio_ptp_enable; 2012 2013 lio->ptp_adjust = 0; 2014 2015 lio->ptp_clock = ptp_clock_register(&lio->ptp_info, 2016 &oct->pci_dev->dev); 2017 2018 if (IS_ERR(lio->ptp_clock)) 2019 lio->ptp_clock = NULL; 2020 } 2021 2022 /** 2023 * \brief Init PTP clock 2024 * @param oct octeon device 2025 */ 2026 static void liquidio_ptp_init(struct octeon_device *oct) 2027 { 2028 u64 clock_comp, cfg; 2029 2030 clock_comp = (u64)NSEC_PER_SEC << 32; 2031 do_div(clock_comp, oct->coproc_clock_rate); 2032 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); 2033 2034 /* Enable */ 2035 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); 2036 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); 2037 } 2038 2039 /** 2040 * \brief Load firmware to device 2041 * @param oct octeon device 2042 * 2043 * Maps device to firmware filename, requests firmware, and downloads it 2044 */ 2045 static int load_firmware(struct octeon_device *oct) 2046 { 2047 int ret = 0; 2048 const struct firmware *fw; 2049 char fw_name[LIO_MAX_FW_FILENAME_LEN]; 2050 char *tmp_fw_type; 2051 2052 if (fw_type_is_auto()) { 2053 tmp_fw_type = LIO_FW_NAME_TYPE_NIC; 2054 strncpy(fw_type, tmp_fw_type, sizeof(fw_type)); 2055 } else { 2056 tmp_fw_type = fw_type; 2057 } 2058 2059 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, 2060 octeon_get_conf(oct)->card_name, tmp_fw_type, 2061 LIO_FW_NAME_SUFFIX); 2062 2063 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); 2064 if (ret) { 2065 dev_err(&oct->pci_dev->dev, "Request firmware failed. 
Could not find file %s.\n.", 2066 fw_name); 2067 release_firmware(fw); 2068 return ret; 2069 } 2070 2071 ret = octeon_download_firmware(oct, fw->data, fw->size); 2072 2073 release_firmware(fw); 2074 2075 return ret; 2076 } 2077 2078 /** 2079 * \brief Callback for getting interface configuration 2080 * @param status status of request 2081 * @param buf pointer to resp structure 2082 */ 2083 static void if_cfg_callback(struct octeon_device *oct, 2084 u32 status __attribute__((unused)), 2085 void *buf) 2086 { 2087 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 2088 struct liquidio_if_cfg_resp *resp; 2089 struct liquidio_if_cfg_context *ctx; 2090 2091 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 2092 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 2093 2094 oct = lio_get_device(ctx->octeon_id); 2095 if (resp->status) 2096 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n", 2097 CVM_CAST64(resp->status), status); 2098 WRITE_ONCE(ctx->cond, 1); 2099 2100 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", 2101 resp->cfg_info.liquidio_firmware_version); 2102 2103 /* This barrier is required to be sure that the response has been 2104 * written fully before waking up the handler 2105 */ 2106 wmb(); 2107 2108 wake_up_interruptible(&ctx->wc); 2109 } 2110 2111 /** 2112 * \brief Poll routine for checking transmit queue status 2113 * @param work work_struct data structure 2114 */ 2115 static void octnet_poll_check_txq_status(struct work_struct *work) 2116 { 2117 struct cavium_wk *wk = (struct cavium_wk *)work; 2118 struct lio *lio = (struct lio *)wk->ctxptr; 2119 2120 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) 2121 return; 2122 2123 check_txq_status(lio); 2124 queue_delayed_work(lio->txq_status_wq.wq, 2125 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 2126 } 2127 2128 /** 2129 * \brief Sets up the txq poll check 2130 * @param netdev network device 2131 */ 2132 static inline int setup_tx_poll_fn(struct net_device *netdev) 2133 { 2134 struct lio *lio = GET_LIO(netdev); 2135 struct octeon_device *oct = lio->oct_dev; 2136 2137 lio->txq_status_wq.wq = alloc_workqueue("txq-status", 2138 WQ_MEM_RECLAIM, 0); 2139 if (!lio->txq_status_wq.wq) { 2140 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); 2141 return -1; 2142 } 2143 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, 2144 octnet_poll_check_txq_status); 2145 lio->txq_status_wq.wk.ctxptr = lio; 2146 queue_delayed_work(lio->txq_status_wq.wq, 2147 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 2148 return 0; 2149 } 2150 2151 static inline void cleanup_tx_poll_fn(struct net_device *netdev) 2152 { 2153 struct lio *lio = GET_LIO(netdev); 2154 2155 if (lio->txq_status_wq.wq) { 2156 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); 2157 destroy_workqueue(lio->txq_status_wq.wq); 2158 } 2159 } 2160 2161 /** 2162 * \brief Net device open for LiquidIO 2163 * @param netdev network device 2164 */ 2165 static int liquidio_open(struct net_device *netdev) 2166 { 2167 struct lio *lio = GET_LIO(netdev); 2168 struct octeon_device *oct = lio->oct_dev; 2169 struct napi_struct *napi, *n; 2170 2171 if (oct->props[lio->ifidx].napi_enabled == 0) { 2172 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 2173 napi_enable(napi); 2174 2175 oct->props[lio->ifidx].napi_enabled = 1; 2176 2177 if (OCTEON_CN23XX_PF(oct)) 2178 oct->droq[0]->ops.poll_mode = 1; 2179 } 2180 2181 if (oct->ptp_enable) 2182 oct_ptp_open(netdev); 2183 2184 ifstate_set(lio, LIO_IFSTATE_RUNNING); 
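/* Once the interface is marked RUNNING, the remaining open steps follow: accept link status updates, set up the Tx queue status poller when the device is not using MSI-X interrupts (always for CN6XXX parts), wake the Tx queues, and send the Rx control command that tells the firmware to start forwarding packets to the host. */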
2185 2186 /* Ready for link status updates */ 2187 lio->intf_open = 1; 2188 2189 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 2190 2191 if (OCTEON_CN23XX_PF(oct)) { 2192 if (!oct->msix_on) 2193 if (setup_tx_poll_fn(netdev)) 2194 return -1; 2195 } else { 2196 if (setup_tx_poll_fn(netdev)) 2197 return -1; 2198 } 2199 2200 start_txq(netdev); 2201 2202 /* tell Octeon to start forwarding packets to host */ 2203 send_rx_ctrl_cmd(lio, 1); 2204 2205 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", 2206 netdev->name); 2207 2208 return 0; 2209 } 2210 2211 /** 2212 * \brief Net device stop for LiquidIO 2213 * @param netdev network device 2214 */ 2215 static int liquidio_stop(struct net_device *netdev) 2216 { 2217 struct lio *lio = GET_LIO(netdev); 2218 struct octeon_device *oct = lio->oct_dev; 2219 struct napi_struct *napi, *n; 2220 2221 if (oct->props[lio->ifidx].napi_enabled) { 2222 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 2223 napi_disable(napi); 2224 2225 oct->props[lio->ifidx].napi_enabled = 0; 2226 2227 if (OCTEON_CN23XX_PF(oct)) 2228 oct->droq[0]->ops.poll_mode = 0; 2229 } 2230 2231 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 2232 2233 netif_tx_disable(netdev); 2234 2235 /* Inform that netif carrier is down */ 2236 netif_carrier_off(netdev); 2237 lio->intf_open = 0; 2238 lio->linfo.link.s.link_up = 0; 2239 lio->link_changes++; 2240 2241 /* Tell Octeon that nic interface is down. */ 2242 send_rx_ctrl_cmd(lio, 0); 2243 2244 if (OCTEON_CN23XX_PF(oct)) { 2245 if (!oct->msix_on) 2246 cleanup_tx_poll_fn(netdev); 2247 } else { 2248 cleanup_tx_poll_fn(netdev); 2249 } 2250 2251 if (lio->ptp_clock) { 2252 ptp_clock_unregister(lio->ptp_clock); 2253 lio->ptp_clock = NULL; 2254 } 2255 2256 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 2257 2258 return 0; 2259 } 2260 2261 /** 2262 * \brief Converts a mask based on net device flags 2263 * @param netdev network device 2264 * 2265 * This routine generates a octnet_ifflags mask from the net device flags 2266 * received from the OS. 2267 */ 2268 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) 2269 { 2270 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 2271 2272 if (netdev->flags & IFF_PROMISC) 2273 f |= OCTNET_IFFLAG_PROMISC; 2274 2275 if (netdev->flags & IFF_ALLMULTI) 2276 f |= OCTNET_IFFLAG_ALLMULTI; 2277 2278 if (netdev->flags & IFF_MULTICAST) { 2279 f |= OCTNET_IFFLAG_MULTICAST; 2280 2281 /* Accept all multicast addresses if there are more than we 2282 * can handle 2283 */ 2284 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 2285 f |= OCTNET_IFFLAG_ALLMULTI; 2286 } 2287 2288 if (netdev->flags & IFF_BROADCAST) 2289 f |= OCTNET_IFFLAG_BROADCAST; 2290 2291 return f; 2292 } 2293 2294 /** 2295 * \brief Net device set_multicast_list 2296 * @param netdev network device 2297 */ 2298 static void liquidio_set_mcast_list(struct net_device *netdev) 2299 { 2300 struct lio *lio = GET_LIO(netdev); 2301 struct octeon_device *oct = lio->oct_dev; 2302 struct octnic_ctrl_pkt nctrl; 2303 struct netdev_hw_addr *ha; 2304 u64 *mc; 2305 int ret; 2306 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 2307 2308 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2309 2310 /* Create a ctrl pkt command to be sent to core app. 
*/ 2311 nctrl.ncmd.u64 = 0; 2312 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 2313 nctrl.ncmd.s.param1 = get_new_flags(netdev); 2314 nctrl.ncmd.s.param2 = mc_count; 2315 nctrl.ncmd.s.more = mc_count; 2316 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2317 nctrl.netpndev = (u64)netdev; 2318 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2319 2320 /* copy all the addresses into the udd */ 2321 mc = &nctrl.udd[0]; 2322 netdev_for_each_mc_addr(ha, netdev) { 2323 *mc = 0; 2324 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); 2325 /* no need to swap bytes */ 2326 2327 if (++mc > &nctrl.udd[mc_count]) 2328 break; 2329 } 2330 2331 /* Apparently, any activity in this call from the kernel has to 2332 * be atomic. So we won't wait for response. 2333 */ 2334 nctrl.wait_time = 0; 2335 2336 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2337 if (ret < 0) { 2338 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 2339 ret); 2340 } 2341 } 2342 2343 /** 2344 * \brief Net device set_mac_address 2345 * @param netdev network device 2346 */ 2347 static int liquidio_set_mac(struct net_device *netdev, void *p) 2348 { 2349 int ret = 0; 2350 struct lio *lio = GET_LIO(netdev); 2351 struct octeon_device *oct = lio->oct_dev; 2352 struct sockaddr *addr = (struct sockaddr *)p; 2353 struct octnic_ctrl_pkt nctrl; 2354 2355 if (!is_valid_ether_addr(addr->sa_data)) 2356 return -EADDRNOTAVAIL; 2357 2358 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2359 2360 nctrl.ncmd.u64 = 0; 2361 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2362 nctrl.ncmd.s.param1 = 0; 2363 nctrl.ncmd.s.more = 1; 2364 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2365 nctrl.netpndev = (u64)netdev; 2366 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2367 nctrl.wait_time = 100; 2368 2369 nctrl.udd[0] = 0; 2370 /* The MAC Address is presented in network byte order. 
*/ 2371 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); 2372 2373 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2374 if (ret < 0) { 2375 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 2376 return -ENOMEM; 2377 } 2378 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2379 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); 2380 2381 return 0; 2382 } 2383 2384 /** 2385 * \brief Net device get_stats 2386 * @param netdev network device 2387 */ 2388 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) 2389 { 2390 struct lio *lio = GET_LIO(netdev); 2391 struct net_device_stats *stats = &netdev->stats; 2392 struct octeon_device *oct; 2393 u64 pkts = 0, drop = 0, bytes = 0; 2394 struct oct_droq_stats *oq_stats; 2395 struct oct_iq_stats *iq_stats; 2396 int i, iq_no, oq_no; 2397 2398 oct = lio->oct_dev; 2399 2400 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 2401 return stats; 2402 2403 for (i = 0; i < oct->num_iqs; i++) { 2404 iq_no = lio->linfo.txpciq[i].s.q_no; 2405 iq_stats = &oct->instr_queue[iq_no]->stats; 2406 pkts += iq_stats->tx_done; 2407 drop += iq_stats->tx_dropped; 2408 bytes += iq_stats->tx_tot_bytes; 2409 } 2410 2411 stats->tx_packets = pkts; 2412 stats->tx_bytes = bytes; 2413 stats->tx_dropped = drop; 2414 2415 pkts = 0; 2416 drop = 0; 2417 bytes = 0; 2418 2419 for (i = 0; i < oct->num_oqs; i++) { 2420 oq_no = lio->linfo.rxpciq[i].s.q_no; 2421 oq_stats = &oct->droq[oq_no]->stats; 2422 pkts += oq_stats->rx_pkts_received; 2423 drop += (oq_stats->rx_dropped + 2424 oq_stats->dropped_nodispatch + 2425 oq_stats->dropped_toomany + 2426 oq_stats->dropped_nomem); 2427 bytes += oq_stats->rx_bytes_received; 2428 } 2429 2430 stats->rx_bytes = bytes; 2431 stats->rx_packets = pkts; 2432 stats->rx_dropped = drop; 2433 2434 return stats; 2435 } 2436 2437 /** 2438 * \brief Net device change_mtu 2439 * @param netdev network device 2440 */ 2441 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) 2442 { 2443 struct lio *lio = GET_LIO(netdev); 2444 struct octeon_device *oct = lio->oct_dev; 2445 struct octnic_ctrl_pkt nctrl; 2446 int ret = 0; 2447 2448 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2449 2450 nctrl.ncmd.u64 = 0; 2451 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; 2452 nctrl.ncmd.s.param1 = new_mtu; 2453 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2454 nctrl.wait_time = 100; 2455 nctrl.netpndev = (u64)netdev; 2456 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2457 2458 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2459 if (ret < 0) { 2460 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); 2461 return -1; 2462 } 2463 2464 lio->mtu = new_mtu; 2465 2466 return 0; 2467 } 2468 2469 /** 2470 * \brief Handler for SIOCSHWTSTAMP ioctl 2471 * @param netdev network device 2472 * @param ifr interface request 2473 * @param cmd command 2474 */ 2475 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2476 { 2477 struct hwtstamp_config conf; 2478 struct lio *lio = GET_LIO(netdev); 2479 2480 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2481 return -EFAULT; 2482 2483 if (conf.flags) 2484 return -EINVAL; 2485 2486 switch (conf.tx_type) { 2487 case HWTSTAMP_TX_ON: 2488 case HWTSTAMP_TX_OFF: 2489 break; 2490 default: 2491 return -ERANGE; 2492 } 2493 2494 switch (conf.rx_filter) { 2495 case HWTSTAMP_FILTER_NONE: 2496 break; 2497 case HWTSTAMP_FILTER_ALL: 2498 case HWTSTAMP_FILTER_SOME: 2499 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2500 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2501 
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2502 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2503 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2504 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2505 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2506 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2507 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2508 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2509 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2510 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2511 case HWTSTAMP_FILTER_NTP_ALL: 2512 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2513 break; 2514 default: 2515 return -ERANGE; 2516 } 2517 2518 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2519 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2520 2521 else 2522 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2523 2524 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2525 } 2526 2527 /** 2528 * \brief ioctl handler 2529 * @param netdev network device 2530 * @param ifr interface request 2531 * @param cmd command 2532 */ 2533 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2534 { 2535 struct lio *lio = GET_LIO(netdev); 2536 2537 switch (cmd) { 2538 case SIOCSHWTSTAMP: 2539 if (lio->oct_dev->ptp_enable) 2540 return hwtstamp_ioctl(netdev, ifr); 2541 default: 2542 return -EOPNOTSUPP; 2543 } 2544 } 2545 2546 /** 2547 * \brief handle a Tx timestamp response 2548 * @param status response status 2549 * @param buf pointer to skb 2550 */ 2551 static void handle_timestamp(struct octeon_device *oct, 2552 u32 status, 2553 void *buf) 2554 { 2555 struct octnet_buf_free_info *finfo; 2556 struct octeon_soft_command *sc; 2557 struct oct_timestamp_resp *resp; 2558 struct lio *lio; 2559 struct sk_buff *skb = (struct sk_buff *)buf; 2560 2561 finfo = (struct octnet_buf_free_info *)skb->cb; 2562 lio = finfo->lio; 2563 sc = finfo->sc; 2564 oct = lio->oct_dev; 2565 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2566 2567 if (status != OCTEON_REQUEST_DONE) { 2568 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", 2569 CVM_CAST64(status)); 2570 resp->timestamp = 0; 2571 } 2572 2573 octeon_swap_8B_data(&resp->timestamp, 1); 2574 2575 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { 2576 struct skb_shared_hwtstamps ts; 2577 u64 ns = resp->timestamp; 2578 2579 netif_info(lio, tx_done, lio->netdev, 2580 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2581 skb, (unsigned long long)ns); 2582 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2583 skb_tstamp_tx(skb, &ts); 2584 } 2585 2586 octeon_free_soft_command(oct, sc); 2587 tx_buffer_free(skb); 2588 } 2589 2590 /** \brief Send a data packet that will be timestamped 2591 * @param oct octeon device 2592 * @param ndata pointer to network data 2593 * @param finfo pointer to private network data 2594 */ 2595 static inline int send_nic_timestamp_pkt(struct octeon_device *oct, 2596 struct octnic_data_pkt *ndata, 2597 struct octnet_buf_free_info *finfo, 2598 int xmit_more) 2599 { 2600 int retval; 2601 struct octeon_soft_command *sc; 2602 struct lio *lio; 2603 int ring_doorbell; 2604 u32 len; 2605 2606 lio = finfo->lio; 2607 2608 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2609 sizeof(struct oct_timestamp_resp)); 2610 finfo->sc = sc; 2611 2612 if (!sc) { 2613 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2614 return IQ_SEND_FAILED; 2615 } 2616 2617 if (ndata->reqtype == REQTYPE_NORESP_NET) 2618 ndata->reqtype = REQTYPE_RESP_NET; 2619 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2620 ndata->reqtype = REQTYPE_RESP_NET_SG; 2621 2622 sc->callback = handle_timestamp; 2623 sc->callback_arg = finfo->skb; 2624 sc->iq_no = ndata->q_no; 2625 2626 if (OCTEON_CN23XX_PF(oct)) 2627 len = (u32)((struct octeon_instr_ih3 *) 2628 (&sc->cmd.cmd3.ih3))->dlengsz; 2629 else 2630 len = (u32)((struct octeon_instr_ih2 *) 2631 (&sc->cmd.cmd2.ih2))->dlengsz; 2632 2633 ring_doorbell = !xmit_more; 2634 2635 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2636 sc, len, ndata->reqtype); 2637 2638 if (retval == IQ_SEND_FAILED) { 2639 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2640 retval); 2641 octeon_free_soft_command(oct, sc); 2642 } else { 2643 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2644 } 2645 2646 return retval; 2647 } 2648 2649 /** \brief Transmit network packets to the Octeon interface 2650 * @param skb sk_buff struct handed down by the network stack 2651 * @param netdev pointer to network device 2652 * @returns whether the packet was transmitted to the device okay or not 2653 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2654 */ 2655 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2656 { 2657 struct lio *lio; 2658 struct octnet_buf_free_info *finfo; 2659 union octnic_cmd_setup cmdsetup; 2660 struct octnic_data_pkt ndata; 2661 struct octeon_device *oct; 2662 struct oct_iq_stats *stats; 2663 struct octeon_instr_irh *irh; 2664 union tx_info *tx_info; 2665 int status = 0; 2666 int q_idx = 0, iq_no = 0; 2667 int j, xmit_more = 0; 2668 u64 dptr = 0; 2669 u32 tag = 0; 2670 2671 lio = GET_LIO(netdev); 2672 oct = lio->oct_dev; 2673 2674 if (netif_is_multiqueue(netdev)) { 2675 q_idx = skb->queue_mapping; 2676 q_idx = (q_idx % (lio->linfo.num_txpciq)); 2677 tag = q_idx; 2678 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2679 } else { 2680 iq_no = lio->txq; 2681 } 2682 2683 stats = &oct->instr_queue[iq_no]->stats; 2684 2685 /* Check for all conditions in which the current packet cannot be 2686 * transmitted. 
2687 */ 2688 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2689 (!lio->linfo.link.s.link_up) || 2690 (skb->len <= 0)) { 2691 netif_info(lio, tx_err, lio->netdev, 2692 "Transmit failed link_status : %d\n", 2693 lio->linfo.link.s.link_up); 2694 goto lio_xmit_failed; 2695 } 2696 2697 /* Use space in skb->cb to store info used to unmap and 2698 * free the buffers. 2699 */ 2700 finfo = (struct octnet_buf_free_info *)skb->cb; 2701 finfo->lio = lio; 2702 finfo->skb = skb; 2703 finfo->sc = NULL; 2704 2705 /* Prepare the attributes for the data to be passed to OSI. */ 2706 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2707 2708 ndata.buf = (void *)finfo; 2709 2710 ndata.q_no = iq_no; 2711 2712 if (netif_is_multiqueue(netdev)) { 2713 if (octnet_iq_is_full(oct, ndata.q_no)) { 2714 /* defer sending if queue is full */ 2715 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2716 ndata.q_no); 2717 stats->tx_iq_busy++; 2718 return NETDEV_TX_BUSY; 2719 } 2720 } else { 2721 if (octnet_iq_is_full(oct, lio->txq)) { 2722 /* defer sending if queue is full */ 2723 stats->tx_iq_busy++; 2724 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2725 lio->txq); 2726 return NETDEV_TX_BUSY; 2727 } 2728 } 2729 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2730 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); 2731 */ 2732 2733 ndata.datasize = skb->len; 2734 2735 cmdsetup.u64 = 0; 2736 cmdsetup.s.iq_no = iq_no; 2737 2738 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2739 if (skb->encapsulation) { 2740 cmdsetup.s.tnl_csum = 1; 2741 stats->tx_vxlan++; 2742 } else { 2743 cmdsetup.s.transport_csum = 1; 2744 } 2745 } 2746 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2747 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2748 cmdsetup.s.timestamp = 1; 2749 } 2750 2751 if (skb_shinfo(skb)->nr_frags == 0) { 2752 cmdsetup.s.u.datasize = skb->len; 2753 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2754 2755 /* Offload checksum calculation for TCP/UDP packets */ 2756 dptr = dma_map_single(&oct->pci_dev->dev, 2757 skb->data, 2758 skb->len, 2759 DMA_TO_DEVICE); 2760 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2761 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2762 __func__); 2763 return NETDEV_TX_BUSY; 2764 } 2765 2766 if (OCTEON_CN23XX_PF(oct)) 2767 ndata.cmd.cmd3.dptr = dptr; 2768 else 2769 ndata.cmd.cmd2.dptr = dptr; 2770 finfo->dptr = dptr; 2771 ndata.reqtype = REQTYPE_NORESP_NET; 2772 2773 } else { 2774 int i, frags; 2775 struct skb_frag_struct *frag; 2776 struct octnic_gather *g; 2777 2778 spin_lock(&lio->glist_lock[q_idx]); 2779 g = (struct octnic_gather *) 2780 list_delete_head(&lio->glist[q_idx]); 2781 spin_unlock(&lio->glist_lock[q_idx]); 2782 2783 if (!g) { 2784 netif_info(lio, tx_err, lio->netdev, 2785 "Transmit scatter gather: glist null!\n"); 2786 goto lio_xmit_failed; 2787 } 2788 2789 cmdsetup.s.gather = 1; 2790 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2791 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2792 2793 memset(g->sg, 0, g->sg_size); 2794 2795 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2796 skb->data, 2797 (skb->len - skb->data_len), 2798 DMA_TO_DEVICE); 2799 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2800 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2801 __func__); 2802 return NETDEV_TX_BUSY; 2803 } 2804 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2805 2806 frags = skb_shinfo(skb)->nr_frags; 2807 i = 1; 2808 while 
(frags--) { 2809 frag = &skb_shinfo(skb)->frags[i - 1]; 2810 2811 g->sg[(i >> 2)].ptr[(i & 3)] = 2812 dma_map_page(&oct->pci_dev->dev, 2813 frag->page.p, 2814 frag->page_offset, 2815 frag->size, 2816 DMA_TO_DEVICE); 2817 2818 if (dma_mapping_error(&oct->pci_dev->dev, 2819 g->sg[i >> 2].ptr[i & 3])) { 2820 dma_unmap_single(&oct->pci_dev->dev, 2821 g->sg[0].ptr[0], 2822 skb->len - skb->data_len, 2823 DMA_TO_DEVICE); 2824 for (j = 1; j < i; j++) { 2825 frag = &skb_shinfo(skb)->frags[j - 1]; 2826 dma_unmap_page(&oct->pci_dev->dev, 2827 g->sg[j >> 2].ptr[j & 3], 2828 frag->size, 2829 DMA_TO_DEVICE); 2830 } 2831 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2832 __func__); 2833 return NETDEV_TX_BUSY; 2834 } 2835 2836 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 2837 i++; 2838 } 2839 2840 dptr = g->sg_dma_ptr; 2841 2842 if (OCTEON_CN23XX_PF(oct)) 2843 ndata.cmd.cmd3.dptr = dptr; 2844 else 2845 ndata.cmd.cmd2.dptr = dptr; 2846 finfo->dptr = dptr; 2847 finfo->g = g; 2848 2849 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2850 } 2851 2852 if (OCTEON_CN23XX_PF(oct)) { 2853 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2854 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2855 } else { 2856 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; 2857 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; 2858 } 2859 2860 if (skb_shinfo(skb)->gso_size) { 2861 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2862 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2863 stats->tx_gso++; 2864 } 2865 2866 /* HW insert VLAN tag */ 2867 if (skb_vlan_tag_present(skb)) { 2868 irh->priority = skb_vlan_tag_get(skb) >> 13; 2869 irh->vlan = skb_vlan_tag_get(skb) & 0xfff; 2870 } 2871 2872 xmit_more = skb->xmit_more; 2873 2874 if (unlikely(cmdsetup.s.timestamp)) 2875 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 2876 else 2877 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 2878 if (status == IQ_SEND_FAILED) 2879 goto lio_xmit_failed; 2880 2881 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2882 2883 if (status == IQ_SEND_STOP) 2884 stop_q(netdev, q_idx); 2885 2886 netif_trans_update(netdev); 2887 2888 if (tx_info->s.gso_segs) 2889 stats->tx_done += tx_info->s.gso_segs; 2890 else 2891 stats->tx_done++; 2892 stats->tx_tot_bytes += ndata.datasize; 2893 2894 return NETDEV_TX_OK; 2895 2896 lio_xmit_failed: 2897 stats->tx_dropped++; 2898 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2899 iq_no, stats->tx_dropped); 2900 if (dptr) 2901 dma_unmap_single(&oct->pci_dev->dev, dptr, 2902 ndata.datasize, DMA_TO_DEVICE); 2903 2904 octeon_ring_doorbell_locked(oct, iq_no); 2905 2906 tx_buffer_free(skb); 2907 return NETDEV_TX_OK; 2908 } 2909 2910 /** \brief Network device Tx timeout 2911 * @param netdev pointer to network device 2912 */ 2913 static void liquidio_tx_timeout(struct net_device *netdev) 2914 { 2915 struct lio *lio; 2916 2917 lio = GET_LIO(netdev); 2918 2919 netif_info(lio, tx_err, lio->netdev, 2920 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2921 netdev->stats.tx_dropped); 2922 netif_trans_update(netdev); 2923 txqs_wake(netdev); 2924 } 2925 2926 static int liquidio_vlan_rx_add_vid(struct net_device *netdev, 2927 __be16 proto __attribute__((unused)), 2928 u16 vid) 2929 { 2930 struct lio *lio = GET_LIO(netdev); 2931 struct octeon_device *oct = lio->oct_dev; 2932 struct octnic_ctrl_pkt nctrl; 2933 int ret = 0; 2934 2935 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2936 2937 nctrl.ncmd.u64 = 0; 2938 
nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2939 nctrl.ncmd.s.param1 = vid; 2940 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2941 nctrl.wait_time = 100; 2942 nctrl.netpndev = (u64)netdev; 2943 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2944 2945 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2946 if (ret < 0) { 2947 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2948 ret); 2949 } 2950 2951 return ret; 2952 } 2953 2954 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, 2955 __be16 proto __attribute__((unused)), 2956 u16 vid) 2957 { 2958 struct lio *lio = GET_LIO(netdev); 2959 struct octeon_device *oct = lio->oct_dev; 2960 struct octnic_ctrl_pkt nctrl; 2961 int ret = 0; 2962 2963 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2964 2965 nctrl.ncmd.u64 = 0; 2966 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2967 nctrl.ncmd.s.param1 = vid; 2968 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2969 nctrl.wait_time = 100; 2970 nctrl.netpndev = (u64)netdev; 2971 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2972 2973 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2974 if (ret < 0) { 2975 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 2976 ret); 2977 } 2978 return ret; 2979 } 2980 2981 /** Send a command to enable/disable RX checksum offload 2982 * @param netdev pointer to network device 2983 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 2984 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/ 2985 * OCTNET_CMD_RXCSUM_DISABLE 2986 * @returns SUCCESS or FAILURE 2987 */ 2988 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 2989 u8 rx_cmd) 2990 { 2991 struct lio *lio = GET_LIO(netdev); 2992 struct octeon_device *oct = lio->oct_dev; 2993 struct octnic_ctrl_pkt nctrl; 2994 int ret = 0; 2995 2996 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2997 2998 nctrl.ncmd.u64 = 0; 2999 nctrl.ncmd.s.cmd = command; 3000 nctrl.ncmd.s.param1 = rx_cmd; 3001 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3002 nctrl.wait_time = 100; 3003 nctrl.netpndev = (u64)netdev; 3004 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 3005 3006 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 3007 if (ret < 0) { 3008 dev_err(&oct->pci_dev->dev, 3009 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", 3010 ret); 3011 } 3012 return ret; 3013 } 3014 3015 /** Send a command to add/delete a VxLAN UDP port to the firmware 3016 * @param netdev pointer to network device 3017 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 3018 * @param vxlan_port VxLAN port to be added or deleted 3019 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 3020 * OCTNET_CMD_VXLAN_PORT_DEL 3021 * @returns SUCCESS or FAILURE 3022 */ 3023 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 3024 u16 vxlan_port, u8 vxlan_cmd_bit) 3025 { 3026 struct lio *lio = GET_LIO(netdev); 3027 struct octeon_device *oct = lio->oct_dev; 3028 struct octnic_ctrl_pkt nctrl; 3029 int ret = 0; 3030 3031 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3032 3033 nctrl.ncmd.u64 = 0; 3034 nctrl.ncmd.s.cmd = command; 3035 nctrl.ncmd.s.more = vxlan_cmd_bit; 3036 nctrl.ncmd.s.param1 = vxlan_port; 3037 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3038 nctrl.wait_time = 100; 3039 nctrl.netpndev = (u64)netdev; 3040 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 3041 3042 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 3043 if (ret < 0) { 3044 dev_err(&oct->pci_dev->dev, 3045 "VxLAN port add/delete failed in core (ret:0x%x)\n", 3046 ret); 3047 } 3048 
return ret; 3049 } 3050 3051 /** \brief Net device fix features 3052 * @param netdev pointer to network device 3053 * @param request features requested 3054 * @returns updated features list 3055 */ 3056 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 3057 netdev_features_t request) 3058 { 3059 struct lio *lio = netdev_priv(netdev); 3060 3061 if ((request & NETIF_F_RXCSUM) && 3062 !(lio->dev_capability & NETIF_F_RXCSUM)) 3063 request &= ~NETIF_F_RXCSUM; 3064 3065 if ((request & NETIF_F_HW_CSUM) && 3066 !(lio->dev_capability & NETIF_F_HW_CSUM)) 3067 request &= ~NETIF_F_HW_CSUM; 3068 3069 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 3070 request &= ~NETIF_F_TSO; 3071 3072 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 3073 request &= ~NETIF_F_TSO6; 3074 3075 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 3076 request &= ~NETIF_F_LRO; 3077 3078 /*Disable LRO if RXCSUM is off */ 3079 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 3080 (lio->dev_capability & NETIF_F_LRO)) 3081 request &= ~NETIF_F_LRO; 3082 3083 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) && 3084 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER)) 3085 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 3086 3087 return request; 3088 } 3089 3090 /** \brief Net device set features 3091 * @param netdev pointer to network device 3092 * @param features features to enable/disable 3093 */ 3094 static int liquidio_set_features(struct net_device *netdev, 3095 netdev_features_t features) 3096 { 3097 struct lio *lio = netdev_priv(netdev); 3098 3099 if ((features & NETIF_F_LRO) && 3100 (lio->dev_capability & NETIF_F_LRO) && 3101 !(netdev->features & NETIF_F_LRO)) 3102 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3103 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3104 else if (!(features & NETIF_F_LRO) && 3105 (lio->dev_capability & NETIF_F_LRO) && 3106 (netdev->features & NETIF_F_LRO)) 3107 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 3108 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3109 3110 /* Sending command to firmware to enable/disable RX checksum 3111 * offload settings using ethtool 3112 */ 3113 if (!(netdev->features & NETIF_F_RXCSUM) && 3114 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 3115 (features & NETIF_F_RXCSUM)) 3116 liquidio_set_rxcsum_command(netdev, 3117 OCTNET_CMD_TNL_RX_CSUM_CTL, 3118 OCTNET_CMD_RXCSUM_ENABLE); 3119 else if ((netdev->features & NETIF_F_RXCSUM) && 3120 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 3121 !(features & NETIF_F_RXCSUM)) 3122 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3123 OCTNET_CMD_RXCSUM_DISABLE); 3124 3125 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 3126 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 3127 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 3128 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 3129 OCTNET_CMD_VLAN_FILTER_ENABLE); 3130 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 3131 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 3132 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 3133 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 3134 OCTNET_CMD_VLAN_FILTER_DISABLE); 3135 3136 return 0; 3137 } 3138 3139 static void liquidio_add_vxlan_port(struct net_device *netdev, 3140 struct udp_tunnel_info *ti) 3141 { 3142 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 3143 return; 3144 3145 liquidio_vxlan_port_command(netdev, 3146 OCTNET_CMD_VXLAN_PORT_CONFIG, 3147 htons(ti->port), 3148 OCTNET_CMD_VXLAN_PORT_ADD); 3149 } 
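/* liquidio_del_vxlan_port() below is the counterpart of liquidio_add_vxlan_port(): it verifies that the tunnel type is VxLAN and issues OCTNET_CMD_VXLAN_PORT_CONFIG with the OCTNET_CMD_VXLAN_PORT_DEL sub-command for the given port. */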
3150 3151 static void liquidio_del_vxlan_port(struct net_device *netdev, 3152 struct udp_tunnel_info *ti) 3153 { 3154 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 3155 return; 3156 3157 liquidio_vxlan_port_command(netdev, 3158 OCTNET_CMD_VXLAN_PORT_CONFIG, 3159 htons(ti->port), 3160 OCTNET_CMD_VXLAN_PORT_DEL); 3161 } 3162 3163 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, 3164 u8 *mac, bool is_admin_assigned) 3165 { 3166 struct lio *lio = GET_LIO(netdev); 3167 struct octeon_device *oct = lio->oct_dev; 3168 struct octnic_ctrl_pkt nctrl; 3169 3170 if (!is_valid_ether_addr(mac)) 3171 return -EINVAL; 3172 3173 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) 3174 return -EINVAL; 3175 3176 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3177 3178 nctrl.ncmd.u64 = 0; 3179 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 3180 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3181 nctrl.ncmd.s.param1 = vfidx + 1; 3182 nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0); 3183 nctrl.ncmd.s.more = 1; 3184 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3185 nctrl.netpndev = (u64)netdev; 3186 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 3187 nctrl.wait_time = LIO_CMD_WAIT_TM; 3188 3189 nctrl.udd[0] = 0; 3190 /* The MAC Address is presented in network byte order. */ 3191 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); 3192 3193 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; 3194 3195 octnet_send_nic_ctrl_pkt(oct, &nctrl); 3196 3197 return 0; 3198 } 3199 3200 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) 3201 { 3202 struct lio *lio = GET_LIO(netdev); 3203 struct octeon_device *oct = lio->oct_dev; 3204 int retval; 3205 3206 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3207 return -EINVAL; 3208 3209 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); 3210 if (!retval) 3211 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); 3212 3213 return retval; 3214 } 3215 3216 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, 3217 u16 vlan, u8 qos, __be16 vlan_proto) 3218 { 3219 struct lio *lio = GET_LIO(netdev); 3220 struct octeon_device *oct = lio->oct_dev; 3221 struct octnic_ctrl_pkt nctrl; 3222 u16 vlantci; 3223 3224 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3225 return -EINVAL; 3226 3227 if (vlan_proto != htons(ETH_P_8021Q)) 3228 return -EPROTONOSUPPORT; 3229 3230 if (vlan >= VLAN_N_VID || qos > 7) 3231 return -EINVAL; 3232 3233 if (vlan) 3234 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; 3235 else 3236 vlantci = 0; 3237 3238 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) 3239 return 0; 3240 3241 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3242 3243 if (vlan) 3244 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 3245 else 3246 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 3247 3248 nctrl.ncmd.s.param1 = vlantci; 3249 nctrl.ncmd.s.param2 = 3250 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ 3251 nctrl.ncmd.s.more = 0; 3252 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3253 nctrl.cb_fn = 0; 3254 nctrl.wait_time = LIO_CMD_WAIT_TM; 3255 3256 octnet_send_nic_ctrl_pkt(oct, &nctrl); 3257 3258 oct->sriov_info.vf_vlantci[vfidx] = vlantci; 3259 3260 return 0; 3261 } 3262 3263 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, 3264 struct ifla_vf_info *ivi) 3265 { 3266 struct lio *lio = GET_LIO(netdev); 3267 struct octeon_device *oct = lio->oct_dev; 3268 u8 *macaddr; 3269 3270 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3271 return -EINVAL; 3272 3273 
ivi->vf = vfidx; 3274 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; 3275 ether_addr_copy(&ivi->mac[0], macaddr); 3276 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; 3277 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; 3278 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; 3279 return 0; 3280 } 3281 3282 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 3283 int linkstate) 3284 { 3285 struct lio *lio = GET_LIO(netdev); 3286 struct octeon_device *oct = lio->oct_dev; 3287 struct octnic_ctrl_pkt nctrl; 3288 3289 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3290 return -EINVAL; 3291 3292 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) 3293 return 0; 3294 3295 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3296 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; 3297 nctrl.ncmd.s.param1 = 3298 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3299 nctrl.ncmd.s.param2 = linkstate; 3300 nctrl.ncmd.s.more = 0; 3301 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3302 nctrl.cb_fn = 0; 3303 nctrl.wait_time = LIO_CMD_WAIT_TM; 3304 3305 octnet_send_nic_ctrl_pkt(oct, &nctrl); 3306 3307 oct->sriov_info.vf_linkstate[vfidx] = linkstate; 3308 3309 return 0; 3310 } 3311 3312 static const struct net_device_ops lionetdevops = { 3313 .ndo_open = liquidio_open, 3314 .ndo_stop = liquidio_stop, 3315 .ndo_start_xmit = liquidio_xmit, 3316 .ndo_get_stats = liquidio_get_stats, 3317 .ndo_set_mac_address = liquidio_set_mac, 3318 .ndo_set_rx_mode = liquidio_set_mcast_list, 3319 .ndo_tx_timeout = liquidio_tx_timeout, 3320 3321 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 3322 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 3323 .ndo_change_mtu = liquidio_change_mtu, 3324 .ndo_do_ioctl = liquidio_ioctl, 3325 .ndo_fix_features = liquidio_fix_features, 3326 .ndo_set_features = liquidio_set_features, 3327 .ndo_udp_tunnel_add = liquidio_add_vxlan_port, 3328 .ndo_udp_tunnel_del = liquidio_del_vxlan_port, 3329 .ndo_set_vf_mac = liquidio_set_vf_mac, 3330 .ndo_set_vf_vlan = liquidio_set_vf_vlan, 3331 .ndo_get_vf_config = liquidio_get_vf_config, 3332 .ndo_set_vf_link_state = liquidio_set_vf_link_state, 3333 }; 3334 3335 /** \brief Entry point for the liquidio module 3336 */ 3337 static int __init liquidio_init(void) 3338 { 3339 int i; 3340 struct handshake *hs; 3341 3342 init_completion(&first_stage); 3343 3344 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); 3345 3346 if (liquidio_init_pci()) 3347 return -EINVAL; 3348 3349 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3350 3351 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3352 hs = &handshake[i]; 3353 if (hs->pci_dev) { 3354 wait_for_completion(&hs->init); 3355 if (!hs->init_ok) { 3356 /* init handshake failed */ 3357 dev_err(&hs->pci_dev->dev, 3358 "Failed to init device\n"); 3359 liquidio_deinit_pci(); 3360 return -EIO; 3361 } 3362 } 3363 } 3364 3365 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3366 hs = &handshake[i]; 3367 if (hs->pci_dev) { 3368 wait_for_completion_timeout(&hs->started, 3369 msecs_to_jiffies(30000)); 3370 if (!hs->started_ok) { 3371 /* starter handshake failed */ 3372 dev_err(&hs->pci_dev->dev, 3373 "Firmware failed to start\n"); 3374 liquidio_deinit_pci(); 3375 return -EIO; 3376 } 3377 } 3378 } 3379 3380 return 0; 3381 } 3382 3383 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3384 { 3385 struct octeon_device *oct = (struct octeon_device *)buf; 3386 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 
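/* A NIC_INFO message carries a single oct_link_status (preceded by the DROQ info header) for one gmx port; the code below validates the length, byte-swaps the status and passes it to the matching netdev via update_link_status(). */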
3387 int gmxport = 0; 3388 union oct_link_status *ls; 3389 int i; 3390 3391 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 3392 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3393 recv_pkt->buffer_size[0], 3394 recv_pkt->rh.r_nic_info.gmxport); 3395 goto nic_info_err; 3396 } 3397 3398 gmxport = recv_pkt->rh.r_nic_info.gmxport; 3399 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 3400 OCT_DROQ_INFO_SIZE); 3401 3402 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3403 for (i = 0; i < oct->ifcount; i++) { 3404 if (oct->props[i].gmxport == gmxport) { 3405 update_link_status(oct->props[i].netdev, ls); 3406 break; 3407 } 3408 } 3409 3410 nic_info_err: 3411 for (i = 0; i < recv_pkt->buffer_count; i++) 3412 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3413 octeon_free_recv_info(recv_info); 3414 return 0; 3415 } 3416 3417 /** 3418 * \brief Setup network interfaces 3419 * @param octeon_dev octeon device 3420 * 3421 * Called during init time for each device. It assumes the NIC 3422 * is already up and running. The link information for each 3423 * interface is passed in link_info. 3424 */ 3425 static int setup_nic_devices(struct octeon_device *octeon_dev) 3426 { 3427 struct lio *lio = NULL; 3428 struct net_device *netdev; 3429 u8 mac[6], i, j, *fw_ver; 3430 struct octeon_soft_command *sc; 3431 struct liquidio_if_cfg_context *ctx; 3432 struct liquidio_if_cfg_resp *resp; 3433 struct octdev_props *props; 3434 int retval, num_iqueues, num_oqueues; 3435 union oct_nic_if_cfg if_cfg; 3436 unsigned int base_queue; 3437 unsigned int gmx_port_id; 3438 u32 resp_size, ctx_size, data_size; 3439 u32 ifidx_or_pfnum; 3440 struct lio_version *vdata; 3441 3442 /* This is to handle link status changes */ 3443 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3444 OPCODE_NIC_INFO, 3445 lio_nic_info, octeon_dev); 3446 3447 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 3448 * They are handled directly. 
3449 */ 3450 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 3451 free_netbuf); 3452 3453 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 3454 free_netsgbuf); 3455 3456 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 3457 free_netsgbuf_with_resp); 3458 3459 for (i = 0; i < octeon_dev->ifcount; i++) { 3460 resp_size = sizeof(struct liquidio_if_cfg_resp); 3461 ctx_size = sizeof(struct liquidio_if_cfg_context); 3462 data_size = sizeof(struct lio_version); 3463 sc = (struct octeon_soft_command *) 3464 octeon_alloc_soft_command(octeon_dev, data_size, 3465 resp_size, ctx_size); 3466 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 3467 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 3468 vdata = (struct lio_version *)sc->virtdptr; 3469 3470 *((u64 *)vdata) = 0; 3471 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 3472 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 3473 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 3474 3475 if (OCTEON_CN23XX_PF(octeon_dev)) { 3476 num_iqueues = octeon_dev->sriov_info.num_pf_rings; 3477 num_oqueues = octeon_dev->sriov_info.num_pf_rings; 3478 base_queue = octeon_dev->sriov_info.pf_srn; 3479 3480 gmx_port_id = octeon_dev->pf_num; 3481 ifidx_or_pfnum = octeon_dev->pf_num; 3482 } else { 3483 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF( 3484 octeon_get_conf(octeon_dev), i); 3485 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF( 3486 octeon_get_conf(octeon_dev), i); 3487 base_queue = CFG_GET_BASE_QUE_NIC_IF( 3488 octeon_get_conf(octeon_dev), i); 3489 gmx_port_id = CFG_GET_GMXID_NIC_IF( 3490 octeon_get_conf(octeon_dev), i); 3491 ifidx_or_pfnum = i; 3492 } 3493 3494 dev_dbg(&octeon_dev->pci_dev->dev, 3495 "requesting config for interface %d, iqs %d, oqs %d\n", 3496 ifidx_or_pfnum, num_iqueues, num_oqueues); 3497 WRITE_ONCE(ctx->cond, 0); 3498 ctx->octeon_id = lio_get_device_id(octeon_dev); 3499 init_waitqueue_head(&ctx->wc); 3500 3501 if_cfg.u64 = 0; 3502 if_cfg.s.num_iqueues = num_iqueues; 3503 if_cfg.s.num_oqueues = num_oqueues; 3504 if_cfg.s.base_queue = base_queue; 3505 if_cfg.s.gmx_port_id = gmx_port_id; 3506 3507 sc->iq_no = 0; 3508 3509 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 3510 OPCODE_NIC_IF_CFG, 0, 3511 if_cfg.u64, 0); 3512 3513 sc->callback = if_cfg_callback; 3514 sc->callback_arg = sc; 3515 sc->wait_time = 3000; 3516 3517 retval = octeon_send_soft_command(octeon_dev, sc); 3518 if (retval == IQ_SEND_FAILED) { 3519 dev_err(&octeon_dev->pci_dev->dev, 3520 "iq/oq config failed status: %x\n", 3521 retval); 3522 /* Soft instr is freed by driver in case of failure. */ 3523 goto setup_nic_dev_fail; 3524 } 3525 3526 /* Sleep on a wait queue till the cond flag indicates that the 3527 * response arrived or timed-out. 3528 */ 3529 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 3530 dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); 3531 goto setup_nic_wait_intr; 3532 } 3533 3534 retval = resp->status; 3535 if (retval) { 3536 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 3537 goto setup_nic_dev_fail; 3538 } 3539 3540 /* Verify f/w version (in case of 'auto' loading from flash) */ 3541 fw_ver = octeon_dev->fw_info.liquidio_firmware_version; 3542 if (memcmp(LIQUIDIO_BASE_VERSION, 3543 fw_ver, 3544 strlen(LIQUIDIO_BASE_VERSION))) { 3545 dev_err(&octeon_dev->pci_dev->dev, 3546 "Unmatched firmware version. 
Expected %s.x, got %s.\n", 3547 LIQUIDIO_BASE_VERSION, fw_ver); 3548 goto setup_nic_dev_fail; 3549 } else if (atomic_read(octeon_dev->adapter_fw_state) == 3550 FW_IS_PRELOADED) { 3551 dev_info(&octeon_dev->pci_dev->dev, 3552 "Using auto-loaded firmware version %s.\n", 3553 fw_ver); 3554 } 3555 3556 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 3557 (sizeof(struct liquidio_if_cfg_info)) >> 3); 3558 3559 num_iqueues = hweight64(resp->cfg_info.iqmask); 3560 num_oqueues = hweight64(resp->cfg_info.oqmask); 3561 3562 if (!(num_iqueues) || !(num_oqueues)) { 3563 dev_err(&octeon_dev->pci_dev->dev, 3564 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 3565 resp->cfg_info.iqmask, 3566 resp->cfg_info.oqmask); 3567 goto setup_nic_dev_fail; 3568 } 3569 dev_dbg(&octeon_dev->pci_dev->dev, 3570 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 3571 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3572 num_iqueues, num_oqueues); 3573 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 3574 3575 if (!netdev) { 3576 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3577 goto setup_nic_dev_fail; 3578 } 3579 3580 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 3581 3582 /* Associate the routines that will handle different 3583 * netdev tasks. 3584 */ 3585 netdev->netdev_ops = &lionetdevops; 3586 3587 lio = GET_LIO(netdev); 3588 3589 memset(lio, 0, sizeof(struct lio)); 3590 3591 lio->ifidx = ifidx_or_pfnum; 3592 3593 props = &octeon_dev->props[i]; 3594 props->gmxport = resp->cfg_info.linfo.gmxport; 3595 props->netdev = netdev; 3596 3597 lio->linfo.num_rxpciq = num_oqueues; 3598 lio->linfo.num_txpciq = num_iqueues; 3599 for (j = 0; j < num_oqueues; j++) { 3600 lio->linfo.rxpciq[j].u64 = 3601 resp->cfg_info.linfo.rxpciq[j].u64; 3602 } 3603 for (j = 0; j < num_iqueues; j++) { 3604 lio->linfo.txpciq[j].u64 = 3605 resp->cfg_info.linfo.txpciq[j].u64; 3606 } 3607 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 3608 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 3609 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 3610 3611 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3612 3613 if (OCTEON_CN23XX_PF(octeon_dev) || 3614 OCTEON_CN6XXX(octeon_dev)) { 3615 lio->dev_capability = NETIF_F_HIGHDMA 3616 | NETIF_F_IP_CSUM 3617 | NETIF_F_IPV6_CSUM 3618 | NETIF_F_SG | NETIF_F_RXCSUM 3619 | NETIF_F_GRO 3620 | NETIF_F_TSO | NETIF_F_TSO6 3621 | NETIF_F_LRO; 3622 } 3623 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 3624 3625 /* Copy of transmit encapsulation capabilities: 3626 * TSO, TSO6, Checksums for this device 3627 */ 3628 lio->enc_dev_capability = NETIF_F_IP_CSUM 3629 | NETIF_F_IPV6_CSUM 3630 | NETIF_F_GSO_UDP_TUNNEL 3631 | NETIF_F_HW_CSUM | NETIF_F_SG 3632 | NETIF_F_RXCSUM 3633 | NETIF_F_TSO | NETIF_F_TSO6 3634 | NETIF_F_LRO; 3635 3636 netdev->hw_enc_features = (lio->enc_dev_capability & 3637 ~NETIF_F_LRO); 3638 3639 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; 3640 3641 netdev->vlan_features = lio->dev_capability; 3642 /* Add any unchangeable hw features */ 3643 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 3644 NETIF_F_HW_VLAN_CTAG_RX | 3645 NETIF_F_HW_VLAN_CTAG_TX; 3646 3647 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 3648 3649 netdev->hw_features = lio->dev_capability; 3650 /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/ 3651 netdev->hw_features = netdev->hw_features & 3652 ~NETIF_F_HW_VLAN_CTAG_RX; 3653 3654 /* MTU range: 68 - 16000 */ 3655 netdev->min_mtu = LIO_MIN_MTU_SIZE; 3656 netdev->max_mtu = 
LIO_MAX_MTU_SIZE; 3657 3658 /* Point to the properties for octeon device to which this 3659 * interface belongs. 3660 */ 3661 lio->oct_dev = octeon_dev; 3662 lio->octprops = props; 3663 lio->netdev = netdev; 3664 3665 dev_dbg(&octeon_dev->pci_dev->dev, 3666 "if%d gmx: %d hw_addr: 0x%llx\n", i, 3667 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 3668 3669 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { 3670 u8 vfmac[ETH_ALEN]; 3671 3672 random_ether_addr(&vfmac[0]); 3673 if (__liquidio_set_vf_mac(netdev, j, 3674 &vfmac[0], false)) { 3675 dev_err(&octeon_dev->pci_dev->dev, 3676 "Error setting VF%d MAC address\n", 3677 j); 3678 goto setup_nic_dev_fail; 3679 } 3680 } 3681 3682 /* 64-bit swap required on LE machines */ 3683 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 3684 for (j = 0; j < 6; j++) 3685 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 3686 3687 /* Copy MAC Address to OS network device structure */ 3688 3689 ether_addr_copy(netdev->dev_addr, mac); 3690 3691 /* By default all interfaces on a single Octeon uses the same 3692 * tx and rx queues 3693 */ 3694 lio->txq = lio->linfo.txpciq[0].s.q_no; 3695 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 3696 if (liquidio_setup_io_queues(octeon_dev, i, 3697 lio->linfo.num_txpciq, 3698 lio->linfo.num_rxpciq)) { 3699 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 3700 goto setup_nic_dev_fail; 3701 } 3702 3703 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 3704 3705 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 3706 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 3707 3708 if (setup_glists(octeon_dev, lio, num_iqueues)) { 3709 dev_err(&octeon_dev->pci_dev->dev, 3710 "Gather list allocation failed\n"); 3711 goto setup_nic_dev_fail; 3712 } 3713 3714 /* Register ethtool support */ 3715 liquidio_set_ethtool_ops(netdev); 3716 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID) 3717 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 3718 else 3719 octeon_dev->priv_flags = 0x0; 3720 3721 if (netdev->features & NETIF_F_LRO) 3722 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3723 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3724 3725 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 3726 OCTNET_CMD_VLAN_FILTER_ENABLE); 3727 3728 if ((debug != -1) && (debug & NETIF_MSG_HW)) 3729 liquidio_set_feature(netdev, 3730 OCTNET_CMD_VERBOSE_ENABLE, 0); 3731 3732 if (setup_link_status_change_wq(netdev)) 3733 goto setup_nic_dev_fail; 3734 3735 if ((octeon_dev->fw_info.app_cap_flags & 3736 LIQUIDIO_TIME_SYNC_CAP) && 3737 setup_sync_octeon_time_wq(netdev)) 3738 goto setup_nic_dev_fail; 3739 3740 if (setup_rx_oom_poll_fn(netdev)) 3741 goto setup_nic_dev_fail; 3742 3743 /* Register the network device with the OS */ 3744 if (register_netdev(netdev)) { 3745 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3746 goto setup_nic_dev_fail; 3747 } 3748 3749 dev_dbg(&octeon_dev->pci_dev->dev, 3750 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3751 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3752 netif_carrier_off(netdev); 3753 lio->link_changes++; 3754 3755 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3756 3757 /* Sending command to firmware to enable Rx checksum offload 3758 * by default at the time of setup of Liquidio driver for 3759 * this device 3760 */ 3761 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3762 OCTNET_CMD_RXCSUM_ENABLE); 3763 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 3764 OCTNET_CMD_TXCSUM_ENABLE); 3765 3766 dev_dbg(&octeon_dev->pci_dev->dev, 3767 
"NIC ifidx:%d Setup successful\n", i); 3768 3769 octeon_free_soft_command(octeon_dev, sc); 3770 } 3771 3772 return 0; 3773 3774 setup_nic_dev_fail: 3775 3776 octeon_free_soft_command(octeon_dev, sc); 3777 3778 setup_nic_wait_intr: 3779 3780 while (i--) { 3781 dev_err(&octeon_dev->pci_dev->dev, 3782 "NIC ifidx:%d Setup failed\n", i); 3783 liquidio_destroy_nic_device(octeon_dev, i); 3784 } 3785 return -ENODEV; 3786 } 3787 3788 #ifdef CONFIG_PCI_IOV 3789 static int octeon_enable_sriov(struct octeon_device *oct) 3790 { 3791 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; 3792 struct pci_dev *vfdev; 3793 int err; 3794 u32 u; 3795 3796 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { 3797 err = pci_enable_sriov(oct->pci_dev, 3798 oct->sriov_info.num_vfs_alloced); 3799 if (err) { 3800 dev_err(&oct->pci_dev->dev, 3801 "OCTEON: Failed to enable PCI sriov: %d\n", 3802 err); 3803 oct->sriov_info.num_vfs_alloced = 0; 3804 return err; 3805 } 3806 oct->sriov_info.sriov_enabled = 1; 3807 3808 /* init lookup table that maps DPI ring number to VF pci_dev 3809 * struct pointer 3810 */ 3811 u = 0; 3812 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3813 OCTEON_CN23XX_VF_VID, NULL); 3814 while (vfdev) { 3815 if (vfdev->is_virtfn && 3816 (vfdev->physfn == oct->pci_dev)) { 3817 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = 3818 vfdev; 3819 u += oct->sriov_info.rings_per_vf; 3820 } 3821 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3822 OCTEON_CN23XX_VF_VID, vfdev); 3823 } 3824 } 3825 3826 return num_vfs_alloced; 3827 } 3828 3829 static int lio_pci_sriov_disable(struct octeon_device *oct) 3830 { 3831 int u; 3832 3833 if (pci_vfs_assigned(oct->pci_dev)) { 3834 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n"); 3835 return -EPERM; 3836 } 3837 3838 pci_disable_sriov(oct->pci_dev); 3839 3840 u = 0; 3841 while (u < MAX_POSSIBLE_VFS) { 3842 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; 3843 u += oct->sriov_info.rings_per_vf; 3844 } 3845 3846 oct->sriov_info.num_vfs_alloced = 0; 3847 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n", 3848 oct->pf_num); 3849 3850 return 0; 3851 } 3852 3853 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) 3854 { 3855 struct octeon_device *oct = pci_get_drvdata(dev); 3856 int ret = 0; 3857 3858 if ((num_vfs == oct->sriov_info.num_vfs_alloced) && 3859 (oct->sriov_info.sriov_enabled)) { 3860 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n", 3861 oct->pf_num, num_vfs); 3862 return 0; 3863 } 3864 3865 if (!num_vfs) { 3866 ret = lio_pci_sriov_disable(oct); 3867 } else if (num_vfs > oct->sriov_info.max_vfs) { 3868 dev_err(&oct->pci_dev->dev, 3869 "OCTEON: Max allowed VFs:%d user requested:%d", 3870 oct->sriov_info.max_vfs, num_vfs); 3871 ret = -EPERM; 3872 } else { 3873 oct->sriov_info.num_vfs_alloced = num_vfs; 3874 ret = octeon_enable_sriov(oct); 3875 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", 3876 oct->pf_num, num_vfs); 3877 } 3878 3879 return ret; 3880 } 3881 #endif 3882 3883 /** 3884 * \brief initialize the NIC 3885 * @param oct octeon device 3886 * 3887 * This initialization routine is called once the Octeon device application is 3888 * up and running 3889 */ 3890 static int liquidio_init_nic_module(struct octeon_device *oct) 3891 { 3892 int i, retval = 0; 3893 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); 3894 3895 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3896 3897 /* only default iq and oq were initialized 3898 * initialize the rest as well 
3899 */ 3900 /* run port_config command for each port */ 3901 oct->ifcount = num_nic_ports; 3902 3903 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); 3904 3905 for (i = 0; i < MAX_OCTEON_LINKS; i++) 3906 oct->props[i].gmxport = -1; 3907 3908 retval = setup_nic_devices(oct); 3909 if (retval) { 3910 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3911 goto octnet_init_failure; 3912 } 3913 3914 liquidio_ptp_init(oct); 3915 3916 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3917 3918 return retval; 3919 3920 octnet_init_failure: 3921 3922 oct->ifcount = 0; 3923 3924 return retval; 3925 } 3926 3927 /** 3928 * \brief Starter callback that invokes the remaining initialization work after 3929 * the NIC is up and running. 3930 * @param work work_struct data structure 3931 */ 3932 static void nic_starter(struct work_struct *work) 3933 { 3934 struct octeon_device *oct; 3935 struct cavium_wk *wk = (struct cavium_wk *)work; 3936 3937 oct = (struct octeon_device *)wk->ctxptr; 3938 3939 if (atomic_read(&oct->status) == OCT_DEV_RUNNING) 3940 return; 3941 3942 /* If the status of the device is CORE_OK, the core 3943 * application has reported its application type. Call 3944 * any registered handlers now and move to the RUNNING 3945 * state. 3946 */ 3947 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) { 3948 schedule_delayed_work(&oct->nic_poll_work.work, 3949 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 3950 return; 3951 } 3952 3953 atomic_set(&oct->status, OCT_DEV_RUNNING); 3954 3955 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) { 3956 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n"); 3957 3958 if (liquidio_init_nic_module(oct)) 3959 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n"); 3960 else 3961 handshake[oct->octeon_id].started_ok = 1; 3962 } else { 3963 dev_err(&oct->pci_dev->dev, 3964 "Unexpected application running on NIC (%d). 
static int
octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int i, notice, vf_idx;
	bool cores_crashed;
	u64 *data, vf_num;

	notice = recv_pkt->rh.r.ossp;
	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);

	/* the first 64-bit word of data is the vf_num */
	vf_num = data[0];
	octeon_swap_8B_data(&vf_num, 1);
	vf_idx = (int)vf_num - 1;

	cores_crashed = READ_ONCE(oct->cores_crashed);

	if (notice == VF_DRV_LOADED) {
		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was loaded\n", vf_idx);
			if (!cores_crashed)
				try_module_get(THIS_MODULE);
		}
	} else if (notice == VF_DRV_REMOVED) {
		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was removed\n", vf_idx);
			if (!cores_crashed)
				module_put(THIS_MODULE);
		}
	} else if (notice == VF_DRV_MACADDR_CHANGED) {
		u8 *b = (u8 *)&data[1];

		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
		dev_info(&oct->pci_dev->dev,
			 "VF driver changed VF%d's MAC address to %pM\n",
			 vf_idx, b + 2);
	}

	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);

	return 0;
}

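/* Notice payload layout, as parsed above: data[0] carries the 1-based VF
 * number (byte-swapped before use); for VF_DRV_MACADDR_CHANGED the six MAC
 * address octets occupy bytes 2..7 of data[1], hence the %pM print starting
 * at b + 2.
 */
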
/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	char bootcmd[] = "\n";
	char *dbg_enb = NULL;
	enum lio_fw_state fw_state;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;
	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
	 * since that is what is required for the reference to be removed
	 * during de-initialization (see 'octeon_destroy_resources').
	 */
	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
			       PCI_SLOT(octeon_dev->pci_dev->devfn),
			       PCI_FUNC(octeon_dev->pci_dev->devfn),
			       true);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	/* CN23XX supports preloaded firmware if the following is true:
	 *
	 * The adapter indicates that firmware is currently running AND
	 * 'fw_type' is 'auto'.
	 *
	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
	 */
	if (OCTEON_CN23XX_PF(octeon_dev) &&
	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
		atomic_cmpxchg(octeon_dev->adapter_fw_state,
			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
	}

	/* If loading firmware, only first device of adapter needs to do so. */
	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
				  FW_NEEDS_TO_BE_LOADED,
				  FW_IS_BEING_LOADED);

	/* Here, [local variable] 'fw_state' is set to one of:
	 *
	 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
	 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
	 *                        firmware to the adapter.
	 * FW_IS_BEING_LOADED: The driver's second instance will not load
	 *                     firmware to the adapter.
	 */

	/* Prior to f/w load, perform a soft reset of the Octeon device;
	 * if error resetting, return w/error.
	 */
	if (fw_state == FW_NEEDS_TO_BE_LOADED)
		if (octeon_dev->fn_list.soft_reset(octeon_dev))
			return 1;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

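	/* The two dispatch handlers registered below run for packets the
	 * firmware sends on its output queues: octeon_core_drv_init handles
	 * the CORE_DRV_ACTIVE message (through which the core application
	 * reports its type, which nic_starter polls for), and
	 * octeon_recv_vf_drv_notice handles VF driver load/unload/MAC-change
	 * notices (see above).
	 */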
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_VF_DRV_NOTICE,
				    octeon_recv_vf_drv_notice, octeon_dev);
	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);

		if (octeon_allocate_ioq_vector(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	} else {
		/* The input and output queue registers were setup earlier (the
		 * queues were not enabled). Any additional registers
		 * that need to be programmed should be done now.
		 */
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize the tasklet that handles output queue packet processing.*/
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address
	 */
	if (octeon_setup_interrupt(octeon_dev,
				   octeon_dev->sriov_info.num_pf_rings))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);

	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
	 * the output queue is enabled.
	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
	 * before any credits have been issued, causing the ring to be reset
	 * (and the f/w appear to never have started).
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

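	/* 'ddr_timeout' is a writable module parameter; when it is 0, the
	 * wait loop below does not proceed until the administrator sets a
	 * non-zero value at runtime, typically (path assumed from the
	 * standard module_param sysfs layout):
	 *
	 *   echo 10000 > /sys/module/liquidio/parameters/ddr_timeout
	 */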
	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset.*/
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		/* If console debug enabled, specify empty string to use default
		 * enablement ELSE specify NULL string for 'disabled'.
		 */
		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		} else if (octeon_console_debug_enabled(0)) {
			/* If console was added AND we're logging console output
			 * then set our console print function.
			 */
			octeon_dev->console[0].print = octeon_dbg_console_print;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}

		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	return 0;
}

/**
 * \brief Debug console print function
 * @param octeon_dev octeon device
 * @param console_num console number
 * @param prefix first portion of line to display
 * @param suffix second portion of line to display
 *
 * The OCTEON debug console outputs entire lines (excluding '\n').
 * Normally, the line will be passed in the 'prefix' parameter.
 * However, due to buffering, it is possible for a line to be split into two
 * parts, in which case they will be passed as the 'prefix' parameter and
 * 'suffix' parameter.
 */
static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix)
{
	if (prefix && suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
			 suffix);
	else if (prefix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
	else if (suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);

	return 0;
}

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);