1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <net/vxlan.h> 22 #include "liquidio_common.h" 23 #include "octeon_droq.h" 24 #include "octeon_iq.h" 25 #include "response_manager.h" 26 #include "octeon_device.h" 27 #include "octeon_nic.h" 28 #include "octeon_main.h" 29 #include "octeon_network.h" 30 #include "cn23xx_vf_device.h" 31 32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); 34 MODULE_LICENSE("GPL"); 35 36 static int debug = -1; 37 module_param(debug, int, 0644); 38 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 39 40 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 41 42 struct oct_timestamp_resp { 43 u64 rh; 44 u64 timestamp; 45 u64 status; 46 }; 47 48 union tx_info { 49 u64 u64; 50 struct { 51 #ifdef __BIG_ENDIAN_BITFIELD 52 u16 gso_size; 53 u16 gso_segs; 54 u32 reserved; 55 #else 56 u32 reserved; 57 u16 gso_segs; 58 u16 gso_size; 59 #endif 60 } s; 61 }; 62 63 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 64 #define OCTNIC_GSO_MAX_SIZE \ 65 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 66 67 static int 68 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 69 static void liquidio_vf_remove(struct pci_dev *pdev); 70 static int octeon_device_init(struct octeon_device *oct); 71 static int liquidio_stop(struct net_device *netdev); 72 73 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 74 { 75 struct octeon_device_priv *oct_priv = oct->priv; 76 int retry = MAX_IO_PENDING_PKT_COUNT; 77 int pkt_cnt = 0, pending_pkts; 78 int i; 79 80 do { 81 pending_pkts = 0; 82 83 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 84 if (!(oct->io_qmask.oq & BIT_ULL(i))) 85 continue; 86 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 87 } 88 if (pkt_cnt > 0) { 89 pending_pkts += pkt_cnt; 90 tasklet_schedule(&oct_priv->droq_tasklet); 91 } 92 pkt_cnt = 0; 93 schedule_timeout_uninterruptible(1); 94 95 } while (retry-- && pending_pkts); 96 97 return pkt_cnt; 98 } 99 100 /** 101 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc 102 * @oct: Pointer to Octeon device 103 */ 104 static void pcierror_quiesce_device(struct octeon_device *oct) 105 { 106 int i; 107 108 /* Disable the input and output queues now. No more packets will 109 * arrive from Octeon, but we should wait for all packet processing 110 * to finish. 
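	 *
	 * The steps below: give in-flight requests a brief window to land,
	 * wait for pending requests, flush each instruction queue by
	 * advancing its read index to the host write index and reaping the
	 * request list, then force any ordered-list requests to time out.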
111 */ 112 113 /* To allow for in-flight requests */ 114 schedule_timeout_uninterruptible(100); 115 116 if (wait_for_pending_requests(oct)) 117 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 118 119 /* Force all requests waiting to be fetched by OCTEON to complete. */ 120 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 121 struct octeon_instr_queue *iq; 122 123 if (!(oct->io_qmask.iq & BIT_ULL(i))) 124 continue; 125 iq = oct->instr_queue[i]; 126 127 if (atomic_read(&iq->instr_pending)) { 128 spin_lock_bh(&iq->lock); 129 iq->fill_cnt = 0; 130 iq->octeon_read_index = iq->host_write_index; 131 iq->stats.instr_processed += 132 atomic_read(&iq->instr_pending); 133 lio_process_iq_request_list(oct, iq, 0); 134 spin_unlock_bh(&iq->lock); 135 } 136 } 137 138 /* Force all pending ordered list requests to time out. */ 139 lio_process_ordered_list(oct, 1); 140 141 /* We do not need to wait for output queue packets to be processed. */ 142 } 143 144 /** 145 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status 146 * @dev: Pointer to PCI device 147 */ 148 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 149 { 150 u32 status, mask; 151 int pos = 0x100; 152 153 pr_info("%s :\n", __func__); 154 155 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 156 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 157 if (dev->error_state == pci_channel_io_normal) 158 status &= ~mask; /* Clear corresponding nonfatal bits */ 159 else 160 status &= mask; /* Clear corresponding fatal bits */ 161 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 162 } 163 164 /** 165 * stop_pci_io - Stop all PCI IO to a given device 166 * @oct: Pointer to Octeon device 167 */ 168 static void stop_pci_io(struct octeon_device *oct) 169 { 170 struct msix_entry *msix_entries; 171 int i; 172 173 /* No more instructions will be forwarded. */ 174 atomic_set(&oct->status, OCT_DEV_IN_RESET); 175 176 for (i = 0; i < oct->ifcount; i++) 177 netif_device_detach(oct->props[i].netdev); 178 179 /* Disable interrupts */ 180 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 181 182 pcierror_quiesce_device(oct); 183 if (oct->msix_on) { 184 msix_entries = (struct msix_entry *)oct->msix_entries; 185 for (i = 0; i < oct->num_msix_irqs; i++) { 186 /* clear the affinity_cpumask */ 187 irq_set_affinity_hint(msix_entries[i].vector, 188 NULL); 189 free_irq(msix_entries[i].vector, 190 &oct->ioq_vector[i]); 191 } 192 pci_disable_msix(oct->pci_dev); 193 kfree(oct->msix_entries); 194 oct->msix_entries = NULL; 195 octeon_free_ioq_vector(oct); 196 } 197 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 198 lio_get_state_string(&oct->status)); 199 200 /* making it a common function for all OCTEON models */ 201 cleanup_aer_uncorrect_error_status(oct->pci_dev); 202 203 pci_disable_device(oct->pci_dev); 204 } 205 206 /** 207 * liquidio_pcie_error_detected - called when PCI error is detected 208 * @pdev: Pointer to PCI device 209 * @state: The current pci connection state 210 * 211 * This function is called after a PCI bus error affecting 212 * this device has been detected. 
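 *
 * Return: PCI_ERS_RESULT_CAN_RECOVER for non-fatal errors (after the
 * uncorrectable AER status has been cleared), PCI_ERS_RESULT_DISCONNECT
 * for fatal errors (after all PCI I/O to the device has been stopped).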
213 */ 214 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 215 pci_channel_state_t state) 216 { 217 struct octeon_device *oct = pci_get_drvdata(pdev); 218 219 /* Non-correctable Non-fatal errors */ 220 if (state == pci_channel_io_normal) { 221 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 222 cleanup_aer_uncorrect_error_status(oct->pci_dev); 223 return PCI_ERS_RESULT_CAN_RECOVER; 224 } 225 226 /* Non-correctable Fatal errors */ 227 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 228 stop_pci_io(oct); 229 230 return PCI_ERS_RESULT_DISCONNECT; 231 } 232 233 /* For PCI-E Advanced Error Recovery (AER) Interface */ 234 static const struct pci_error_handlers liquidio_vf_err_handler = { 235 .error_detected = liquidio_pcie_error_detected, 236 }; 237 238 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 239 { 240 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 242 }, 243 { 244 0, 0, 0, 0, 0, 0, 0 245 } 246 }; 247 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 248 249 static struct pci_driver liquidio_vf_pci_driver = { 250 .name = "LiquidIO_VF", 251 .id_table = liquidio_vf_pci_tbl, 252 .probe = liquidio_vf_probe, 253 .remove = liquidio_vf_remove, 254 .err_handler = &liquidio_vf_err_handler, /* For AER */ 255 }; 256 257 /** 258 * print_link_info - Print link information 259 * @netdev: network device 260 */ 261 static void print_link_info(struct net_device *netdev) 262 { 263 struct lio *lio = GET_LIO(netdev); 264 265 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 266 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 267 struct oct_link_info *linfo = &lio->linfo; 268 269 if (linfo->link.s.link_up) { 270 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 271 linfo->link.s.speed, 272 (linfo->link.s.duplex) ? "Full" : "Half"); 273 } else { 274 netif_info(lio, link, lio->netdev, "Link Down\n"); 275 } 276 } 277 } 278 279 /** 280 * octnet_link_status_change - Routine to notify MTU change 281 * @work: work_struct data structure 282 */ 283 static void octnet_link_status_change(struct work_struct *work) 284 { 285 struct cavium_wk *wk = (struct cavium_wk *)work; 286 struct lio *lio = (struct lio *)wk->ctxptr; 287 288 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. 289 * this API is invoked only when new max-MTU of the interface is 290 * less than current MTU. 
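	 *
	 * The update is done from this workqueue (see update_link_status())
	 * so that dev_set_mtu() can be called under rtnl_lock() in process
	 * context.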
291 */ 292 rtnl_lock(); 293 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); 294 rtnl_unlock(); 295 } 296 297 /** 298 * setup_link_status_change_wq - Sets up the mtu status change work 299 * @netdev: network device 300 */ 301 static int setup_link_status_change_wq(struct net_device *netdev) 302 { 303 struct lio *lio = GET_LIO(netdev); 304 struct octeon_device *oct = lio->oct_dev; 305 306 lio->link_status_wq.wq = alloc_workqueue("link-status", 307 WQ_MEM_RECLAIM | WQ_PERCPU, 308 0); 309 if (!lio->link_status_wq.wq) { 310 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 311 return -1; 312 } 313 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 314 octnet_link_status_change); 315 lio->link_status_wq.wk.ctxptr = lio; 316 317 return 0; 318 } 319 320 static void cleanup_link_status_change_wq(struct net_device *netdev) 321 { 322 struct lio *lio = GET_LIO(netdev); 323 324 if (lio->link_status_wq.wq) { 325 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 326 destroy_workqueue(lio->link_status_wq.wq); 327 } 328 } 329 330 /** 331 * update_link_status - Update link status 332 * @netdev: network device 333 * @ls: link status structure 334 * 335 * Called on receipt of a link status response from the core application to 336 * update each interface's link status. 337 */ 338 static void update_link_status(struct net_device *netdev, 339 union oct_link_status *ls) 340 { 341 struct lio *lio = GET_LIO(netdev); 342 int current_max_mtu = lio->linfo.link.s.mtu; 343 struct octeon_device *oct = lio->oct_dev; 344 345 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 346 lio->linfo.link.u64 = ls->u64; 347 348 print_link_info(netdev); 349 lio->link_changes++; 350 351 if (lio->linfo.link.s.link_up) { 352 netif_carrier_on(netdev); 353 wake_txqs(netdev); 354 } else { 355 netif_carrier_off(netdev); 356 stop_txqs(netdev); 357 } 358 359 if (lio->linfo.link.s.mtu != current_max_mtu) { 360 dev_info(&oct->pci_dev->dev, 361 "Max MTU Changed from %d to %d\n", 362 current_max_mtu, lio->linfo.link.s.mtu); 363 netdev->max_mtu = lio->linfo.link.s.mtu; 364 } 365 366 if (lio->linfo.link.s.mtu < netdev->mtu) { 367 dev_warn(&oct->pci_dev->dev, 368 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", 369 netdev->mtu, lio->linfo.link.s.mtu); 370 queue_delayed_work(lio->link_status_wq.wq, 371 &lio->link_status_wq.wk.work, 0); 372 } 373 } 374 } 375 376 /** 377 * liquidio_vf_probe - PCI probe handler 378 * @pdev: PCI device structure 379 * @ent: unused 380 */ 381 static int 382 liquidio_vf_probe(struct pci_dev *pdev, 383 const struct pci_device_id __maybe_unused *ent) 384 { 385 struct octeon_device *oct_dev = NULL; 386 387 oct_dev = octeon_allocate_device(pdev->device, 388 sizeof(struct octeon_device_priv)); 389 390 if (!oct_dev) { 391 dev_err(&pdev->dev, "Unable to allocate device\n"); 392 return -ENOMEM; 393 } 394 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 395 396 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 397 (u32)pdev->vendor, (u32)pdev->device); 398 399 /* Assign octeon_device for this device to the private data area. 
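	 * It is retrieved again via pci_get_drvdata() in the remove and
	 * AER error paths.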
*/ 400 pci_set_drvdata(pdev, oct_dev); 401 402 /* set linux specific device pointer */ 403 oct_dev->pci_dev = pdev; 404 405 oct_dev->subsystem_id = pdev->subsystem_vendor | 406 (pdev->subsystem_device << 16); 407 408 if (octeon_device_init(oct_dev)) { 409 liquidio_vf_remove(pdev); 410 return -ENOMEM; 411 } 412 413 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 414 415 return 0; 416 } 417 418 /** 419 * octeon_pci_flr - PCI FLR for each Octeon device. 420 * @oct: octeon device 421 */ 422 static void octeon_pci_flr(struct octeon_device *oct) 423 { 424 pci_save_state(oct->pci_dev); 425 426 pci_cfg_access_lock(oct->pci_dev); 427 428 /* Quiesce the device completely */ 429 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 430 PCI_COMMAND_INTX_DISABLE); 431 432 pcie_flr(oct->pci_dev); 433 434 pci_cfg_access_unlock(oct->pci_dev); 435 436 pci_restore_state(oct->pci_dev); 437 } 438 439 /** 440 * octeon_destroy_resources - Destroy resources associated with octeon device 441 * @oct: octeon device 442 */ 443 static void octeon_destroy_resources(struct octeon_device *oct) 444 { 445 struct octeon_device_priv *oct_priv = oct->priv; 446 struct msix_entry *msix_entries; 447 int i; 448 449 switch (atomic_read(&oct->status)) { 450 case OCT_DEV_RUNNING: 451 case OCT_DEV_CORE_OK: 452 /* No more instructions will be forwarded. */ 453 atomic_set(&oct->status, OCT_DEV_IN_RESET); 454 455 oct->app_mode = CVM_DRV_INVALID_APP; 456 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 457 lio_get_state_string(&oct->status)); 458 459 schedule_timeout_uninterruptible(HZ / 10); 460 461 fallthrough; 462 case OCT_DEV_HOST_OK: 463 case OCT_DEV_IO_QUEUES_DONE: 464 if (lio_wait_for_instr_fetch(oct)) 465 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 466 467 if (wait_for_pending_requests(oct)) 468 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 469 470 /* Disable the input and output queues now. No more packets will 471 * arrive from Octeon, but we should wait for all packet 472 * processing to finish. 473 */ 474 oct->fn_list.disable_io_queues(oct); 475 476 if (lio_wait_for_oq_pkts(oct)) 477 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 478 479 /* Force all requests waiting to be fetched by OCTEON to 480 * complete. 
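		 * This mirrors pcierror_quiesce_device(): reset each IQ's
		 * read index to the host write index and reap the request
		 * list via lio_process_iq_request_list().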
481 */ 482 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 483 struct octeon_instr_queue *iq; 484 485 if (!(oct->io_qmask.iq & BIT_ULL(i))) 486 continue; 487 iq = oct->instr_queue[i]; 488 489 if (atomic_read(&iq->instr_pending)) { 490 spin_lock_bh(&iq->lock); 491 iq->fill_cnt = 0; 492 iq->octeon_read_index = iq->host_write_index; 493 iq->stats.instr_processed += 494 atomic_read(&iq->instr_pending); 495 lio_process_iq_request_list(oct, iq, 0); 496 spin_unlock_bh(&iq->lock); 497 } 498 } 499 500 lio_process_ordered_list(oct, 1); 501 octeon_free_sc_done_list(oct); 502 octeon_free_sc_zombie_list(oct); 503 504 fallthrough; 505 case OCT_DEV_INTR_SET_DONE: 506 /* Disable interrupts */ 507 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 508 509 if (oct->msix_on) { 510 msix_entries = (struct msix_entry *)oct->msix_entries; 511 for (i = 0; i < oct->num_msix_irqs; i++) { 512 if (oct->ioq_vector[i].vector) { 513 irq_set_affinity_hint( 514 msix_entries[i].vector, 515 NULL); 516 free_irq(msix_entries[i].vector, 517 &oct->ioq_vector[i]); 518 oct->ioq_vector[i].vector = 0; 519 } 520 } 521 pci_disable_msix(oct->pci_dev); 522 kfree(oct->msix_entries); 523 oct->msix_entries = NULL; 524 kfree(oct->irq_name_storage); 525 oct->irq_name_storage = NULL; 526 } 527 /* Soft reset the octeon device before exiting */ 528 if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE)) 529 octeon_pci_flr(oct); 530 else 531 cn23xx_vf_ask_pf_to_do_flr(oct); 532 533 fallthrough; 534 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 535 octeon_free_ioq_vector(oct); 536 537 fallthrough; 538 case OCT_DEV_MBOX_SETUP_DONE: 539 oct->fn_list.free_mbox(oct); 540 541 fallthrough; 542 case OCT_DEV_IN_RESET: 543 case OCT_DEV_DROQ_INIT_DONE: 544 mdelay(100); 545 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 546 if (!(oct->io_qmask.oq & BIT_ULL(i))) 547 continue; 548 octeon_delete_droq(oct, i); 549 } 550 551 fallthrough; 552 case OCT_DEV_RESP_LIST_INIT_DONE: 553 octeon_delete_response_list(oct); 554 555 fallthrough; 556 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 557 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 558 if (!(oct->io_qmask.iq & BIT_ULL(i))) 559 continue; 560 octeon_delete_instr_queue(oct, i); 561 } 562 563 fallthrough; 564 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 565 octeon_free_sc_buffer_pool(oct); 566 567 fallthrough; 568 case OCT_DEV_DISPATCH_INIT_DONE: 569 octeon_delete_dispatch_list(oct); 570 cancel_delayed_work_sync(&oct->nic_poll_work.work); 571 572 fallthrough; 573 case OCT_DEV_PCI_MAP_DONE: 574 octeon_unmap_pci_barx(oct, 0); 575 octeon_unmap_pci_barx(oct, 1); 576 577 fallthrough; 578 case OCT_DEV_PCI_ENABLE_DONE: 579 /* Disable the device, releasing the PCI INT */ 580 pci_disable_device(oct->pci_dev); 581 582 fallthrough; 583 case OCT_DEV_BEGIN_STATE: 584 /* Nothing to be done here either */ 585 break; 586 } 587 588 tasklet_kill(&oct_priv->droq_tasklet); 589 } 590 591 /** 592 * send_rx_ctrl_cmd - Send Rx control command 593 * @lio: per-network private data 594 * @start_stop: whether to start or stop 595 */ 596 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop) 597 { 598 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 599 struct octeon_soft_command *sc; 600 union octnet_cmd *ncmd; 601 int retval; 602 603 if (oct->props[lio->ifidx].rx_on == start_stop) 604 return 0; 605 606 sc = (struct octeon_soft_command *) 607 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 608 16, 0); 609 if (!sc) { 610 netif_info(lio, rx_err, lio->netdev, 611 "Failed to allocate octeon_soft_command struct\n"); 612 return 
-ENOMEM; 613 } 614 615 ncmd = (union octnet_cmd *)sc->virtdptr; 616 617 ncmd->u64 = 0; 618 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 619 ncmd->s.param1 = start_stop; 620 621 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 622 623 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 624 625 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 626 OPCODE_NIC_CMD, 0, 0, 0); 627 628 init_completion(&sc->complete); 629 sc->sc_status = OCTEON_REQUEST_PENDING; 630 631 retval = octeon_send_soft_command(oct, sc); 632 if (retval == IQ_SEND_FAILED) { 633 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 634 octeon_free_soft_command(oct, sc); 635 } else { 636 /* Sleep on a wait queue till the cond flag indicates that the 637 * response arrived or timed-out. 638 */ 639 retval = wait_for_sc_completion_timeout(oct, sc, 0); 640 if (retval) 641 return retval; 642 643 oct->props[lio->ifidx].rx_on = start_stop; 644 WRITE_ONCE(sc->caller_is_done, true); 645 } 646 647 return retval; 648 } 649 650 /** 651 * liquidio_destroy_nic_device - Destroy NIC device interface 652 * @oct: octeon device 653 * @ifidx: which interface to destroy 654 * 655 * Cleanup associated with each interface for an Octeon device when NIC 656 * module is being unloaded or if initialization fails during load. 657 */ 658 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 659 { 660 struct net_device *netdev = oct->props[ifidx].netdev; 661 struct octeon_device_priv *oct_priv = oct->priv; 662 struct napi_struct *napi, *n; 663 struct lio *lio; 664 665 if (!netdev) { 666 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 667 __func__, ifidx); 668 return; 669 } 670 671 lio = GET_LIO(netdev); 672 673 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 674 675 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 676 liquidio_stop(netdev); 677 678 if (oct->props[lio->ifidx].napi_enabled == 1) { 679 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 680 napi_disable(napi); 681 682 oct->props[lio->ifidx].napi_enabled = 0; 683 684 oct->droq[0]->ops.poll_mode = 0; 685 } 686 687 /* Delete NAPI */ 688 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 689 netif_napi_del(napi); 690 691 tasklet_enable(&oct_priv->droq_tasklet); 692 693 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 694 unregister_netdev(netdev); 695 696 cleanup_rx_oom_poll_fn(netdev); 697 698 cleanup_link_status_change_wq(netdev); 699 700 lio_delete_glists(lio); 701 702 free_netdev(netdev); 703 704 oct->props[ifidx].gmxport = -1; 705 706 oct->props[ifidx].netdev = NULL; 707 } 708 709 /** 710 * liquidio_stop_nic_module - Stop complete NIC functionality 711 * @oct: octeon device 712 */ 713 static int liquidio_stop_nic_module(struct octeon_device *oct) 714 { 715 struct lio *lio; 716 int i, j; 717 718 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 719 if (!oct->ifcount) { 720 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 721 return 1; 722 } 723 724 spin_lock_bh(&oct->cmd_resp_wqlock); 725 oct->cmd_resp_state = OCT_DRV_OFFLINE; 726 spin_unlock_bh(&oct->cmd_resp_wqlock); 727 728 for (i = 0; i < oct->ifcount; i++) { 729 lio = GET_LIO(oct->props[i].netdev); 730 for (j = 0; j < oct->num_oqs; j++) 731 octeon_unregister_droq_ops(oct, 732 lio->linfo.rxpciq[j].s.q_no); 733 } 734 735 for (i = 0; i < oct->ifcount; i++) 736 liquidio_destroy_nic_device(oct, i); 737 738 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 739 return 0; 740 } 741 742 /** 743 * 
liquidio_vf_remove - Cleans up resources at unload time 744 * @pdev: PCI device structure 745 */ 746 static void liquidio_vf_remove(struct pci_dev *pdev) 747 { 748 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 749 750 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 751 752 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 753 liquidio_stop_nic_module(oct_dev); 754 755 /* Reset the octeon device and cleanup all memory allocated for 756 * the octeon device by driver. 757 */ 758 octeon_destroy_resources(oct_dev); 759 760 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 761 762 /* This octeon device has been removed. Update the global 763 * data structure to reflect this. Free the device structure. 764 */ 765 octeon_free_device_mem(oct_dev); 766 } 767 768 /** 769 * octeon_pci_os_setup - PCI initialization for each Octeon device. 770 * @oct: octeon device 771 */ 772 static int octeon_pci_os_setup(struct octeon_device *oct) 773 { 774 #ifdef CONFIG_PCI_IOV 775 /* setup PCI stuff first */ 776 if (!oct->pci_dev->physfn) 777 octeon_pci_flr(oct); 778 #endif 779 780 if (pci_enable_device(oct->pci_dev)) { 781 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 782 return 1; 783 } 784 785 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 786 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 787 pci_disable_device(oct->pci_dev); 788 return 1; 789 } 790 791 /* Enable PCI DMA Master. */ 792 pci_set_master(oct->pci_dev); 793 794 return 0; 795 } 796 797 /** 798 * free_netbuf - Unmap and free network buffer 799 * @buf: buffer 800 */ 801 static void free_netbuf(void *buf) 802 { 803 struct octnet_buf_free_info *finfo; 804 struct sk_buff *skb; 805 struct lio *lio; 806 807 finfo = (struct octnet_buf_free_info *)buf; 808 skb = finfo->skb; 809 lio = finfo->lio; 810 811 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 812 DMA_TO_DEVICE); 813 814 tx_buffer_free(skb); 815 } 816 817 /** 818 * free_netsgbuf - Unmap and free gather buffer 819 * @buf: buffer 820 */ 821 static void free_netsgbuf(void *buf) 822 { 823 struct octnet_buf_free_info *finfo; 824 struct octnic_gather *g; 825 struct sk_buff *skb; 826 int i, frags, iq; 827 struct lio *lio; 828 829 finfo = (struct octnet_buf_free_info *)buf; 830 skb = finfo->skb; 831 lio = finfo->lio; 832 g = finfo->g; 833 frags = skb_shinfo(skb)->nr_frags; 834 835 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 836 g->sg[0].ptr[0], (skb->len - skb->data_len), 837 DMA_TO_DEVICE); 838 839 i = 1; 840 while (frags--) { 841 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 842 843 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 844 g->sg[(i >> 2)].ptr[(i & 3)], 845 skb_frag_size(frag), DMA_TO_DEVICE); 846 i++; 847 } 848 849 iq = skb_iq(lio->oct_dev, skb); 850 851 spin_lock(&lio->glist_lock[iq]); 852 list_add_tail(&g->list, &lio->glist[iq]); 853 spin_unlock(&lio->glist_lock[iq]); 854 855 tx_buffer_free(skb); 856 } 857 858 /** 859 * free_netsgbuf_with_resp - Unmap and free gather buffer with response 860 * @buf: buffer 861 */ 862 static void free_netsgbuf_with_resp(void *buf) 863 { 864 struct octnet_buf_free_info *finfo; 865 struct octeon_soft_command *sc; 866 struct octnic_gather *g; 867 struct sk_buff *skb; 868 int i, frags, iq; 869 struct lio *lio; 870 871 sc = (struct octeon_soft_command *)buf; 872 skb = (struct sk_buff *)sc->callback_arg; 873 finfo = (struct octnet_buf_free_info *)&skb->cb; 874 875 lio = finfo->lio; 876 g = finfo->g; 877 frags = skb_shinfo(skb)->nr_frags; 878 879 
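	/* Unmap the linear part of the skb; the page fragments that follow
	 * are unmapped in the loop below.
	 */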
	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}

/**
 * liquidio_open - Net device open for LiquidIO
 * @netdev: network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv = oct->priv;
	struct napi_struct *napi, *n;
	int ret = 0;

	if (!oct->props[lio->ifidx].napi_enabled) {
		tasklet_disable(&oct_priv->droq_tasklet);

		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		oct->droq[0]->ops.poll_mode = 1;
	}

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
	start_txqs(netdev);

	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
	lio->stats_wk.ctxptr = lio;
	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
			      (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

	/* tell Octeon to start forwarding packets to host */
	ret = send_rx_ctrl_cmd(lio, 1);
	if (ret)
		return ret;

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);

	return ret;
}

/**
 * liquidio_stop - Net device stop for LiquidIO
 * @netdev: network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv = oct->priv;
	struct napi_struct *napi, *n;
	int ret = 0;

	/* tell Octeon to stop forwarding packets to host */
	ret = send_rx_ctrl_cmd(lio, 0);
	if (ret)
		return ret;

	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
	/* Inform that netif carrier is down */
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;

	netif_carrier_off(netdev);
	lio->link_changes++;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	stop_txqs(netdev);

	/* Wait for any pending Rx descriptors */
	if (lio_wait_for_clean_oq(oct))
		netif_info(lio, rx_err, lio->netdev,
			   "Proceeding with stop interface after partial RX desc processing\n");

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		oct->droq[0]->ops.poll_mode = 0;

		tasklet_enable(&oct_priv->droq_tasklet);
	}

	cancel_delayed_work_sync(&lio->stats_wk.work);

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return ret;
}

/**
 * get_new_flags - Converts a mask based on net device flags
 * @netdev: network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
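 * Unicast is always accepted; OCTNET_IFFLAG_ALLMULTI is also set when the
 * multicast list exceeds MAX_OCTEON_MULTICAST_ADDR.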
1006 */ 1007 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 1008 { 1009 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1010 1011 if (netdev->flags & IFF_PROMISC) 1012 f |= OCTNET_IFFLAG_PROMISC; 1013 1014 if (netdev->flags & IFF_ALLMULTI) 1015 f |= OCTNET_IFFLAG_ALLMULTI; 1016 1017 if (netdev->flags & IFF_MULTICAST) { 1018 f |= OCTNET_IFFLAG_MULTICAST; 1019 1020 /* Accept all multicast addresses if there are more than we 1021 * can handle 1022 */ 1023 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1024 f |= OCTNET_IFFLAG_ALLMULTI; 1025 } 1026 1027 if (netdev->flags & IFF_BROADCAST) 1028 f |= OCTNET_IFFLAG_BROADCAST; 1029 1030 return f; 1031 } 1032 1033 static void liquidio_set_uc_list(struct net_device *netdev) 1034 { 1035 struct lio *lio = GET_LIO(netdev); 1036 struct octeon_device *oct = lio->oct_dev; 1037 struct octnic_ctrl_pkt nctrl; 1038 struct netdev_hw_addr *ha; 1039 u64 *mac; 1040 1041 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1042 return; 1043 1044 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1045 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1046 return; 1047 } 1048 1049 lio->netdev_uc_count = netdev_uc_count(netdev); 1050 1051 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1052 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1053 nctrl.ncmd.s.more = lio->netdev_uc_count; 1054 nctrl.ncmd.s.param1 = oct->vf_num; 1055 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1056 nctrl.netpndev = (u64)netdev; 1057 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1058 1059 /* copy all the addresses into the udd */ 1060 mac = &nctrl.udd[0]; 1061 netdev_for_each_uc_addr(ha, netdev) { 1062 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1063 mac++; 1064 } 1065 1066 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1067 } 1068 1069 /** 1070 * liquidio_set_mcast_list - Net device set_multicast_list 1071 * @netdev: network device 1072 */ 1073 static void liquidio_set_mcast_list(struct net_device *netdev) 1074 { 1075 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1076 struct lio *lio = GET_LIO(netdev); 1077 struct octeon_device *oct = lio->oct_dev; 1078 struct octnic_ctrl_pkt nctrl; 1079 struct netdev_hw_addr *ha; 1080 u64 *mc; 1081 int ret; 1082 1083 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1084 1085 /* Create a ctrl pkt command to be sent to core app. */ 1086 nctrl.ncmd.u64 = 0; 1087 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1088 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1089 nctrl.ncmd.s.param2 = mc_count; 1090 nctrl.ncmd.s.more = mc_count; 1091 nctrl.netpndev = (u64)netdev; 1092 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1093 1094 /* copy all the addresses into the udd */ 1095 mc = &nctrl.udd[0]; 1096 netdev_for_each_mc_addr(ha, netdev) { 1097 *mc = 0; 1098 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1099 /* no need to swap bytes */ 1100 if (++mc > &nctrl.udd[mc_count]) 1101 break; 1102 } 1103 1104 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1105 1106 /* Apparently, any activity in this call from the kernel has to 1107 * be atomic. So we won't wait for response. 
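	 * The control packet is sent without blocking; completion is
	 * reported through liquidio_link_ctrl_cmd_completion().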
1108 */ 1109 1110 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1111 if (ret) { 1112 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1113 ret); 1114 } 1115 1116 liquidio_set_uc_list(netdev); 1117 } 1118 1119 /** 1120 * liquidio_set_mac - Net device set_mac_address 1121 * @netdev: network device 1122 * @p: opaque pointer to sockaddr 1123 */ 1124 static int liquidio_set_mac(struct net_device *netdev, void *p) 1125 { 1126 struct sockaddr *addr = (struct sockaddr *)p; 1127 struct lio *lio = GET_LIO(netdev); 1128 struct octeon_device *oct = lio->oct_dev; 1129 struct octnic_ctrl_pkt nctrl; 1130 int ret = 0; 1131 1132 if (!is_valid_ether_addr(addr->sa_data)) 1133 return -EADDRNOTAVAIL; 1134 1135 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1136 return 0; 1137 1138 if (lio->linfo.macaddr_is_admin_asgnd) 1139 return -EPERM; 1140 1141 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1142 1143 nctrl.ncmd.u64 = 0; 1144 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1145 nctrl.ncmd.s.param1 = 0; 1146 nctrl.ncmd.s.more = 1; 1147 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1148 nctrl.netpndev = (u64)netdev; 1149 1150 nctrl.udd[0] = 0; 1151 /* The MAC Address is presented in network byte order. */ 1152 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1153 1154 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1155 if (ret < 0) { 1156 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1157 return -ENOMEM; 1158 } 1159 1160 if (nctrl.sc_status == 1161 FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) { 1162 dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n"); 1163 return -EPERM; 1164 } 1165 1166 eth_hw_addr_set(netdev, addr->sa_data); 1167 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1168 1169 return 0; 1170 } 1171 1172 static void 1173 liquidio_get_stats64(struct net_device *netdev, 1174 struct rtnl_link_stats64 *lstats) 1175 { 1176 struct lio *lio = GET_LIO(netdev); 1177 struct octeon_device *oct; 1178 u64 pkts = 0, drop = 0, bytes = 0; 1179 struct oct_droq_stats *oq_stats; 1180 struct oct_iq_stats *iq_stats; 1181 int i, iq_no, oq_no; 1182 1183 oct = lio->oct_dev; 1184 1185 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1186 return; 1187 1188 for (i = 0; i < oct->num_iqs; i++) { 1189 iq_no = lio->linfo.txpciq[i].s.q_no; 1190 iq_stats = &oct->instr_queue[iq_no]->stats; 1191 pkts += iq_stats->tx_done; 1192 drop += iq_stats->tx_dropped; 1193 bytes += iq_stats->tx_tot_bytes; 1194 } 1195 1196 lstats->tx_packets = pkts; 1197 lstats->tx_bytes = bytes; 1198 lstats->tx_dropped = drop; 1199 1200 pkts = 0; 1201 drop = 0; 1202 bytes = 0; 1203 1204 for (i = 0; i < oct->num_oqs; i++) { 1205 oq_no = lio->linfo.rxpciq[i].s.q_no; 1206 oq_stats = &oct->droq[oq_no]->stats; 1207 pkts += oq_stats->rx_pkts_received; 1208 drop += (oq_stats->rx_dropped + 1209 oq_stats->dropped_nodispatch + 1210 oq_stats->dropped_toomany + 1211 oq_stats->dropped_nomem); 1212 bytes += oq_stats->rx_bytes_received; 1213 } 1214 1215 lstats->rx_bytes = bytes; 1216 lstats->rx_packets = pkts; 1217 lstats->rx_dropped = drop; 1218 1219 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 1220 1221 /* detailed rx_errors: */ 1222 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 1223 /* recved pkt with crc error */ 1224 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 1225 /* recv'd frame alignment error */ 1226 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 1227 1228 lstats->rx_errors = 
lstats->rx_length_errors + lstats->rx_crc_errors + 1229 lstats->rx_frame_errors; 1230 1231 /* detailed tx_errors */ 1232 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 1233 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 1234 1235 lstats->tx_errors = lstats->tx_aborted_errors + 1236 lstats->tx_carrier_errors; 1237 } 1238 1239 /** 1240 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl 1241 * @netdev: network device 1242 * @ifr: interface request 1243 */ 1244 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 1245 { 1246 struct lio *lio = GET_LIO(netdev); 1247 struct hwtstamp_config conf; 1248 1249 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 1250 return -EFAULT; 1251 1252 switch (conf.tx_type) { 1253 case HWTSTAMP_TX_ON: 1254 case HWTSTAMP_TX_OFF: 1255 break; 1256 default: 1257 return -ERANGE; 1258 } 1259 1260 switch (conf.rx_filter) { 1261 case HWTSTAMP_FILTER_NONE: 1262 break; 1263 case HWTSTAMP_FILTER_ALL: 1264 case HWTSTAMP_FILTER_SOME: 1265 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1266 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1267 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1268 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1269 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1270 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1271 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1272 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1273 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1274 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1275 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1276 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1277 case HWTSTAMP_FILTER_NTP_ALL: 1278 conf.rx_filter = HWTSTAMP_FILTER_ALL; 1279 break; 1280 default: 1281 return -ERANGE; 1282 } 1283 1284 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 1285 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1286 1287 else 1288 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1289 1290 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 1291 } 1292 1293 /** 1294 * liquidio_ioctl - ioctl handler 1295 * @netdev: network device 1296 * @ifr: interface request 1297 * @cmd: command 1298 */ 1299 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1300 { 1301 switch (cmd) { 1302 case SIOCSHWTSTAMP: 1303 return hwtstamp_ioctl(netdev, ifr); 1304 default: 1305 return -EOPNOTSUPP; 1306 } 1307 } 1308 1309 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 1310 { 1311 struct sk_buff *skb = (struct sk_buff *)buf; 1312 struct octnet_buf_free_info *finfo; 1313 struct oct_timestamp_resp *resp; 1314 struct octeon_soft_command *sc; 1315 struct lio *lio; 1316 1317 finfo = (struct octnet_buf_free_info *)skb->cb; 1318 lio = finfo->lio; 1319 sc = finfo->sc; 1320 oct = lio->oct_dev; 1321 resp = (struct oct_timestamp_resp *)sc->virtrptr; 1322 1323 if (status != OCTEON_REQUEST_DONE) { 1324 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}

/* send_nic_timestamp_pkt - Send a data packet that will be timestamped
 * @oct: octeon device
 * @ndata: pointer to network data
 * @finfo: pointer to private network data
 */
static int send_nic_timestamp_pkt(struct octeon_device *oct,
				  struct octnic_data_pkt *ndata,
				  struct octnet_buf_free_info *finfo,
				  int xmit_more)
{
	struct octeon_soft_command *sc;
	int ring_doorbell;
	struct lio *lio;
	int retval;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;

	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}

/**
 * liquidio_xmit - Transmit network packets to the Octeon interface
 * @skb: skbuff struct to be passed to network layer.
 * @netdev: pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_instr_irh *irh;
	struct oct_iq_stats *stats;
	struct octeon_device *oct;
	int q_idx = 0, iq_no = 0;
	union tx_info *tx_info;
	int xmit_more = 0;
	struct lio *lio;
	int status = 0;
	u64 dptr = 0;
	u32 tag = 0;
	int j;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	q_idx = skb_iq(lio->oct_dev, skb);
	tag = q_idx;
	iq_no = lio->linfo.txpciq[q_idx].s.q_no;

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
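	 * i.e. the interface is not running, the link is down, or the skb
	 * has zero length.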
1435 */ 1436 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 1437 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 1438 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 1439 lio->linfo.link.s.link_up); 1440 goto lio_xmit_failed; 1441 } 1442 1443 /* Use space in skb->cb to store info used to unmap and 1444 * free the buffers. 1445 */ 1446 finfo = (struct octnet_buf_free_info *)skb->cb; 1447 finfo->lio = lio; 1448 finfo->skb = skb; 1449 finfo->sc = NULL; 1450 1451 /* Prepare the attributes for the data to be passed to OSI. */ 1452 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 1453 1454 ndata.buf = finfo; 1455 1456 ndata.q_no = iq_no; 1457 1458 if (octnet_iq_is_full(oct, ndata.q_no)) { 1459 /* defer sending if queue is full */ 1460 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 1461 ndata.q_no); 1462 stats->tx_iq_busy++; 1463 return NETDEV_TX_BUSY; 1464 } 1465 1466 ndata.datasize = skb->len; 1467 1468 cmdsetup.u64 = 0; 1469 cmdsetup.s.iq_no = iq_no; 1470 1471 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1472 if (skb->encapsulation) { 1473 cmdsetup.s.tnl_csum = 1; 1474 stats->tx_vxlan++; 1475 } else { 1476 cmdsetup.s.transport_csum = 1; 1477 } 1478 } 1479 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1480 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1481 cmdsetup.s.timestamp = 1; 1482 } 1483 1484 if (!skb_shinfo(skb)->nr_frags) { 1485 cmdsetup.s.u.datasize = skb->len; 1486 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1487 /* Offload checksum calculation for TCP/UDP packets */ 1488 dptr = dma_map_single(&oct->pci_dev->dev, 1489 skb->data, 1490 skb->len, 1491 DMA_TO_DEVICE); 1492 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 1493 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 1494 __func__); 1495 return NETDEV_TX_BUSY; 1496 } 1497 1498 ndata.cmd.cmd3.dptr = dptr; 1499 finfo->dptr = dptr; 1500 ndata.reqtype = REQTYPE_NORESP_NET; 1501 1502 } else { 1503 skb_frag_t *frag; 1504 struct octnic_gather *g; 1505 int i, frags; 1506 1507 spin_lock(&lio->glist_lock[q_idx]); 1508 g = (struct octnic_gather *) 1509 lio_list_delete_head(&lio->glist[q_idx]); 1510 spin_unlock(&lio->glist_lock[q_idx]); 1511 1512 if (!g) { 1513 netif_info(lio, tx_err, lio->netdev, 1514 "Transmit scatter gather: glist null!\n"); 1515 goto lio_xmit_failed; 1516 } 1517 1518 cmdsetup.s.gather = 1; 1519 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 1520 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1521 1522 memset(g->sg, 0, g->sg_size); 1523 1524 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 1525 skb->data, 1526 (skb->len - skb->data_len), 1527 DMA_TO_DEVICE); 1528 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 1529 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 1530 __func__); 1531 return NETDEV_TX_BUSY; 1532 } 1533 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 1534 1535 frags = skb_shinfo(skb)->nr_frags; 1536 i = 1; 1537 while (frags--) { 1538 frag = &skb_shinfo(skb)->frags[i - 1]; 1539 1540 g->sg[(i >> 2)].ptr[(i & 3)] = 1541 skb_frag_dma_map(&oct->pci_dev->dev, 1542 frag, 0, skb_frag_size(frag), 1543 DMA_TO_DEVICE); 1544 if (dma_mapping_error(&oct->pci_dev->dev, 1545 g->sg[i >> 2].ptr[i & 3])) { 1546 dma_unmap_single(&oct->pci_dev->dev, 1547 g->sg[0].ptr[0], 1548 skb->len - skb->data_len, 1549 DMA_TO_DEVICE); 1550 for (j = 1; j < i; j++) { 1551 frag = &skb_shinfo(skb)->frags[j - 1]; 1552 dma_unmap_page(&oct->pci_dev->dev, 1553 g->sg[j >> 2].ptr[j & 3], 1554 
skb_frag_size(frag), 1555 DMA_TO_DEVICE); 1556 } 1557 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 1558 __func__); 1559 return NETDEV_TX_BUSY; 1560 } 1561 1562 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 1563 (i & 3)); 1564 i++; 1565 } 1566 1567 dptr = g->sg_dma_ptr; 1568 1569 ndata.cmd.cmd3.dptr = dptr; 1570 finfo->dptr = dptr; 1571 finfo->g = g; 1572 1573 ndata.reqtype = REQTYPE_NORESP_NET_SG; 1574 } 1575 1576 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 1577 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 1578 1579 if (skb_shinfo(skb)->gso_size) { 1580 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 1581 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 1582 } 1583 1584 /* HW insert VLAN tag */ 1585 if (skb_vlan_tag_present(skb)) { 1586 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 1587 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 1588 } 1589 1590 xmit_more = netdev_xmit_more(); 1591 1592 if (unlikely(cmdsetup.s.timestamp)) 1593 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 1594 else 1595 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 1596 if (status == IQ_SEND_FAILED) 1597 goto lio_xmit_failed; 1598 1599 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 1600 1601 if (status == IQ_SEND_STOP) { 1602 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 1603 iq_no); 1604 netif_stop_subqueue(netdev, q_idx); 1605 } 1606 1607 netif_trans_update(netdev); 1608 1609 if (tx_info->s.gso_segs) 1610 stats->tx_done += tx_info->s.gso_segs; 1611 else 1612 stats->tx_done++; 1613 stats->tx_tot_bytes += ndata.datasize; 1614 1615 return NETDEV_TX_OK; 1616 1617 lio_xmit_failed: 1618 stats->tx_dropped++; 1619 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 1620 iq_no, stats->tx_dropped); 1621 if (dptr) 1622 dma_unmap_single(&oct->pci_dev->dev, dptr, 1623 ndata.datasize, DMA_TO_DEVICE); 1624 1625 octeon_ring_doorbell_locked(oct, iq_no); 1626 1627 tx_buffer_free(skb); 1628 return NETDEV_TX_OK; 1629 } 1630 1631 /** 1632 * liquidio_tx_timeout - Network device Tx timeout 1633 * @netdev: pointer to network device 1634 * @txqueue: index of the hung transmit queue 1635 */ 1636 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 1637 { 1638 struct lio *lio; 1639 1640 lio = GET_LIO(netdev); 1641 1642 netif_info(lio, tx_err, lio->netdev, 1643 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 1644 netdev->stats.tx_dropped); 1645 netif_trans_update(netdev); 1646 wake_txqs(netdev); 1647 } 1648 1649 static int 1650 liquidio_vlan_rx_add_vid(struct net_device *netdev, 1651 __be16 proto __attribute__((unused)), u16 vid) 1652 { 1653 struct lio *lio = GET_LIO(netdev); 1654 struct octeon_device *oct = lio->oct_dev; 1655 struct octnic_ctrl_pkt nctrl; 1656 int ret = 0; 1657 1658 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1659 1660 nctrl.ncmd.u64 = 0; 1661 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 1662 nctrl.ncmd.s.param1 = vid; 1663 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1664 nctrl.netpndev = (u64)netdev; 1665 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1666 1667 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1668 if (ret) { 1669 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 1670 ret); 1671 return -EPERM; 1672 } 1673 1674 return 0; 1675 } 1676 1677 static int 1678 liquidio_vlan_rx_kill_vid(struct net_device *netdev, 1679 __be16 proto __attribute__((unused)), u16 vid) 1680 { 1681 struct lio 
*lio = GET_LIO(netdev); 1682 struct octeon_device *oct = lio->oct_dev; 1683 struct octnic_ctrl_pkt nctrl; 1684 int ret = 0; 1685 1686 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1687 1688 nctrl.ncmd.u64 = 0; 1689 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 1690 nctrl.ncmd.s.param1 = vid; 1691 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1692 nctrl.netpndev = (u64)netdev; 1693 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1694 1695 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1696 if (ret) { 1697 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 1698 ret); 1699 if (ret > 0) 1700 ret = -EIO; 1701 } 1702 return ret; 1703 } 1704 1705 /** Sending command to enable/disable RX checksum offload 1706 * @param netdev pointer to network device 1707 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 1708 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ 1709 * OCTNET_CMD_RXCSUM_DISABLE 1710 * @returns SUCCESS or FAILURE 1711 */ 1712 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 1713 u8 rx_cmd) 1714 { 1715 struct lio *lio = GET_LIO(netdev); 1716 struct octeon_device *oct = lio->oct_dev; 1717 struct octnic_ctrl_pkt nctrl; 1718 int ret = 0; 1719 1720 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1721 1722 nctrl.ncmd.u64 = 0; 1723 nctrl.ncmd.s.cmd = command; 1724 nctrl.ncmd.s.param1 = rx_cmd; 1725 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1726 nctrl.netpndev = (u64)netdev; 1727 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1728 1729 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1730 if (ret) { 1731 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", 1732 ret); 1733 if (ret > 0) 1734 ret = -EIO; 1735 } 1736 return ret; 1737 } 1738 1739 /** Sending command to add/delete VxLAN UDP port to firmware 1740 * @param netdev pointer to network device 1741 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 1742 * @param vxlan_port VxLAN port to be added or deleted 1743 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 1744 * OCTNET_CMD_VXLAN_PORT_DEL 1745 * @returns SUCCESS or FAILURE 1746 */ 1747 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 1748 u16 vxlan_port, u8 vxlan_cmd_bit) 1749 { 1750 struct lio *lio = GET_LIO(netdev); 1751 struct octeon_device *oct = lio->oct_dev; 1752 struct octnic_ctrl_pkt nctrl; 1753 int ret = 0; 1754 1755 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1756 1757 nctrl.ncmd.u64 = 0; 1758 nctrl.ncmd.s.cmd = command; 1759 nctrl.ncmd.s.more = vxlan_cmd_bit; 1760 nctrl.ncmd.s.param1 = vxlan_port; 1761 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1762 nctrl.netpndev = (u64)netdev; 1763 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1764 1765 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1766 if (ret) { 1767 dev_err(&oct->pci_dev->dev, 1768 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", 1769 ret); 1770 if (ret > 0) 1771 ret = -EIO; 1772 } 1773 return ret; 1774 } 1775 1776 static int liquidio_udp_tunnel_set_port(struct net_device *netdev, 1777 unsigned int table, unsigned int entry, 1778 struct udp_tunnel_info *ti) 1779 { 1780 return liquidio_vxlan_port_command(netdev, 1781 OCTNET_CMD_VXLAN_PORT_CONFIG, 1782 htons(ti->port), 1783 OCTNET_CMD_VXLAN_PORT_ADD); 1784 } 1785 1786 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, 1787 unsigned int table, 1788 unsigned int entry, 1789 struct udp_tunnel_info *ti) 1790 { 1791 return liquidio_vxlan_port_command(netdev, 1792 OCTNET_CMD_VXLAN_PORT_CONFIG, 1793 
htons(ti->port), 1794 OCTNET_CMD_VXLAN_PORT_DEL); 1795 } 1796 1797 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { 1798 .set_port = liquidio_udp_tunnel_set_port, 1799 .unset_port = liquidio_udp_tunnel_unset_port, 1800 .tables = { 1801 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 1802 }, 1803 }; 1804 1805 /** \brief Net device fix features 1806 * @param netdev pointer to network device 1807 * @param request features requested 1808 * @returns updated features list 1809 */ 1810 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 1811 netdev_features_t request) 1812 { 1813 struct lio *lio = netdev_priv(netdev); 1814 1815 if ((request & NETIF_F_RXCSUM) && 1816 !(lio->dev_capability & NETIF_F_RXCSUM)) 1817 request &= ~NETIF_F_RXCSUM; 1818 1819 if ((request & NETIF_F_HW_CSUM) && 1820 !(lio->dev_capability & NETIF_F_HW_CSUM)) 1821 request &= ~NETIF_F_HW_CSUM; 1822 1823 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 1824 request &= ~NETIF_F_TSO; 1825 1826 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 1827 request &= ~NETIF_F_TSO6; 1828 1829 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 1830 request &= ~NETIF_F_LRO; 1831 1832 /* Disable LRO if RXCSUM is off */ 1833 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 1834 (lio->dev_capability & NETIF_F_LRO)) 1835 request &= ~NETIF_F_LRO; 1836 1837 return request; 1838 } 1839 1840 /** \brief Net device set features 1841 * @param netdev pointer to network device 1842 * @param features features to enable/disable 1843 */ 1844 static int liquidio_set_features(struct net_device *netdev, 1845 netdev_features_t features) 1846 { 1847 struct lio *lio = netdev_priv(netdev); 1848 1849 if (!((netdev->features ^ features) & NETIF_F_LRO)) 1850 return 0; 1851 1852 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 1853 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 1854 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1855 else if (!(features & NETIF_F_LRO) && 1856 (lio->dev_capability & NETIF_F_LRO)) 1857 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 1858 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1859 if (!(netdev->features & NETIF_F_RXCSUM) && 1860 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1861 (features & NETIF_F_RXCSUM)) 1862 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1863 OCTNET_CMD_RXCSUM_ENABLE); 1864 else if ((netdev->features & NETIF_F_RXCSUM) && 1865 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1866 !(features & NETIF_F_RXCSUM)) 1867 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1868 OCTNET_CMD_RXCSUM_DISABLE); 1869 1870 return 0; 1871 } 1872 1873 static const struct net_device_ops lionetdevops = { 1874 .ndo_open = liquidio_open, 1875 .ndo_stop = liquidio_stop, 1876 .ndo_start_xmit = liquidio_xmit, 1877 .ndo_get_stats64 = liquidio_get_stats64, 1878 .ndo_set_mac_address = liquidio_set_mac, 1879 .ndo_set_rx_mode = liquidio_set_mcast_list, 1880 .ndo_tx_timeout = liquidio_tx_timeout, 1881 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 1882 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 1883 .ndo_change_mtu = liquidio_change_mtu, 1884 .ndo_eth_ioctl = liquidio_ioctl, 1885 .ndo_fix_features = liquidio_fix_features, 1886 .ndo_set_features = liquidio_set_features, 1887 }; 1888 1889 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 1890 { 1891 struct octeon_device *oct = (struct octeon_device *)buf; 1892 struct octeon_recv_pkt *recv_pkt = 
recv_info->recv_pkt; 1893 union oct_link_status *ls; 1894 int gmxport = 0; 1895 int i; 1896 1897 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 1898 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 1899 recv_pkt->buffer_size[0], 1900 recv_pkt->rh.r_nic_info.gmxport); 1901 goto nic_info_err; 1902 } 1903 1904 gmxport = recv_pkt->rh.r_nic_info.gmxport; 1905 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 1906 OCT_DROQ_INFO_SIZE); 1907 1908 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 1909 1910 for (i = 0; i < oct->ifcount; i++) { 1911 if (oct->props[i].gmxport == gmxport) { 1912 update_link_status(oct->props[i].netdev, ls); 1913 break; 1914 } 1915 } 1916 1917 nic_info_err: 1918 for (i = 0; i < recv_pkt->buffer_count; i++) 1919 recv_buffer_free(recv_pkt->buffer_ptr[i]); 1920 octeon_free_recv_info(recv_info); 1921 return 0; 1922 } 1923 1924 /** 1925 * setup_nic_devices - Setup network interfaces 1926 * @octeon_dev: octeon device 1927 * 1928 * Called during init time for each device. It assumes the NIC 1929 * is already up and running. The link information for each 1930 * interface is passed in link_info. 1931 */ 1932 static int setup_nic_devices(struct octeon_device *octeon_dev) 1933 { 1934 int retval, num_iqueues, num_oqueues; 1935 u32 resp_size, data_size; 1936 struct liquidio_if_cfg_resp *resp; 1937 struct octeon_soft_command *sc; 1938 union oct_nic_if_cfg if_cfg; 1939 struct octdev_props *props; 1940 struct net_device *netdev; 1941 struct lio_version *vdata; 1942 struct lio *lio = NULL; 1943 u8 mac[ETH_ALEN], i, j; 1944 u32 ifidx_or_pfnum; 1945 1946 ifidx_or_pfnum = octeon_dev->pf_num; 1947 1948 /* This is to handle link status changes */ 1949 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, 1950 lio_nic_info, octeon_dev); 1951 1952 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 1953 * They are handled directly. 1954 */ 1955 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 1956 free_netbuf); 1957 1958 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 1959 free_netsgbuf); 1960 1961 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 1962 free_netsgbuf_with_resp); 1963 1964 for (i = 0; i < octeon_dev->ifcount; i++) { 1965 resp_size = sizeof(struct liquidio_if_cfg_resp); 1966 data_size = sizeof(struct lio_version); 1967 sc = (struct octeon_soft_command *) 1968 octeon_alloc_soft_command(octeon_dev, data_size, 1969 resp_size, 0); 1970 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1971 vdata = (struct lio_version *)sc->virtdptr; 1972 1973 *((u64 *)vdata) = 0; 1974 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 1975 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 1976 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 1977 1978 if_cfg.u64 = 0; 1979 1980 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; 1981 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; 1982 if_cfg.s.base_queue = 0; 1983 1984 sc->iq_no = 0; 1985 1986 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 1987 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 1988 0); 1989 1990 init_completion(&sc->complete); 1991 sc->sc_status = OCTEON_REQUEST_PENDING; 1992 1993 retval = octeon_send_soft_command(octeon_dev, sc); 1994 if (retval == IQ_SEND_FAILED) { 1995 dev_err(&octeon_dev->pci_dev->dev, 1996 "iq/oq config failed status: %x\n", retval); 1997 /* Soft instr is freed by driver in case of failure. 
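			 * On success, the command is reclaimed by the
			 * response path only after the caller sets
			 * sc->caller_is_done.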
	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, 0);
		if (!sc) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to allocate soft command\n");
			return -ENOMEM;
		}
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if_cfg.u64 = 0;

		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.base_queue = 0;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
					    0);

		init_completion(&sc->complete);
		sc->sc_status = OCTEON_REQUEST_PENDING;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n", retval);
			/* Soft instr is freed by driver in case of failure. */
			octeon_free_soft_command(octeon_dev, sc);
			return -EIO;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed out.
		 */
		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed, retval = %d\n", retval);
			WRITE_ONCE(sc->caller_is_done, true);
			return -EIO;
		}

		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
			 32, "%s",
			 resp->cfg_info.liquidio_firmware_version);

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);

		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;

		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}

		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
		lio->linfo.macaddr_is_admin_asgnd =
			resp->cfg_info.linfo.macaddr_is_admin_asgnd;
		lio->linfo.macaddr_spoofchk =
			resp->cfg_info.linfo.macaddr_spoofchk;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		lio->dev_capability = NETIF_F_HIGHDMA
				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				      | NETIF_F_SG | NETIF_F_RXCSUM
				      | NETIF_F_TSO | NETIF_F_TSO6
				      | NETIF_F_GRO
				      | NETIF_F_LRO;
		netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features =
			(lio->enc_dev_capability & ~NETIF_F_LRO);
		netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;

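		/* Derive the netdev feature sets from dev_capability:
		 * VLAN CTAG offloads are enabled by default, LRO is left
		 * out of the initially enabled set, and CTAG RX is not
		 * exposed in hw_features so it cannot be toggled later.
		 */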
		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		WRITE_ONCE(sc->caller_is_done, true);

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < ETH_ALEN; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */
		eth_hw_addr_set(netdev, mac);

		if (liquidio_setup_io_queues(octeon_dev, i,
					     lio->linfo.num_txpciq,
					     lio->linfo.num_rxpciq)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_free;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		/* For VFs, enable Octeon device interrupts here,
		 * as this is contingent upon IO queue setup
		 */
		octeon_dev->fn_list.enable_interrupt(octeon_dev,
						     OCTEON_ALL_INTR);

		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_free;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_free;

		if (setup_rx_oom_poll_fn(netdev))
			goto setup_nic_dev_free;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_free;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_dev->no_speed_setting = 1;
	}

	return 0;

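/* Error unwind: tear down the netdevs that were fully set up in earlier
 * iterations before reporting the failure.
 */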
setup_nic_dev_free:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}

setup_nic_dev_done:

	return -ENODEV;
}

/**
 * liquidio_init_nic_module - initialize the NIC
 * @oct: octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running.
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	int num_nic_ports = 1;
	int i, retval = 0;

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* Only the default iq and oq were initialized; initialize the rest
	 * as well and run the port_config command for each port.
	 */
	oct->ifcount = num_nic_ports;
	memset(oct->props, 0,
	       sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * octeon_device_init - Device initialization for each Octeon device that is probed
 * @oct: octeon device
 */
static int octeon_device_init(struct octeon_device *oct)
{
	u32 rev_id;
	int j;

	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(oct))
		return 1;
	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);

	oct->chip_id = OCTEON_CN23XX_VF_VID;
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	if (cn23xx_setup_octeon_vf_device(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);

	oct->app_mode = CVM_DRV_NIC_APP;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
		return 1;
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(oct)) {
		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(oct)) {
		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);

	if (oct->fn_list.setup_mbox(oct)) {
		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);

	if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
		 oct->sriov_info.rings_per_vf);

	/* Setup the interrupt handler and record the INT SUM register address */
	if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
		return 1;

	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);

	/* ***************************************************************
	 * The interrupts need to be enabled for the PF<-->VF handshake.
	 * They are [re]-enabled after the PF<-->VF handshake so that the
	 * correct OQ tick value is used (i.e. the value retrieved from
	 * the PF as part of the handshake).
	 */

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	if (cn23xx_octeon_pfvf_handshake(oct))
		return 1;

	/* Here we [re]-enable the interrupts so that the correct OQ tick value
	 * is used (i.e. the value that was retrieved during the handshake)
	 */

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
	/* *************************************************************** */

	/* Enable the input and output queues for this Octeon device */
	if (oct->fn_list.enable_io_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
		return 1;
	}

	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);

	atomic_set(&oct->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < oct->num_oqs; j++)
		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (liquidio_init_nic_module(oct))
		return 1;

	return 0;
}

static int __init liquidio_vf_init(void)
{
	octeon_init_device_list(0);
	return pci_register_driver(&liquidio_vf_pci_driver);
}

static void __exit liquidio_vf_exit(void)
{
	pci_unregister_driver(&liquidio_vf_pci_driver);

	pr_info("LiquidIO_VF network module is now unloaded\n");
}

module_init(liquidio_vf_init);
module_exit(liquidio_vf_exit);