1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <net/vxlan.h> 22 #include "liquidio_common.h" 23 #include "octeon_droq.h" 24 #include "octeon_iq.h" 25 #include "response_manager.h" 26 #include "octeon_device.h" 27 #include "octeon_nic.h" 28 #include "octeon_main.h" 29 #include "octeon_network.h" 30 #include "cn23xx_vf_device.h" 31 32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); 34 MODULE_LICENSE("GPL"); 35 36 static int debug = -1; 37 module_param(debug, int, 0644); 38 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 39 40 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 41 42 struct oct_timestamp_resp { 43 u64 rh; 44 u64 timestamp; 45 u64 status; 46 }; 47 48 union tx_info { 49 u64 u64; 50 struct { 51 #ifdef __BIG_ENDIAN_BITFIELD 52 u16 gso_size; 53 u16 gso_segs; 54 u32 reserved; 55 #else 56 u32 reserved; 57 u16 gso_segs; 58 u16 gso_size; 59 #endif 60 } s; 61 }; 62 63 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 64 #define OCTNIC_GSO_MAX_SIZE \ 65 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 66 67 static int 68 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 69 static void liquidio_vf_remove(struct pci_dev *pdev); 70 static int octeon_device_init(struct octeon_device *oct); 71 static int liquidio_stop(struct net_device *netdev); 72 73 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 74 { 75 struct octeon_device_priv *oct_priv = 76 (struct octeon_device_priv *)oct->priv; 77 int retry = MAX_IO_PENDING_PKT_COUNT; 78 int pkt_cnt = 0, pending_pkts; 79 int i; 80 81 do { 82 pending_pkts = 0; 83 84 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 85 if (!(oct->io_qmask.oq & BIT_ULL(i))) 86 continue; 87 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 88 } 89 if (pkt_cnt > 0) { 90 pending_pkts += pkt_cnt; 91 tasklet_schedule(&oct_priv->droq_tasklet); 92 } 93 pkt_cnt = 0; 94 schedule_timeout_uninterruptible(1); 95 96 } while (retry-- && pending_pkts); 97 98 return pkt_cnt; 99 } 100 101 /** 102 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc 103 * @oct: Pointer to Octeon device 104 */ 105 static void pcierror_quiesce_device(struct octeon_device *oct) 106 { 107 int i; 108 109 /* Disable the input and output queues now. No more packets will 110 * arrive from Octeon, but we should wait for all packet processing 111 * to finish. 
112 */ 113 114 /* To allow for in-flight requests */ 115 schedule_timeout_uninterruptible(100); 116 117 if (wait_for_pending_requests(oct)) 118 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 119 120 /* Force all requests waiting to be fetched by OCTEON to complete. */ 121 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 122 struct octeon_instr_queue *iq; 123 124 if (!(oct->io_qmask.iq & BIT_ULL(i))) 125 continue; 126 iq = oct->instr_queue[i]; 127 128 if (atomic_read(&iq->instr_pending)) { 129 spin_lock_bh(&iq->lock); 130 iq->fill_cnt = 0; 131 iq->octeon_read_index = iq->host_write_index; 132 iq->stats.instr_processed += 133 atomic_read(&iq->instr_pending); 134 lio_process_iq_request_list(oct, iq, 0); 135 spin_unlock_bh(&iq->lock); 136 } 137 } 138 139 /* Force all pending ordered list requests to time out. */ 140 lio_process_ordered_list(oct, 1); 141 142 /* We do not need to wait for output queue packets to be processed. */ 143 } 144 145 /** 146 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status 147 * @dev: Pointer to PCI device 148 */ 149 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 150 { 151 u32 status, mask; 152 int pos = 0x100; 153 154 pr_info("%s :\n", __func__); 155 156 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 157 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 158 if (dev->error_state == pci_channel_io_normal) 159 status &= ~mask; /* Clear corresponding nonfatal bits */ 160 else 161 status &= mask; /* Clear corresponding fatal bits */ 162 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 163 } 164 165 /** 166 * stop_pci_io - Stop all PCI IO to a given device 167 * @oct: Pointer to Octeon device 168 */ 169 static void stop_pci_io(struct octeon_device *oct) 170 { 171 struct msix_entry *msix_entries; 172 int i; 173 174 /* No more instructions will be forwarded. */ 175 atomic_set(&oct->status, OCT_DEV_IN_RESET); 176 177 for (i = 0; i < oct->ifcount; i++) 178 netif_device_detach(oct->props[i].netdev); 179 180 /* Disable interrupts */ 181 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 182 183 pcierror_quiesce_device(oct); 184 if (oct->msix_on) { 185 msix_entries = (struct msix_entry *)oct->msix_entries; 186 for (i = 0; i < oct->num_msix_irqs; i++) { 187 /* clear the affinity_cpumask */ 188 irq_set_affinity_hint(msix_entries[i].vector, 189 NULL); 190 free_irq(msix_entries[i].vector, 191 &oct->ioq_vector[i]); 192 } 193 pci_disable_msix(oct->pci_dev); 194 kfree(oct->msix_entries); 195 oct->msix_entries = NULL; 196 octeon_free_ioq_vector(oct); 197 } 198 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 199 lio_get_state_string(&oct->status)); 200 201 /* making it a common function for all OCTEON models */ 202 cleanup_aer_uncorrect_error_status(oct->pci_dev); 203 204 pci_disable_device(oct->pci_dev); 205 } 206 207 /** 208 * liquidio_pcie_error_detected - called when PCI error is detected 209 * @pdev: Pointer to PCI device 210 * @state: The current pci connection state 211 * 212 * This function is called after a PCI bus error affecting 213 * this device has been detected. 
214 */ 215 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 216 pci_channel_state_t state) 217 { 218 struct octeon_device *oct = pci_get_drvdata(pdev); 219 220 /* Non-correctable Non-fatal errors */ 221 if (state == pci_channel_io_normal) { 222 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 223 cleanup_aer_uncorrect_error_status(oct->pci_dev); 224 return PCI_ERS_RESULT_CAN_RECOVER; 225 } 226 227 /* Non-correctable Fatal errors */ 228 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 229 stop_pci_io(oct); 230 231 return PCI_ERS_RESULT_DISCONNECT; 232 } 233 234 /* For PCI-E Advanced Error Recovery (AER) Interface */ 235 static const struct pci_error_handlers liquidio_vf_err_handler = { 236 .error_detected = liquidio_pcie_error_detected, 237 }; 238 239 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 240 { 241 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 243 }, 244 { 245 0, 0, 0, 0, 0, 0, 0 246 } 247 }; 248 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 249 250 static struct pci_driver liquidio_vf_pci_driver = { 251 .name = "LiquidIO_VF", 252 .id_table = liquidio_vf_pci_tbl, 253 .probe = liquidio_vf_probe, 254 .remove = liquidio_vf_remove, 255 .err_handler = &liquidio_vf_err_handler, /* For AER */ 256 }; 257 258 /** 259 * print_link_info - Print link information 260 * @netdev: network device 261 */ 262 static void print_link_info(struct net_device *netdev) 263 { 264 struct lio *lio = GET_LIO(netdev); 265 266 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 267 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 268 struct oct_link_info *linfo = &lio->linfo; 269 270 if (linfo->link.s.link_up) { 271 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 272 linfo->link.s.speed, 273 (linfo->link.s.duplex) ? "Full" : "Half"); 274 } else { 275 netif_info(lio, link, lio->netdev, "Link Down\n"); 276 } 277 } 278 } 279 280 /** 281 * octnet_link_status_change - Routine to notify MTU change 282 * @work: work_struct data structure 283 */ 284 static void octnet_link_status_change(struct work_struct *work) 285 { 286 struct cavium_wk *wk = (struct cavium_wk *)work; 287 struct lio *lio = (struct lio *)wk->ctxptr; 288 289 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. 290 * this API is invoked only when new max-MTU of the interface is 291 * less than current MTU. 
292 */ 293 rtnl_lock(); 294 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); 295 rtnl_unlock(); 296 } 297 298 /** 299 * setup_link_status_change_wq - Sets up the mtu status change work 300 * @netdev: network device 301 */ 302 static int setup_link_status_change_wq(struct net_device *netdev) 303 { 304 struct lio *lio = GET_LIO(netdev); 305 struct octeon_device *oct = lio->oct_dev; 306 307 lio->link_status_wq.wq = alloc_workqueue("link-status", 308 WQ_MEM_RECLAIM, 0); 309 if (!lio->link_status_wq.wq) { 310 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 311 return -1; 312 } 313 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 314 octnet_link_status_change); 315 lio->link_status_wq.wk.ctxptr = lio; 316 317 return 0; 318 } 319 320 static void cleanup_link_status_change_wq(struct net_device *netdev) 321 { 322 struct lio *lio = GET_LIO(netdev); 323 324 if (lio->link_status_wq.wq) { 325 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 326 destroy_workqueue(lio->link_status_wq.wq); 327 } 328 } 329 330 /** 331 * update_link_status - Update link status 332 * @netdev: network device 333 * @ls: link status structure 334 * 335 * Called on receipt of a link status response from the core application to 336 * update each interface's link status. 337 */ 338 static void update_link_status(struct net_device *netdev, 339 union oct_link_status *ls) 340 { 341 struct lio *lio = GET_LIO(netdev); 342 int current_max_mtu = lio->linfo.link.s.mtu; 343 struct octeon_device *oct = lio->oct_dev; 344 345 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 346 lio->linfo.link.u64 = ls->u64; 347 348 print_link_info(netdev); 349 lio->link_changes++; 350 351 if (lio->linfo.link.s.link_up) { 352 netif_carrier_on(netdev); 353 wake_txqs(netdev); 354 } else { 355 netif_carrier_off(netdev); 356 stop_txqs(netdev); 357 } 358 359 if (lio->linfo.link.s.mtu != current_max_mtu) { 360 dev_info(&oct->pci_dev->dev, 361 "Max MTU Changed from %d to %d\n", 362 current_max_mtu, lio->linfo.link.s.mtu); 363 netdev->max_mtu = lio->linfo.link.s.mtu; 364 } 365 366 if (lio->linfo.link.s.mtu < netdev->mtu) { 367 dev_warn(&oct->pci_dev->dev, 368 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", 369 netdev->mtu, lio->linfo.link.s.mtu); 370 queue_delayed_work(lio->link_status_wq.wq, 371 &lio->link_status_wq.wk.work, 0); 372 } 373 } 374 } 375 376 /** 377 * liquidio_vf_probe - PCI probe handler 378 * @pdev: PCI device structure 379 * @ent: unused 380 */ 381 static int 382 liquidio_vf_probe(struct pci_dev *pdev, 383 const struct pci_device_id __maybe_unused *ent) 384 { 385 struct octeon_device *oct_dev = NULL; 386 387 oct_dev = octeon_allocate_device(pdev->device, 388 sizeof(struct octeon_device_priv)); 389 390 if (!oct_dev) { 391 dev_err(&pdev->dev, "Unable to allocate device\n"); 392 return -ENOMEM; 393 } 394 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 395 396 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 397 (u32)pdev->vendor, (u32)pdev->device); 398 399 /* Assign octeon_device for this device to the private data area. */ 400 pci_set_drvdata(pdev, oct_dev); 401 402 /* set linux specific device pointer */ 403 oct_dev->pci_dev = pdev; 404 405 oct_dev->subsystem_id = pdev->subsystem_vendor | 406 (pdev->subsystem_device << 16); 407 408 if (octeon_device_init(oct_dev)) { 409 liquidio_vf_remove(pdev); 410 return -ENOMEM; 411 } 412 413 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 414 415 return 0; 416 } 417 418 /** 419 * octeon_pci_flr - PCI FLR for each Octeon device. 
420 * @oct: octeon device 421 */ 422 static void octeon_pci_flr(struct octeon_device *oct) 423 { 424 pci_save_state(oct->pci_dev); 425 426 pci_cfg_access_lock(oct->pci_dev); 427 428 /* Quiesce the device completely */ 429 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 430 PCI_COMMAND_INTX_DISABLE); 431 432 pcie_flr(oct->pci_dev); 433 434 pci_cfg_access_unlock(oct->pci_dev); 435 436 pci_restore_state(oct->pci_dev); 437 } 438 439 /** 440 * octeon_destroy_resources - Destroy resources associated with octeon device 441 * @oct: octeon device 442 */ 443 static void octeon_destroy_resources(struct octeon_device *oct) 444 { 445 struct octeon_device_priv *oct_priv = 446 (struct octeon_device_priv *)oct->priv; 447 struct msix_entry *msix_entries; 448 int i; 449 450 switch (atomic_read(&oct->status)) { 451 case OCT_DEV_RUNNING: 452 case OCT_DEV_CORE_OK: 453 /* No more instructions will be forwarded. */ 454 atomic_set(&oct->status, OCT_DEV_IN_RESET); 455 456 oct->app_mode = CVM_DRV_INVALID_APP; 457 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 458 lio_get_state_string(&oct->status)); 459 460 schedule_timeout_uninterruptible(HZ / 10); 461 462 fallthrough; 463 case OCT_DEV_HOST_OK: 464 case OCT_DEV_IO_QUEUES_DONE: 465 if (lio_wait_for_instr_fetch(oct)) 466 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 467 468 if (wait_for_pending_requests(oct)) 469 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 470 471 /* Disable the input and output queues now. No more packets will 472 * arrive from Octeon, but we should wait for all packet 473 * processing to finish. 474 */ 475 oct->fn_list.disable_io_queues(oct); 476 477 if (lio_wait_for_oq_pkts(oct)) 478 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 479 480 /* Force all requests waiting to be fetched by OCTEON to 481 * complete. 
482 */ 483 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 484 struct octeon_instr_queue *iq; 485 486 if (!(oct->io_qmask.iq & BIT_ULL(i))) 487 continue; 488 iq = oct->instr_queue[i]; 489 490 if (atomic_read(&iq->instr_pending)) { 491 spin_lock_bh(&iq->lock); 492 iq->fill_cnt = 0; 493 iq->octeon_read_index = iq->host_write_index; 494 iq->stats.instr_processed += 495 atomic_read(&iq->instr_pending); 496 lio_process_iq_request_list(oct, iq, 0); 497 spin_unlock_bh(&iq->lock); 498 } 499 } 500 501 lio_process_ordered_list(oct, 1); 502 octeon_free_sc_done_list(oct); 503 octeon_free_sc_zombie_list(oct); 504 505 fallthrough; 506 case OCT_DEV_INTR_SET_DONE: 507 /* Disable interrupts */ 508 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 509 510 if (oct->msix_on) { 511 msix_entries = (struct msix_entry *)oct->msix_entries; 512 for (i = 0; i < oct->num_msix_irqs; i++) { 513 if (oct->ioq_vector[i].vector) { 514 irq_set_affinity_hint( 515 msix_entries[i].vector, 516 NULL); 517 free_irq(msix_entries[i].vector, 518 &oct->ioq_vector[i]); 519 oct->ioq_vector[i].vector = 0; 520 } 521 } 522 pci_disable_msix(oct->pci_dev); 523 kfree(oct->msix_entries); 524 oct->msix_entries = NULL; 525 kfree(oct->irq_name_storage); 526 oct->irq_name_storage = NULL; 527 } 528 /* Soft reset the octeon device before exiting */ 529 if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE)) 530 octeon_pci_flr(oct); 531 else 532 cn23xx_vf_ask_pf_to_do_flr(oct); 533 534 fallthrough; 535 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 536 octeon_free_ioq_vector(oct); 537 538 fallthrough; 539 case OCT_DEV_MBOX_SETUP_DONE: 540 oct->fn_list.free_mbox(oct); 541 542 fallthrough; 543 case OCT_DEV_IN_RESET: 544 case OCT_DEV_DROQ_INIT_DONE: 545 mdelay(100); 546 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 547 if (!(oct->io_qmask.oq & BIT_ULL(i))) 548 continue; 549 octeon_delete_droq(oct, i); 550 } 551 552 fallthrough; 553 case OCT_DEV_RESP_LIST_INIT_DONE: 554 octeon_delete_response_list(oct); 555 556 fallthrough; 557 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 558 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 559 if (!(oct->io_qmask.iq & BIT_ULL(i))) 560 continue; 561 octeon_delete_instr_queue(oct, i); 562 } 563 564 fallthrough; 565 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 566 octeon_free_sc_buffer_pool(oct); 567 568 fallthrough; 569 case OCT_DEV_DISPATCH_INIT_DONE: 570 octeon_delete_dispatch_list(oct); 571 cancel_delayed_work_sync(&oct->nic_poll_work.work); 572 573 fallthrough; 574 case OCT_DEV_PCI_MAP_DONE: 575 octeon_unmap_pci_barx(oct, 0); 576 octeon_unmap_pci_barx(oct, 1); 577 578 fallthrough; 579 case OCT_DEV_PCI_ENABLE_DONE: 580 /* Disable the device, releasing the PCI INT */ 581 pci_disable_device(oct->pci_dev); 582 583 fallthrough; 584 case OCT_DEV_BEGIN_STATE: 585 /* Nothing to be done here either */ 586 break; 587 } 588 589 tasklet_kill(&oct_priv->droq_tasklet); 590 } 591 592 /** 593 * send_rx_ctrl_cmd - Send Rx control command 594 * @lio: per-network private data 595 * @start_stop: whether to start or stop 596 */ 597 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop) 598 { 599 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 600 struct octeon_soft_command *sc; 601 union octnet_cmd *ncmd; 602 int retval; 603 604 if (oct->props[lio->ifidx].rx_on == start_stop) 605 return 0; 606 607 sc = (struct octeon_soft_command *) 608 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 609 16, 0); 610 if (!sc) { 611 netif_info(lio, rx_err, lio->netdev, 612 "Failed to allocate octeon_soft_command struct\n"); 613 return 
-ENOMEM; 614 } 615 616 ncmd = (union octnet_cmd *)sc->virtdptr; 617 618 ncmd->u64 = 0; 619 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 620 ncmd->s.param1 = start_stop; 621 622 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 623 624 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 625 626 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 627 OPCODE_NIC_CMD, 0, 0, 0); 628 629 init_completion(&sc->complete); 630 sc->sc_status = OCTEON_REQUEST_PENDING; 631 632 retval = octeon_send_soft_command(oct, sc); 633 if (retval == IQ_SEND_FAILED) { 634 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 635 octeon_free_soft_command(oct, sc); 636 } else { 637 /* Sleep on a wait queue till the cond flag indicates that the 638 * response arrived or timed-out. 639 */ 640 retval = wait_for_sc_completion_timeout(oct, sc, 0); 641 if (retval) 642 return retval; 643 644 oct->props[lio->ifidx].rx_on = start_stop; 645 WRITE_ONCE(sc->caller_is_done, true); 646 } 647 648 return retval; 649 } 650 651 /** 652 * liquidio_destroy_nic_device - Destroy NIC device interface 653 * @oct: octeon device 654 * @ifidx: which interface to destroy 655 * 656 * Cleanup associated with each interface for an Octeon device when NIC 657 * module is being unloaded or if initialization fails during load. 658 */ 659 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 660 { 661 struct net_device *netdev = oct->props[ifidx].netdev; 662 struct octeon_device_priv *oct_priv = 663 (struct octeon_device_priv *)oct->priv; 664 struct napi_struct *napi, *n; 665 struct lio *lio; 666 667 if (!netdev) { 668 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 669 __func__, ifidx); 670 return; 671 } 672 673 lio = GET_LIO(netdev); 674 675 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 676 677 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 678 liquidio_stop(netdev); 679 680 if (oct->props[lio->ifidx].napi_enabled == 1) { 681 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 682 napi_disable(napi); 683 684 oct->props[lio->ifidx].napi_enabled = 0; 685 686 oct->droq[0]->ops.poll_mode = 0; 687 } 688 689 /* Delete NAPI */ 690 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 691 netif_napi_del(napi); 692 693 tasklet_enable(&oct_priv->droq_tasklet); 694 695 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 696 unregister_netdev(netdev); 697 698 cleanup_rx_oom_poll_fn(netdev); 699 700 cleanup_link_status_change_wq(netdev); 701 702 lio_delete_glists(lio); 703 704 free_netdev(netdev); 705 706 oct->props[ifidx].gmxport = -1; 707 708 oct->props[ifidx].netdev = NULL; 709 } 710 711 /** 712 * liquidio_stop_nic_module - Stop complete NIC functionality 713 * @oct: octeon device 714 */ 715 static int liquidio_stop_nic_module(struct octeon_device *oct) 716 { 717 struct lio *lio; 718 int i, j; 719 720 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 721 if (!oct->ifcount) { 722 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 723 return 1; 724 } 725 726 spin_lock_bh(&oct->cmd_resp_wqlock); 727 oct->cmd_resp_state = OCT_DRV_OFFLINE; 728 spin_unlock_bh(&oct->cmd_resp_wqlock); 729 730 for (i = 0; i < oct->ifcount; i++) { 731 lio = GET_LIO(oct->props[i].netdev); 732 for (j = 0; j < oct->num_oqs; j++) 733 octeon_unregister_droq_ops(oct, 734 lio->linfo.rxpciq[j].s.q_no); 735 } 736 737 for (i = 0; i < oct->ifcount; i++) 738 liquidio_destroy_nic_device(oct, i); 739 740 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 741 return 0; 742 } 
743 744 /** 745 * liquidio_vf_remove - Cleans up resources at unload time 746 * @pdev: PCI device structure 747 */ 748 static void liquidio_vf_remove(struct pci_dev *pdev) 749 { 750 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 751 752 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 753 754 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 755 liquidio_stop_nic_module(oct_dev); 756 757 /* Reset the octeon device and cleanup all memory allocated for 758 * the octeon device by driver. 759 */ 760 octeon_destroy_resources(oct_dev); 761 762 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 763 764 /* This octeon device has been removed. Update the global 765 * data structure to reflect this. Free the device structure. 766 */ 767 octeon_free_device_mem(oct_dev); 768 } 769 770 /** 771 * octeon_pci_os_setup - PCI initialization for each Octeon device. 772 * @oct: octeon device 773 */ 774 static int octeon_pci_os_setup(struct octeon_device *oct) 775 { 776 #ifdef CONFIG_PCI_IOV 777 /* setup PCI stuff first */ 778 if (!oct->pci_dev->physfn) 779 octeon_pci_flr(oct); 780 #endif 781 782 if (pci_enable_device(oct->pci_dev)) { 783 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 784 return 1; 785 } 786 787 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 788 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 789 pci_disable_device(oct->pci_dev); 790 return 1; 791 } 792 793 /* Enable PCI DMA Master. */ 794 pci_set_master(oct->pci_dev); 795 796 return 0; 797 } 798 799 /** 800 * free_netbuf - Unmap and free network buffer 801 * @buf: buffer 802 */ 803 static void free_netbuf(void *buf) 804 { 805 struct octnet_buf_free_info *finfo; 806 struct sk_buff *skb; 807 struct lio *lio; 808 809 finfo = (struct octnet_buf_free_info *)buf; 810 skb = finfo->skb; 811 lio = finfo->lio; 812 813 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 814 DMA_TO_DEVICE); 815 816 tx_buffer_free(skb); 817 } 818 819 /** 820 * free_netsgbuf - Unmap and free gather buffer 821 * @buf: buffer 822 */ 823 static void free_netsgbuf(void *buf) 824 { 825 struct octnet_buf_free_info *finfo; 826 struct octnic_gather *g; 827 struct sk_buff *skb; 828 int i, frags, iq; 829 struct lio *lio; 830 831 finfo = (struct octnet_buf_free_info *)buf; 832 skb = finfo->skb; 833 lio = finfo->lio; 834 g = finfo->g; 835 frags = skb_shinfo(skb)->nr_frags; 836 837 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 838 g->sg[0].ptr[0], (skb->len - skb->data_len), 839 DMA_TO_DEVICE); 840 841 i = 1; 842 while (frags--) { 843 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 844 845 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 846 g->sg[(i >> 2)].ptr[(i & 3)], 847 skb_frag_size(frag), DMA_TO_DEVICE); 848 i++; 849 } 850 851 iq = skb_iq(lio->oct_dev, skb); 852 853 spin_lock(&lio->glist_lock[iq]); 854 list_add_tail(&g->list, &lio->glist[iq]); 855 spin_unlock(&lio->glist_lock[iq]); 856 857 tx_buffer_free(skb); 858 } 859 860 /** 861 * free_netsgbuf_with_resp - Unmap and free gather buffer with response 862 * @buf: buffer 863 */ 864 static void free_netsgbuf_with_resp(void *buf) 865 { 866 struct octnet_buf_free_info *finfo; 867 struct octeon_soft_command *sc; 868 struct octnic_gather *g; 869 struct sk_buff *skb; 870 int i, frags, iq; 871 struct lio *lio; 872 873 sc = (struct octeon_soft_command *)buf; 874 skb = (struct sk_buff *)sc->callback_arg; 875 finfo = (struct octnet_buf_free_info *)&skb->cb; 876 877 lio = finfo->lio; 878 g = finfo->g; 879 frags = skb_shinfo(skb)->nr_frags; 880 881 
dma_unmap_single(&lio->oct_dev->pci_dev->dev, 882 g->sg[0].ptr[0], (skb->len - skb->data_len), 883 DMA_TO_DEVICE); 884 885 i = 1; 886 while (frags--) { 887 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 888 889 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 890 g->sg[(i >> 2)].ptr[(i & 3)], 891 skb_frag_size(frag), DMA_TO_DEVICE); 892 i++; 893 } 894 895 iq = skb_iq(lio->oct_dev, skb); 896 897 spin_lock(&lio->glist_lock[iq]); 898 list_add_tail(&g->list, &lio->glist[iq]); 899 spin_unlock(&lio->glist_lock[iq]); 900 901 /* Don't free the skb yet */ 902 } 903 904 /** 905 * liquidio_open - Net device open for LiquidIO 906 * @netdev: network device 907 */ 908 static int liquidio_open(struct net_device *netdev) 909 { 910 struct lio *lio = GET_LIO(netdev); 911 struct octeon_device *oct = lio->oct_dev; 912 struct octeon_device_priv *oct_priv = 913 (struct octeon_device_priv *)oct->priv; 914 struct napi_struct *napi, *n; 915 int ret = 0; 916 917 if (!oct->props[lio->ifidx].napi_enabled) { 918 tasklet_disable(&oct_priv->droq_tasklet); 919 920 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 921 napi_enable(napi); 922 923 oct->props[lio->ifidx].napi_enabled = 1; 924 925 oct->droq[0]->ops.poll_mode = 1; 926 } 927 928 ifstate_set(lio, LIO_IFSTATE_RUNNING); 929 930 /* Ready for link status updates */ 931 lio->intf_open = 1; 932 933 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 934 start_txqs(netdev); 935 936 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); 937 lio->stats_wk.ctxptr = lio; 938 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies 939 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); 940 941 /* tell Octeon to start forwarding packets to host */ 942 ret = send_rx_ctrl_cmd(lio, 1); 943 if (ret) 944 return ret; 945 946 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); 947 948 return ret; 949 } 950 951 /** 952 * liquidio_stop - jNet device stop for LiquidIO 953 * @netdev: network device 954 */ 955 static int liquidio_stop(struct net_device *netdev) 956 { 957 struct lio *lio = GET_LIO(netdev); 958 struct octeon_device *oct = lio->oct_dev; 959 struct octeon_device_priv *oct_priv = 960 (struct octeon_device_priv *)oct->priv; 961 struct napi_struct *napi, *n; 962 int ret = 0; 963 964 /* tell Octeon to stop forwarding packets to host */ 965 ret = send_rx_ctrl_cmd(lio, 0); 966 if (ret) 967 return ret; 968 969 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 970 /* Inform that netif carrier is down */ 971 lio->intf_open = 0; 972 lio->linfo.link.s.link_up = 0; 973 974 netif_carrier_off(netdev); 975 lio->link_changes++; 976 977 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 978 979 stop_txqs(netdev); 980 981 /* Wait for any pending Rx descriptors */ 982 if (lio_wait_for_clean_oq(oct)) 983 netif_info(lio, rx_err, lio->netdev, 984 "Proceeding with stop interface after partial RX desc processing\n"); 985 986 if (oct->props[lio->ifidx].napi_enabled == 1) { 987 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 988 napi_disable(napi); 989 990 oct->props[lio->ifidx].napi_enabled = 0; 991 992 oct->droq[0]->ops.poll_mode = 0; 993 994 tasklet_enable(&oct_priv->droq_tasklet); 995 } 996 997 cancel_delayed_work_sync(&lio->stats_wk.work); 998 999 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 1000 1001 return ret; 1002 } 1003 1004 /** 1005 * get_new_flags - Converts a mask based on net device flags 1006 * @netdev: network device 1007 * 1008 * This routine generates a octnet_ifflags mask from the net 
device flags 1009 * received from the OS. 1010 */ 1011 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 1012 { 1013 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1014 1015 if (netdev->flags & IFF_PROMISC) 1016 f |= OCTNET_IFFLAG_PROMISC; 1017 1018 if (netdev->flags & IFF_ALLMULTI) 1019 f |= OCTNET_IFFLAG_ALLMULTI; 1020 1021 if (netdev->flags & IFF_MULTICAST) { 1022 f |= OCTNET_IFFLAG_MULTICAST; 1023 1024 /* Accept all multicast addresses if there are more than we 1025 * can handle 1026 */ 1027 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1028 f |= OCTNET_IFFLAG_ALLMULTI; 1029 } 1030 1031 if (netdev->flags & IFF_BROADCAST) 1032 f |= OCTNET_IFFLAG_BROADCAST; 1033 1034 return f; 1035 } 1036 1037 static void liquidio_set_uc_list(struct net_device *netdev) 1038 { 1039 struct lio *lio = GET_LIO(netdev); 1040 struct octeon_device *oct = lio->oct_dev; 1041 struct octnic_ctrl_pkt nctrl; 1042 struct netdev_hw_addr *ha; 1043 u64 *mac; 1044 1045 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1046 return; 1047 1048 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1049 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1050 return; 1051 } 1052 1053 lio->netdev_uc_count = netdev_uc_count(netdev); 1054 1055 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1056 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1057 nctrl.ncmd.s.more = lio->netdev_uc_count; 1058 nctrl.ncmd.s.param1 = oct->vf_num; 1059 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1060 nctrl.netpndev = (u64)netdev; 1061 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1062 1063 /* copy all the addresses into the udd */ 1064 mac = &nctrl.udd[0]; 1065 netdev_for_each_uc_addr(ha, netdev) { 1066 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1067 mac++; 1068 } 1069 1070 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1071 } 1072 1073 /** 1074 * liquidio_set_mcast_list - Net device set_multicast_list 1075 * @netdev: network device 1076 */ 1077 static void liquidio_set_mcast_list(struct net_device *netdev) 1078 { 1079 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1080 struct lio *lio = GET_LIO(netdev); 1081 struct octeon_device *oct = lio->oct_dev; 1082 struct octnic_ctrl_pkt nctrl; 1083 struct netdev_hw_addr *ha; 1084 u64 *mc; 1085 int ret; 1086 1087 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1088 1089 /* Create a ctrl pkt command to be sent to core app. */ 1090 nctrl.ncmd.u64 = 0; 1091 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1092 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1093 nctrl.ncmd.s.param2 = mc_count; 1094 nctrl.ncmd.s.more = mc_count; 1095 nctrl.netpndev = (u64)netdev; 1096 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1097 1098 /* copy all the addresses into the udd */ 1099 mc = &nctrl.udd[0]; 1100 netdev_for_each_mc_addr(ha, netdev) { 1101 *mc = 0; 1102 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1103 /* no need to swap bytes */ 1104 if (++mc > &nctrl.udd[mc_count]) 1105 break; 1106 } 1107 1108 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1109 1110 /* Apparently, any activity in this call from the kernel has to 1111 * be atomic. So we won't wait for response. 
1112 */ 1113 1114 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1115 if (ret) { 1116 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1117 ret); 1118 } 1119 1120 liquidio_set_uc_list(netdev); 1121 } 1122 1123 /** 1124 * liquidio_set_mac - Net device set_mac_address 1125 * @netdev: network device 1126 * @p: opaque pointer to sockaddr 1127 */ 1128 static int liquidio_set_mac(struct net_device *netdev, void *p) 1129 { 1130 struct sockaddr *addr = (struct sockaddr *)p; 1131 struct lio *lio = GET_LIO(netdev); 1132 struct octeon_device *oct = lio->oct_dev; 1133 struct octnic_ctrl_pkt nctrl; 1134 int ret = 0; 1135 1136 if (!is_valid_ether_addr(addr->sa_data)) 1137 return -EADDRNOTAVAIL; 1138 1139 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1140 return 0; 1141 1142 if (lio->linfo.macaddr_is_admin_asgnd) 1143 return -EPERM; 1144 1145 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1146 1147 nctrl.ncmd.u64 = 0; 1148 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1149 nctrl.ncmd.s.param1 = 0; 1150 nctrl.ncmd.s.more = 1; 1151 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1152 nctrl.netpndev = (u64)netdev; 1153 1154 nctrl.udd[0] = 0; 1155 /* The MAC Address is presented in network byte order. */ 1156 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1157 1158 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1159 if (ret < 0) { 1160 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1161 return -ENOMEM; 1162 } 1163 1164 if (nctrl.sc_status == 1165 FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) { 1166 dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n"); 1167 return -EPERM; 1168 } 1169 1170 eth_hw_addr_set(netdev, addr->sa_data); 1171 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1172 1173 return 0; 1174 } 1175 1176 static void 1177 liquidio_get_stats64(struct net_device *netdev, 1178 struct rtnl_link_stats64 *lstats) 1179 { 1180 struct lio *lio = GET_LIO(netdev); 1181 struct octeon_device *oct; 1182 u64 pkts = 0, drop = 0, bytes = 0; 1183 struct oct_droq_stats *oq_stats; 1184 struct oct_iq_stats *iq_stats; 1185 int i, iq_no, oq_no; 1186 1187 oct = lio->oct_dev; 1188 1189 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1190 return; 1191 1192 for (i = 0; i < oct->num_iqs; i++) { 1193 iq_no = lio->linfo.txpciq[i].s.q_no; 1194 iq_stats = &oct->instr_queue[iq_no]->stats; 1195 pkts += iq_stats->tx_done; 1196 drop += iq_stats->tx_dropped; 1197 bytes += iq_stats->tx_tot_bytes; 1198 } 1199 1200 lstats->tx_packets = pkts; 1201 lstats->tx_bytes = bytes; 1202 lstats->tx_dropped = drop; 1203 1204 pkts = 0; 1205 drop = 0; 1206 bytes = 0; 1207 1208 for (i = 0; i < oct->num_oqs; i++) { 1209 oq_no = lio->linfo.rxpciq[i].s.q_no; 1210 oq_stats = &oct->droq[oq_no]->stats; 1211 pkts += oq_stats->rx_pkts_received; 1212 drop += (oq_stats->rx_dropped + 1213 oq_stats->dropped_nodispatch + 1214 oq_stats->dropped_toomany + 1215 oq_stats->dropped_nomem); 1216 bytes += oq_stats->rx_bytes_received; 1217 } 1218 1219 lstats->rx_bytes = bytes; 1220 lstats->rx_packets = pkts; 1221 lstats->rx_dropped = drop; 1222 1223 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 1224 1225 /* detailed rx_errors: */ 1226 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 1227 /* recved pkt with crc error */ 1228 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 1229 /* recv'd frame alignment error */ 1230 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 1231 1232 lstats->rx_errors = 
lstats->rx_length_errors + lstats->rx_crc_errors + 1233 lstats->rx_frame_errors; 1234 1235 /* detailed tx_errors */ 1236 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 1237 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 1238 1239 lstats->tx_errors = lstats->tx_aborted_errors + 1240 lstats->tx_carrier_errors; 1241 } 1242 1243 /** 1244 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl 1245 * @netdev: network device 1246 * @ifr: interface request 1247 */ 1248 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 1249 { 1250 struct lio *lio = GET_LIO(netdev); 1251 struct hwtstamp_config conf; 1252 1253 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 1254 return -EFAULT; 1255 1256 switch (conf.tx_type) { 1257 case HWTSTAMP_TX_ON: 1258 case HWTSTAMP_TX_OFF: 1259 break; 1260 default: 1261 return -ERANGE; 1262 } 1263 1264 switch (conf.rx_filter) { 1265 case HWTSTAMP_FILTER_NONE: 1266 break; 1267 case HWTSTAMP_FILTER_ALL: 1268 case HWTSTAMP_FILTER_SOME: 1269 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1270 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1271 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1272 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1273 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1274 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1275 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1276 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1277 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1278 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1279 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1280 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1281 case HWTSTAMP_FILTER_NTP_ALL: 1282 conf.rx_filter = HWTSTAMP_FILTER_ALL; 1283 break; 1284 default: 1285 return -ERANGE; 1286 } 1287 1288 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 1289 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1290 1291 else 1292 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1293 1294 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 1295 } 1296 1297 /** 1298 * liquidio_ioctl - ioctl handler 1299 * @netdev: network device 1300 * @ifr: interface request 1301 * @cmd: command 1302 */ 1303 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1304 { 1305 switch (cmd) { 1306 case SIOCSHWTSTAMP: 1307 return hwtstamp_ioctl(netdev, ifr); 1308 default: 1309 return -EOPNOTSUPP; 1310 } 1311 } 1312 1313 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 1314 { 1315 struct sk_buff *skb = (struct sk_buff *)buf; 1316 struct octnet_buf_free_info *finfo; 1317 struct oct_timestamp_resp *resp; 1318 struct octeon_soft_command *sc; 1319 struct lio *lio; 1320 1321 finfo = (struct octnet_buf_free_info *)skb->cb; 1322 lio = finfo->lio; 1323 sc = finfo->sc; 1324 oct = lio->oct_dev; 1325 resp = (struct oct_timestamp_resp *)sc->virtrptr; 1326 1327 if (status != OCTEON_REQUEST_DONE) { 1328 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", 1329 CVM_CAST64(status)); 1330 resp->timestamp = 0; 1331 } 1332 1333 octeon_swap_8B_data(&resp->timestamp, 1); 1334 1335 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 1336 struct skb_shared_hwtstamps ts; 1337 u64 ns = resp->timestamp; 1338 1339 netif_info(lio, tx_done, lio->netdev, 1340 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 1341 skb, (unsigned long long)ns); 1342 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 1343 skb_tstamp_tx(skb, &ts); 1344 } 1345 1346 octeon_free_soft_command(oct, sc); 1347 tx_buffer_free(skb); 1348 } 1349 1350 /* send_nic_timestamp_pkt - Send a data packet that will be timestamped 1351 * @oct: octeon device 1352 * @ndata: pointer to network data 1353 * @finfo: pointer to private network data 1354 */ 1355 static int send_nic_timestamp_pkt(struct octeon_device *oct, 1356 struct octnic_data_pkt *ndata, 1357 struct octnet_buf_free_info *finfo, 1358 int xmit_more) 1359 { 1360 struct octeon_soft_command *sc; 1361 int ring_doorbell; 1362 struct lio *lio; 1363 int retval; 1364 u32 len; 1365 1366 lio = finfo->lio; 1367 1368 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 1369 sizeof(struct oct_timestamp_resp)); 1370 finfo->sc = sc; 1371 1372 if (!sc) { 1373 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 1374 return IQ_SEND_FAILED; 1375 } 1376 1377 if (ndata->reqtype == REQTYPE_NORESP_NET) 1378 ndata->reqtype = REQTYPE_RESP_NET; 1379 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 1380 ndata->reqtype = REQTYPE_RESP_NET_SG; 1381 1382 sc->callback = handle_timestamp; 1383 sc->callback_arg = finfo->skb; 1384 sc->iq_no = ndata->q_no; 1385 1386 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; 1387 1388 ring_doorbell = !xmit_more; 1389 1390 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 1391 sc, len, ndata->reqtype); 1392 1393 if (retval == IQ_SEND_FAILED) { 1394 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 1395 retval); 1396 octeon_free_soft_command(oct, sc); 1397 } else { 1398 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 1399 } 1400 1401 return retval; 1402 } 1403 1404 /** 1405 * liquidio_xmit - Transmit networks packets to the Octeon interface 1406 * @skb: skbuff struct to be passed to network layer. 1407 * @netdev: pointer to network device 1408 * @returns whether the packet was transmitted to the device okay or not 1409 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 1410 */ 1411 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 1412 { 1413 struct octnet_buf_free_info *finfo; 1414 union octnic_cmd_setup cmdsetup; 1415 struct octnic_data_pkt ndata; 1416 struct octeon_instr_irh *irh; 1417 struct oct_iq_stats *stats; 1418 struct octeon_device *oct; 1419 int q_idx = 0, iq_no = 0; 1420 union tx_info *tx_info; 1421 int xmit_more = 0; 1422 struct lio *lio; 1423 int status = 0; 1424 u64 dptr = 0; 1425 u32 tag = 0; 1426 int j; 1427 1428 lio = GET_LIO(netdev); 1429 oct = lio->oct_dev; 1430 1431 q_idx = skb_iq(lio->oct_dev, skb); 1432 tag = q_idx; 1433 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 1434 1435 stats = &oct->instr_queue[iq_no]->stats; 1436 1437 /* Check for all conditions in which the current packet cannot be 1438 * transmitted. 
1439 */ 1440 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 1441 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 1442 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 1443 lio->linfo.link.s.link_up); 1444 goto lio_xmit_failed; 1445 } 1446 1447 /* Use space in skb->cb to store info used to unmap and 1448 * free the buffers. 1449 */ 1450 finfo = (struct octnet_buf_free_info *)skb->cb; 1451 finfo->lio = lio; 1452 finfo->skb = skb; 1453 finfo->sc = NULL; 1454 1455 /* Prepare the attributes for the data to be passed to OSI. */ 1456 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 1457 1458 ndata.buf = finfo; 1459 1460 ndata.q_no = iq_no; 1461 1462 if (octnet_iq_is_full(oct, ndata.q_no)) { 1463 /* defer sending if queue is full */ 1464 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 1465 ndata.q_no); 1466 stats->tx_iq_busy++; 1467 return NETDEV_TX_BUSY; 1468 } 1469 1470 ndata.datasize = skb->len; 1471 1472 cmdsetup.u64 = 0; 1473 cmdsetup.s.iq_no = iq_no; 1474 1475 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1476 if (skb->encapsulation) { 1477 cmdsetup.s.tnl_csum = 1; 1478 stats->tx_vxlan++; 1479 } else { 1480 cmdsetup.s.transport_csum = 1; 1481 } 1482 } 1483 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1484 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1485 cmdsetup.s.timestamp = 1; 1486 } 1487 1488 if (!skb_shinfo(skb)->nr_frags) { 1489 cmdsetup.s.u.datasize = skb->len; 1490 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1491 /* Offload checksum calculation for TCP/UDP packets */ 1492 dptr = dma_map_single(&oct->pci_dev->dev, 1493 skb->data, 1494 skb->len, 1495 DMA_TO_DEVICE); 1496 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 1497 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 1498 __func__); 1499 return NETDEV_TX_BUSY; 1500 } 1501 1502 ndata.cmd.cmd3.dptr = dptr; 1503 finfo->dptr = dptr; 1504 ndata.reqtype = REQTYPE_NORESP_NET; 1505 1506 } else { 1507 skb_frag_t *frag; 1508 struct octnic_gather *g; 1509 int i, frags; 1510 1511 spin_lock(&lio->glist_lock[q_idx]); 1512 g = (struct octnic_gather *) 1513 lio_list_delete_head(&lio->glist[q_idx]); 1514 spin_unlock(&lio->glist_lock[q_idx]); 1515 1516 if (!g) { 1517 netif_info(lio, tx_err, lio->netdev, 1518 "Transmit scatter gather: glist null!\n"); 1519 goto lio_xmit_failed; 1520 } 1521 1522 cmdsetup.s.gather = 1; 1523 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 1524 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1525 1526 memset(g->sg, 0, g->sg_size); 1527 1528 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 1529 skb->data, 1530 (skb->len - skb->data_len), 1531 DMA_TO_DEVICE); 1532 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 1533 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 1534 __func__); 1535 return NETDEV_TX_BUSY; 1536 } 1537 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 1538 1539 frags = skb_shinfo(skb)->nr_frags; 1540 i = 1; 1541 while (frags--) { 1542 frag = &skb_shinfo(skb)->frags[i - 1]; 1543 1544 g->sg[(i >> 2)].ptr[(i & 3)] = 1545 skb_frag_dma_map(&oct->pci_dev->dev, 1546 frag, 0, skb_frag_size(frag), 1547 DMA_TO_DEVICE); 1548 if (dma_mapping_error(&oct->pci_dev->dev, 1549 g->sg[i >> 2].ptr[i & 3])) { 1550 dma_unmap_single(&oct->pci_dev->dev, 1551 g->sg[0].ptr[0], 1552 skb->len - skb->data_len, 1553 DMA_TO_DEVICE); 1554 for (j = 1; j < i; j++) { 1555 frag = &skb_shinfo(skb)->frags[j - 1]; 1556 dma_unmap_page(&oct->pci_dev->dev, 1557 g->sg[j >> 2].ptr[j & 3], 1558 
skb_frag_size(frag), 1559 DMA_TO_DEVICE); 1560 } 1561 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 1562 __func__); 1563 return NETDEV_TX_BUSY; 1564 } 1565 1566 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 1567 (i & 3)); 1568 i++; 1569 } 1570 1571 dptr = g->sg_dma_ptr; 1572 1573 ndata.cmd.cmd3.dptr = dptr; 1574 finfo->dptr = dptr; 1575 finfo->g = g; 1576 1577 ndata.reqtype = REQTYPE_NORESP_NET_SG; 1578 } 1579 1580 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 1581 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 1582 1583 if (skb_shinfo(skb)->gso_size) { 1584 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 1585 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 1586 } 1587 1588 /* HW insert VLAN tag */ 1589 if (skb_vlan_tag_present(skb)) { 1590 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 1591 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 1592 } 1593 1594 xmit_more = netdev_xmit_more(); 1595 1596 if (unlikely(cmdsetup.s.timestamp)) 1597 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 1598 else 1599 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 1600 if (status == IQ_SEND_FAILED) 1601 goto lio_xmit_failed; 1602 1603 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 1604 1605 if (status == IQ_SEND_STOP) { 1606 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 1607 iq_no); 1608 netif_stop_subqueue(netdev, q_idx); 1609 } 1610 1611 netif_trans_update(netdev); 1612 1613 if (tx_info->s.gso_segs) 1614 stats->tx_done += tx_info->s.gso_segs; 1615 else 1616 stats->tx_done++; 1617 stats->tx_tot_bytes += ndata.datasize; 1618 1619 return NETDEV_TX_OK; 1620 1621 lio_xmit_failed: 1622 stats->tx_dropped++; 1623 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 1624 iq_no, stats->tx_dropped); 1625 if (dptr) 1626 dma_unmap_single(&oct->pci_dev->dev, dptr, 1627 ndata.datasize, DMA_TO_DEVICE); 1628 1629 octeon_ring_doorbell_locked(oct, iq_no); 1630 1631 tx_buffer_free(skb); 1632 return NETDEV_TX_OK; 1633 } 1634 1635 /** 1636 * liquidio_tx_timeout - Network device Tx timeout 1637 * @netdev: pointer to network device 1638 * @txqueue: index of the hung transmit queue 1639 */ 1640 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 1641 { 1642 struct lio *lio; 1643 1644 lio = GET_LIO(netdev); 1645 1646 netif_info(lio, tx_err, lio->netdev, 1647 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 1648 netdev->stats.tx_dropped); 1649 netif_trans_update(netdev); 1650 wake_txqs(netdev); 1651 } 1652 1653 static int 1654 liquidio_vlan_rx_add_vid(struct net_device *netdev, 1655 __be16 proto __attribute__((unused)), u16 vid) 1656 { 1657 struct lio *lio = GET_LIO(netdev); 1658 struct octeon_device *oct = lio->oct_dev; 1659 struct octnic_ctrl_pkt nctrl; 1660 int ret = 0; 1661 1662 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1663 1664 nctrl.ncmd.u64 = 0; 1665 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 1666 nctrl.ncmd.s.param1 = vid; 1667 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1668 nctrl.netpndev = (u64)netdev; 1669 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1670 1671 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1672 if (ret) { 1673 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 1674 ret); 1675 return -EPERM; 1676 } 1677 1678 return 0; 1679 } 1680 1681 static int 1682 liquidio_vlan_rx_kill_vid(struct net_device *netdev, 1683 __be16 proto __attribute__((unused)), u16 vid) 1684 { 1685 struct lio 
*lio = GET_LIO(netdev); 1686 struct octeon_device *oct = lio->oct_dev; 1687 struct octnic_ctrl_pkt nctrl; 1688 int ret = 0; 1689 1690 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1691 1692 nctrl.ncmd.u64 = 0; 1693 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 1694 nctrl.ncmd.s.param1 = vid; 1695 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1696 nctrl.netpndev = (u64)netdev; 1697 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1698 1699 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1700 if (ret) { 1701 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 1702 ret); 1703 if (ret > 0) 1704 ret = -EIO; 1705 } 1706 return ret; 1707 } 1708 1709 /** Sending command to enable/disable RX checksum offload 1710 * @param netdev pointer to network device 1711 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 1712 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ 1713 * OCTNET_CMD_RXCSUM_DISABLE 1714 * @returns SUCCESS or FAILURE 1715 */ 1716 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 1717 u8 rx_cmd) 1718 { 1719 struct lio *lio = GET_LIO(netdev); 1720 struct octeon_device *oct = lio->oct_dev; 1721 struct octnic_ctrl_pkt nctrl; 1722 int ret = 0; 1723 1724 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1725 1726 nctrl.ncmd.u64 = 0; 1727 nctrl.ncmd.s.cmd = command; 1728 nctrl.ncmd.s.param1 = rx_cmd; 1729 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1730 nctrl.netpndev = (u64)netdev; 1731 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1732 1733 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1734 if (ret) { 1735 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", 1736 ret); 1737 if (ret > 0) 1738 ret = -EIO; 1739 } 1740 return ret; 1741 } 1742 1743 /** Sending command to add/delete VxLAN UDP port to firmware 1744 * @param netdev pointer to network device 1745 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 1746 * @param vxlan_port VxLAN port to be added or deleted 1747 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 1748 * OCTNET_CMD_VXLAN_PORT_DEL 1749 * @returns SUCCESS or FAILURE 1750 */ 1751 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 1752 u16 vxlan_port, u8 vxlan_cmd_bit) 1753 { 1754 struct lio *lio = GET_LIO(netdev); 1755 struct octeon_device *oct = lio->oct_dev; 1756 struct octnic_ctrl_pkt nctrl; 1757 int ret = 0; 1758 1759 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1760 1761 nctrl.ncmd.u64 = 0; 1762 nctrl.ncmd.s.cmd = command; 1763 nctrl.ncmd.s.more = vxlan_cmd_bit; 1764 nctrl.ncmd.s.param1 = vxlan_port; 1765 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1766 nctrl.netpndev = (u64)netdev; 1767 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1768 1769 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1770 if (ret) { 1771 dev_err(&oct->pci_dev->dev, 1772 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", 1773 ret); 1774 if (ret > 0) 1775 ret = -EIO; 1776 } 1777 return ret; 1778 } 1779 1780 static int liquidio_udp_tunnel_set_port(struct net_device *netdev, 1781 unsigned int table, unsigned int entry, 1782 struct udp_tunnel_info *ti) 1783 { 1784 return liquidio_vxlan_port_command(netdev, 1785 OCTNET_CMD_VXLAN_PORT_CONFIG, 1786 htons(ti->port), 1787 OCTNET_CMD_VXLAN_PORT_ADD); 1788 } 1789 1790 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, 1791 unsigned int table, 1792 unsigned int entry, 1793 struct udp_tunnel_info *ti) 1794 { 1795 return liquidio_vxlan_port_command(netdev, 1796 OCTNET_CMD_VXLAN_PORT_CONFIG, 1797 
htons(ti->port), 1798 OCTNET_CMD_VXLAN_PORT_DEL); 1799 } 1800 1801 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { 1802 .set_port = liquidio_udp_tunnel_set_port, 1803 .unset_port = liquidio_udp_tunnel_unset_port, 1804 .tables = { 1805 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 1806 }, 1807 }; 1808 1809 /** \brief Net device fix features 1810 * @param netdev pointer to network device 1811 * @param request features requested 1812 * @returns updated features list 1813 */ 1814 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 1815 netdev_features_t request) 1816 { 1817 struct lio *lio = netdev_priv(netdev); 1818 1819 if ((request & NETIF_F_RXCSUM) && 1820 !(lio->dev_capability & NETIF_F_RXCSUM)) 1821 request &= ~NETIF_F_RXCSUM; 1822 1823 if ((request & NETIF_F_HW_CSUM) && 1824 !(lio->dev_capability & NETIF_F_HW_CSUM)) 1825 request &= ~NETIF_F_HW_CSUM; 1826 1827 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 1828 request &= ~NETIF_F_TSO; 1829 1830 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 1831 request &= ~NETIF_F_TSO6; 1832 1833 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 1834 request &= ~NETIF_F_LRO; 1835 1836 /* Disable LRO if RXCSUM is off */ 1837 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 1838 (lio->dev_capability & NETIF_F_LRO)) 1839 request &= ~NETIF_F_LRO; 1840 1841 return request; 1842 } 1843 1844 /** \brief Net device set features 1845 * @param netdev pointer to network device 1846 * @param features features to enable/disable 1847 */ 1848 static int liquidio_set_features(struct net_device *netdev, 1849 netdev_features_t features) 1850 { 1851 struct lio *lio = netdev_priv(netdev); 1852 1853 if (!((netdev->features ^ features) & NETIF_F_LRO)) 1854 return 0; 1855 1856 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 1857 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 1858 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1859 else if (!(features & NETIF_F_LRO) && 1860 (lio->dev_capability & NETIF_F_LRO)) 1861 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 1862 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1863 if (!(netdev->features & NETIF_F_RXCSUM) && 1864 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1865 (features & NETIF_F_RXCSUM)) 1866 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1867 OCTNET_CMD_RXCSUM_ENABLE); 1868 else if ((netdev->features & NETIF_F_RXCSUM) && 1869 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1870 !(features & NETIF_F_RXCSUM)) 1871 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1872 OCTNET_CMD_RXCSUM_DISABLE); 1873 1874 return 0; 1875 } 1876 1877 static const struct net_device_ops lionetdevops = { 1878 .ndo_open = liquidio_open, 1879 .ndo_stop = liquidio_stop, 1880 .ndo_start_xmit = liquidio_xmit, 1881 .ndo_get_stats64 = liquidio_get_stats64, 1882 .ndo_set_mac_address = liquidio_set_mac, 1883 .ndo_set_rx_mode = liquidio_set_mcast_list, 1884 .ndo_tx_timeout = liquidio_tx_timeout, 1885 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 1886 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 1887 .ndo_change_mtu = liquidio_change_mtu, 1888 .ndo_eth_ioctl = liquidio_ioctl, 1889 .ndo_fix_features = liquidio_fix_features, 1890 .ndo_set_features = liquidio_set_features, 1891 }; 1892 1893 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 1894 { 1895 struct octeon_device *oct = (struct octeon_device *)buf; 1896 struct octeon_recv_pkt *recv_pkt = 
recv_info->recv_pkt; 1897 union oct_link_status *ls; 1898 int gmxport = 0; 1899 int i; 1900 1901 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 1902 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 1903 recv_pkt->buffer_size[0], 1904 recv_pkt->rh.r_nic_info.gmxport); 1905 goto nic_info_err; 1906 } 1907 1908 gmxport = recv_pkt->rh.r_nic_info.gmxport; 1909 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 1910 OCT_DROQ_INFO_SIZE); 1911 1912 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 1913 1914 for (i = 0; i < oct->ifcount; i++) { 1915 if (oct->props[i].gmxport == gmxport) { 1916 update_link_status(oct->props[i].netdev, ls); 1917 break; 1918 } 1919 } 1920 1921 nic_info_err: 1922 for (i = 0; i < recv_pkt->buffer_count; i++) 1923 recv_buffer_free(recv_pkt->buffer_ptr[i]); 1924 octeon_free_recv_info(recv_info); 1925 return 0; 1926 } 1927 1928 /** 1929 * setup_nic_devices - Setup network interfaces 1930 * @octeon_dev: octeon device 1931 * 1932 * Called during init time for each device. It assumes the NIC 1933 * is already up and running. The link information for each 1934 * interface is passed in link_info. 1935 */ 1936 static int setup_nic_devices(struct octeon_device *octeon_dev) 1937 { 1938 int retval, num_iqueues, num_oqueues; 1939 u32 resp_size, data_size; 1940 struct liquidio_if_cfg_resp *resp; 1941 struct octeon_soft_command *sc; 1942 union oct_nic_if_cfg if_cfg; 1943 struct octdev_props *props; 1944 struct net_device *netdev; 1945 struct lio_version *vdata; 1946 struct lio *lio = NULL; 1947 u8 mac[ETH_ALEN], i, j; 1948 u32 ifidx_or_pfnum; 1949 1950 ifidx_or_pfnum = octeon_dev->pf_num; 1951 1952 /* This is to handle link status changes */ 1953 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, 1954 lio_nic_info, octeon_dev); 1955 1956 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 1957 * They are handled directly. 1958 */ 1959 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 1960 free_netbuf); 1961 1962 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 1963 free_netsgbuf); 1964 1965 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 1966 free_netsgbuf_with_resp); 1967 1968 for (i = 0; i < octeon_dev->ifcount; i++) { 1969 resp_size = sizeof(struct liquidio_if_cfg_resp); 1970 data_size = sizeof(struct lio_version); 1971 sc = (struct octeon_soft_command *) 1972 octeon_alloc_soft_command(octeon_dev, data_size, 1973 resp_size, 0); 1974 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1975 vdata = (struct lio_version *)sc->virtdptr; 1976 1977 *((u64 *)vdata) = 0; 1978 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 1979 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 1980 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 1981 1982 if_cfg.u64 = 0; 1983 1984 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; 1985 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; 1986 if_cfg.s.base_queue = 0; 1987 1988 sc->iq_no = 0; 1989 1990 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 1991 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 1992 0); 1993 1994 init_completion(&sc->complete); 1995 sc->sc_status = OCTEON_REQUEST_PENDING; 1996 1997 retval = octeon_send_soft_command(octeon_dev, sc); 1998 if (retval == IQ_SEND_FAILED) { 1999 dev_err(&octeon_dev->pci_dev->dev, 2000 "iq/oq config failed status: %x\n", retval); 2001 /* Soft instr is freed by driver in case of failure. 
	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, 0);
		if (!sc) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Soft command allocation failed\n");
			return -ENOMEM;
		}

		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if_cfg.u64 = 0;

		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.base_queue = 0;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
					    0);

		init_completion(&sc->complete);
		sc->sc_status = OCTEON_REQUEST_PENDING;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n", retval);
			/* Soft instr is freed by driver in case of failure. */
			octeon_free_soft_command(octeon_dev, sc);
			return -EIO;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed, retval = %d\n", retval);
			WRITE_ONCE(sc->caller_is_done, true);
			return -EIO;
		}

		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
			 32, "%s",
			 resp->cfg_info.liquidio_firmware_version);

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!num_iqueues || !num_oqueues) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);

		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;

		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}

		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
		lio->linfo.macaddr_is_admin_asgnd =
			resp->cfg_info.linfo.macaddr_is_admin_asgnd;
		lio->linfo.macaddr_spoofchk =
			resp->cfg_info.linfo.macaddr_spoofchk;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		lio->dev_capability = NETIF_F_HIGHDMA
				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				      | NETIF_F_SG | NETIF_F_RXCSUM
				      | NETIF_F_TSO | NETIF_F_TSO6
				      | NETIF_F_GRO
				      | NETIF_F_LRO;
		netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features =
			(lio->enc_dev_capability & ~NETIF_F_LRO);
		netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;

		netdev->vlan_features = lio->dev_capability;
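
		/* Features kept in netdev->features but excluded from
		 * netdev->hw_features below (e.g. VLAN CTAG RX) are fixed;
		 * they cannot be toggled through ethtool.
		 */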
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		WRITE_ONCE(sc->caller_is_done, true);

		/* Point to the properties for the octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < ETH_ALEN; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */
		eth_hw_addr_set(netdev, mac);

		if (liquidio_setup_io_queues(octeon_dev, i,
					     lio->linfo.num_txpciq,
					     lio->linfo.num_rxpciq)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_free;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		/* For VFs, enable Octeon device interrupts here,
		 * as this is contingent upon IO queue setup
		 */
		octeon_dev->fn_list.enable_interrupt(octeon_dev,
						     OCTEON_ALL_INTR);

		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_free;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_free;

		if (setup_rx_oom_poll_fn(netdev))
			goto setup_nic_dev_free;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_free;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
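
		/* Start with the carrier off; update_link_status() turns it
		 * on once the firmware reports link-up through the NIC_INFO
		 * dispatch handler (lio_nic_info).
		 */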
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Send a command to the firmware to enable Rx checksum
		 * offload by default when this interface is set up.
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_dev->no_speed_setting = 1;
	}

	return 0;

setup_nic_dev_free:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}

setup_nic_dev_done:

	return -ENODEV;
}

/**
 * liquidio_init_nic_module - initialize the NIC
 * @oct: octeon device
 *
 * This initialization routine is called once the Octeon device application
 * is up and running.
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	int num_nic_ports = 1;
	int i, retval = 0;

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* Only the default iq and oq were initialized; initialize the rest
	 * as well and run the port_config command for each port.
	 */
	oct->ifcount = num_nic_ports;
	memset(oct->props, 0,
	       sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

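/* octeon_device_init() below walks the device through the OCT_DEV_* states
 * in order (PCI enable, register mapping, dispatch/queue/mailbox setup, the
 * PF<-->VF handshake, and finally NIC module init). Each stage records its
 * progress in oct->status so later code can tell how far initialization got.
 */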
/**
 * octeon_device_init - Device initialization for each Octeon device that is probed
 * @oct: octeon device
 */
static int octeon_device_init(struct octeon_device *oct)
{
	u32 rev_id;
	int j;

	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(oct))
		return 1;
	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);

	oct->chip_id = OCTEON_CN23XX_VF_VID;
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	if (cn23xx_setup_octeon_vf_device(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);

	oct->app_mode = CVM_DRV_NIC_APP;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
		return 1;
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(oct)) {
		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(oct)) {
		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);

	if (oct->fn_list.setup_mbox(oct)) {
		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);

	if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
		 oct->sriov_info.rings_per_vf);

	/* Setup the interrupt handler and record the INT SUM register address */
	if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
		return 1;

	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);

	/* ***************************************************************
	 * The interrupts need to be enabled for the PF<-->VF handshake.
	 * They are [re]-enabled after the PF<-->VF handshake so that the
	 * correct OQ tick value is used (i.e. the value retrieved from
	 * the PF as part of the handshake).
	 */

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	if (cn23xx_octeon_pfvf_handshake(oct))
		return 1;

	/* Here we [re]-enable the interrupts so that the correct OQ tick value
	 * is used (i.e. the value that was retrieved during the handshake)
	 */

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
	/* *************************************************************** */

	/* Enable the input and output queues for this Octeon device */
	if (oct->fn_list.enable_io_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
		return 1;
	}

	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);

	atomic_set(&oct->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < oct->num_oqs; j++)
		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (liquidio_init_nic_module(oct))
		return 1;

	return 0;
}

static int __init liquidio_vf_init(void)
{
	octeon_init_device_list(0);
	return pci_register_driver(&liquidio_vf_pci_driver);
}

static void __exit liquidio_vf_exit(void)
{
	pci_unregister_driver(&liquidio_vf_pci_driver);

	pr_info("LiquidIO_VF network module is now unloaded\n");
}

module_init(liquidio_vf_init);
module_exit(liquidio_vf_exit);