1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <net/vxlan.h> 22 #include "liquidio_common.h" 23 #include "octeon_droq.h" 24 #include "octeon_iq.h" 25 #include "response_manager.h" 26 #include "octeon_device.h" 27 #include "octeon_nic.h" 28 #include "octeon_main.h" 29 #include "octeon_network.h" 30 #include "cn23xx_vf_device.h" 31 32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); 34 MODULE_LICENSE("GPL"); 35 MODULE_VERSION(LIQUIDIO_VERSION); 36 37 static int debug = -1; 38 module_param(debug, int, 0644); 39 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 40 41 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 42 43 struct oct_timestamp_resp { 44 u64 rh; 45 u64 timestamp; 46 u64 status; 47 }; 48 49 union tx_info { 50 u64 u64; 51 struct { 52 #ifdef __BIG_ENDIAN_BITFIELD 53 u16 gso_size; 54 u16 gso_segs; 55 u32 reserved; 56 #else 57 u32 reserved; 58 u16 gso_segs; 59 u16 gso_size; 60 #endif 61 } s; 62 }; 63 64 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 65 #define OCTNIC_GSO_MAX_SIZE \ 66 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 67 68 static int 69 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 70 static void liquidio_vf_remove(struct pci_dev *pdev); 71 static int octeon_device_init(struct octeon_device *oct); 72 static int liquidio_stop(struct net_device *netdev); 73 74 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 75 { 76 struct octeon_device_priv *oct_priv = 77 (struct octeon_device_priv *)oct->priv; 78 int retry = MAX_IO_PENDING_PKT_COUNT; 79 int pkt_cnt = 0, pending_pkts; 80 int i; 81 82 do { 83 pending_pkts = 0; 84 85 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 86 if (!(oct->io_qmask.oq & BIT_ULL(i))) 87 continue; 88 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 89 } 90 if (pkt_cnt > 0) { 91 pending_pkts += pkt_cnt; 92 tasklet_schedule(&oct_priv->droq_tasklet); 93 } 94 pkt_cnt = 0; 95 schedule_timeout_uninterruptible(1); 96 97 } while (retry-- && pending_pkts); 98 99 return pkt_cnt; 100 } 101 102 /** 103 * \brief Cause device to go quiet so it can be safely removed/reset/etc 104 * @param oct Pointer to Octeon device 105 */ 106 static void pcierror_quiesce_device(struct octeon_device *oct) 107 { 108 int i; 109 110 /* Disable the input and output queues now. No more packets will 111 * arrive from Octeon, but we should wait for all packet processing 112 * to finish. 113 */ 114 115 /* To allow for in-flight requests */ 116 schedule_timeout_uninterruptible(100); 117 118 if (wait_for_pending_requests(oct)) 119 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 120 121 /* Force all requests waiting to be fetched by OCTEON to complete. */ 122 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 123 struct octeon_instr_queue *iq; 124 125 if (!(oct->io_qmask.iq & BIT_ULL(i))) 126 continue; 127 iq = oct->instr_queue[i]; 128 129 if (atomic_read(&iq->instr_pending)) { 130 spin_lock_bh(&iq->lock); 131 iq->fill_cnt = 0; 132 iq->octeon_read_index = iq->host_write_index; 133 iq->stats.instr_processed += 134 atomic_read(&iq->instr_pending); 135 lio_process_iq_request_list(oct, iq, 0); 136 spin_unlock_bh(&iq->lock); 137 } 138 } 139 140 /* Force all pending ordered list requests to time out. */ 141 lio_process_ordered_list(oct, 1); 142 143 /* We do not need to wait for output queue packets to be processed. */ 144 } 145 146 /** 147 * \brief Cleanup PCI AER uncorrectable error status 148 * @param dev Pointer to PCI device 149 */ 150 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 151 { 152 u32 status, mask; 153 int pos = 0x100; 154 155 pr_info("%s :\n", __func__); 156 157 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 158 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 159 if (dev->error_state == pci_channel_io_normal) 160 status &= ~mask; /* Clear corresponding nonfatal bits */ 161 else 162 status &= mask; /* Clear corresponding fatal bits */ 163 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 164 } 165 166 /** 167 * \brief Stop all PCI IO to a given device 168 * @param dev Pointer to Octeon device 169 */ 170 static void stop_pci_io(struct octeon_device *oct) 171 { 172 struct msix_entry *msix_entries; 173 int i; 174 175 /* No more instructions will be forwarded. */ 176 atomic_set(&oct->status, OCT_DEV_IN_RESET); 177 178 for (i = 0; i < oct->ifcount; i++) 179 netif_device_detach(oct->props[i].netdev); 180 181 /* Disable interrupts */ 182 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 183 184 pcierror_quiesce_device(oct); 185 if (oct->msix_on) { 186 msix_entries = (struct msix_entry *)oct->msix_entries; 187 for (i = 0; i < oct->num_msix_irqs; i++) { 188 /* clear the affinity_cpumask */ 189 irq_set_affinity_hint(msix_entries[i].vector, 190 NULL); 191 free_irq(msix_entries[i].vector, 192 &oct->ioq_vector[i]); 193 } 194 pci_disable_msix(oct->pci_dev); 195 kfree(oct->msix_entries); 196 oct->msix_entries = NULL; 197 octeon_free_ioq_vector(oct); 198 } 199 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 200 lio_get_state_string(&oct->status)); 201 202 /* making it a common function for all OCTEON models */ 203 cleanup_aer_uncorrect_error_status(oct->pci_dev); 204 205 pci_disable_device(oct->pci_dev); 206 } 207 208 /** 209 * \brief called when PCI error is detected 210 * @param pdev Pointer to PCI device 211 * @param state The current pci connection state 212 * 213 * This function is called after a PCI bus error affecting 214 * this device has been detected. 215 */ 216 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 217 pci_channel_state_t state) 218 { 219 struct octeon_device *oct = pci_get_drvdata(pdev); 220 221 /* Non-correctable Non-fatal errors */ 222 if (state == pci_channel_io_normal) { 223 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 224 cleanup_aer_uncorrect_error_status(oct->pci_dev); 225 return PCI_ERS_RESULT_CAN_RECOVER; 226 } 227 228 /* Non-correctable Fatal errors */ 229 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 230 stop_pci_io(oct); 231 232 return PCI_ERS_RESULT_DISCONNECT; 233 } 234 235 /* For PCI-E Advanced Error Recovery (AER) Interface */ 236 static const struct pci_error_handlers liquidio_vf_err_handler = { 237 .error_detected = liquidio_pcie_error_detected, 238 }; 239 240 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 241 { 242 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 244 }, 245 { 246 0, 0, 0, 0, 0, 0, 0 247 } 248 }; 249 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 250 251 static struct pci_driver liquidio_vf_pci_driver = { 252 .name = "LiquidIO_VF", 253 .id_table = liquidio_vf_pci_tbl, 254 .probe = liquidio_vf_probe, 255 .remove = liquidio_vf_remove, 256 .err_handler = &liquidio_vf_err_handler, /* For AER */ 257 }; 258 259 /** 260 * \brief Print link information 261 * @param netdev network device 262 */ 263 static void print_link_info(struct net_device *netdev) 264 { 265 struct lio *lio = GET_LIO(netdev); 266 267 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 268 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 269 struct oct_link_info *linfo = &lio->linfo; 270 271 if (linfo->link.s.link_up) { 272 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 273 linfo->link.s.speed, 274 (linfo->link.s.duplex) ? "Full" : "Half"); 275 } else { 276 netif_info(lio, link, lio->netdev, "Link Down\n"); 277 } 278 } 279 } 280 281 /** 282 * \brief Routine to notify MTU change 283 * @param work work_struct data structure 284 */ 285 static void octnet_link_status_change(struct work_struct *work) 286 { 287 struct cavium_wk *wk = (struct cavium_wk *)work; 288 struct lio *lio = (struct lio *)wk->ctxptr; 289 290 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. 291 * this API is invoked only when new max-MTU of the interface is 292 * less than current MTU. 293 */ 294 rtnl_lock(); 295 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); 296 rtnl_unlock(); 297 } 298 299 /** 300 * \brief Sets up the mtu status change work 301 * @param netdev network device 302 */ 303 static int setup_link_status_change_wq(struct net_device *netdev) 304 { 305 struct lio *lio = GET_LIO(netdev); 306 struct octeon_device *oct = lio->oct_dev; 307 308 lio->link_status_wq.wq = alloc_workqueue("link-status", 309 WQ_MEM_RECLAIM, 0); 310 if (!lio->link_status_wq.wq) { 311 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 312 return -1; 313 } 314 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 315 octnet_link_status_change); 316 lio->link_status_wq.wk.ctxptr = lio; 317 318 return 0; 319 } 320 321 static void cleanup_link_status_change_wq(struct net_device *netdev) 322 { 323 struct lio *lio = GET_LIO(netdev); 324 325 if (lio->link_status_wq.wq) { 326 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 327 destroy_workqueue(lio->link_status_wq.wq); 328 } 329 } 330 331 /** 332 * \brief Update link status 333 * @param netdev network device 334 * @param ls link status structure 335 * 336 * Called on receipt of a link status response from the core application to 337 * update each interface's link status. 338 */ 339 static void update_link_status(struct net_device *netdev, 340 union oct_link_status *ls) 341 { 342 struct lio *lio = GET_LIO(netdev); 343 int current_max_mtu = lio->linfo.link.s.mtu; 344 struct octeon_device *oct = lio->oct_dev; 345 346 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 347 lio->linfo.link.u64 = ls->u64; 348 349 print_link_info(netdev); 350 lio->link_changes++; 351 352 if (lio->linfo.link.s.link_up) { 353 netif_carrier_on(netdev); 354 wake_txqs(netdev); 355 } else { 356 netif_carrier_off(netdev); 357 stop_txqs(netdev); 358 } 359 360 if (lio->linfo.link.s.mtu != current_max_mtu) { 361 dev_info(&oct->pci_dev->dev, 362 "Max MTU Changed from %d to %d\n", 363 current_max_mtu, lio->linfo.link.s.mtu); 364 netdev->max_mtu = lio->linfo.link.s.mtu; 365 } 366 367 if (lio->linfo.link.s.mtu < netdev->mtu) { 368 dev_warn(&oct->pci_dev->dev, 369 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", 370 netdev->mtu, lio->linfo.link.s.mtu); 371 queue_delayed_work(lio->link_status_wq.wq, 372 &lio->link_status_wq.wk.work, 0); 373 } 374 } 375 } 376 377 /** 378 * \brief PCI probe handler 379 * @param pdev PCI device structure 380 * @param ent unused 381 */ 382 static int 383 liquidio_vf_probe(struct pci_dev *pdev, 384 const struct pci_device_id *ent __attribute__((unused))) 385 { 386 struct octeon_device *oct_dev = NULL; 387 388 oct_dev = octeon_allocate_device(pdev->device, 389 sizeof(struct octeon_device_priv)); 390 391 if (!oct_dev) { 392 dev_err(&pdev->dev, "Unable to allocate device\n"); 393 return -ENOMEM; 394 } 395 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 396 397 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 398 (u32)pdev->vendor, (u32)pdev->device); 399 400 /* Assign octeon_device for this device to the private data area. */ 401 pci_set_drvdata(pdev, oct_dev); 402 403 /* set linux specific device pointer */ 404 oct_dev->pci_dev = pdev; 405 406 oct_dev->subsystem_id = pdev->subsystem_vendor | 407 (pdev->subsystem_device << 16); 408 409 if (octeon_device_init(oct_dev)) { 410 liquidio_vf_remove(pdev); 411 return -ENOMEM; 412 } 413 414 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 415 416 return 0; 417 } 418 419 /** 420 * \brief PCI FLR for each Octeon device. 421 * @param oct octeon device 422 */ 423 static void octeon_pci_flr(struct octeon_device *oct) 424 { 425 pci_save_state(oct->pci_dev); 426 427 pci_cfg_access_lock(oct->pci_dev); 428 429 /* Quiesce the device completely */ 430 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 431 PCI_COMMAND_INTX_DISABLE); 432 433 pcie_flr(oct->pci_dev); 434 435 pci_cfg_access_unlock(oct->pci_dev); 436 437 pci_restore_state(oct->pci_dev); 438 } 439 440 /** 441 *\brief Destroy resources associated with octeon device 442 * @param pdev PCI device structure 443 * @param ent unused 444 */ 445 static void octeon_destroy_resources(struct octeon_device *oct) 446 { 447 struct msix_entry *msix_entries; 448 int i; 449 450 switch (atomic_read(&oct->status)) { 451 case OCT_DEV_RUNNING: 452 case OCT_DEV_CORE_OK: 453 /* No more instructions will be forwarded. */ 454 atomic_set(&oct->status, OCT_DEV_IN_RESET); 455 456 oct->app_mode = CVM_DRV_INVALID_APP; 457 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 458 lio_get_state_string(&oct->status)); 459 460 schedule_timeout_uninterruptible(HZ / 10); 461 462 /* fallthrough */ 463 case OCT_DEV_HOST_OK: 464 /* fallthrough */ 465 case OCT_DEV_IO_QUEUES_DONE: 466 if (lio_wait_for_instr_fetch(oct)) 467 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 468 469 if (wait_for_pending_requests(oct)) 470 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 471 472 /* Disable the input and output queues now. No more packets will 473 * arrive from Octeon, but we should wait for all packet 474 * processing to finish. 475 */ 476 oct->fn_list.disable_io_queues(oct); 477 478 if (lio_wait_for_oq_pkts(oct)) 479 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 480 481 /* Force all requests waiting to be fetched by OCTEON to 482 * complete. 483 */ 484 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 485 struct octeon_instr_queue *iq; 486 487 if (!(oct->io_qmask.iq & BIT_ULL(i))) 488 continue; 489 iq = oct->instr_queue[i]; 490 491 if (atomic_read(&iq->instr_pending)) { 492 spin_lock_bh(&iq->lock); 493 iq->fill_cnt = 0; 494 iq->octeon_read_index = iq->host_write_index; 495 iq->stats.instr_processed += 496 atomic_read(&iq->instr_pending); 497 lio_process_iq_request_list(oct, iq, 0); 498 spin_unlock_bh(&iq->lock); 499 } 500 } 501 502 lio_process_ordered_list(oct, 1); 503 octeon_free_sc_done_list(oct); 504 octeon_free_sc_zombie_list(oct); 505 506 /* fall through */ 507 case OCT_DEV_INTR_SET_DONE: 508 /* Disable interrupts */ 509 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 510 511 if (oct->msix_on) { 512 msix_entries = (struct msix_entry *)oct->msix_entries; 513 for (i = 0; i < oct->num_msix_irqs; i++) { 514 if (oct->ioq_vector[i].vector) { 515 irq_set_affinity_hint( 516 msix_entries[i].vector, 517 NULL); 518 free_irq(msix_entries[i].vector, 519 &oct->ioq_vector[i]); 520 oct->ioq_vector[i].vector = 0; 521 } 522 } 523 pci_disable_msix(oct->pci_dev); 524 kfree(oct->msix_entries); 525 oct->msix_entries = NULL; 526 kfree(oct->irq_name_storage); 527 oct->irq_name_storage = NULL; 528 } 529 /* Soft reset the octeon device before exiting */ 530 if (oct->pci_dev->reset_fn) 531 octeon_pci_flr(oct); 532 else 533 cn23xx_vf_ask_pf_to_do_flr(oct); 534 535 /* fallthrough */ 536 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 537 octeon_free_ioq_vector(oct); 538 539 /* fallthrough */ 540 case OCT_DEV_MBOX_SETUP_DONE: 541 oct->fn_list.free_mbox(oct); 542 543 /* fallthrough */ 544 case OCT_DEV_IN_RESET: 545 case OCT_DEV_DROQ_INIT_DONE: 546 mdelay(100); 547 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 548 if (!(oct->io_qmask.oq & BIT_ULL(i))) 549 continue; 550 octeon_delete_droq(oct, i); 551 } 552 553 /* fallthrough */ 554 case OCT_DEV_RESP_LIST_INIT_DONE: 555 octeon_delete_response_list(oct); 556 557 /* fallthrough */ 558 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 559 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 560 if (!(oct->io_qmask.iq & BIT_ULL(i))) 561 continue; 562 octeon_delete_instr_queue(oct, i); 563 } 564 565 /* fallthrough */ 566 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 567 octeon_free_sc_buffer_pool(oct); 568 569 /* fallthrough */ 570 case OCT_DEV_DISPATCH_INIT_DONE: 571 octeon_delete_dispatch_list(oct); 572 cancel_delayed_work_sync(&oct->nic_poll_work.work); 573 574 /* fallthrough */ 575 case OCT_DEV_PCI_MAP_DONE: 576 octeon_unmap_pci_barx(oct, 0); 577 octeon_unmap_pci_barx(oct, 1); 578 579 /* fallthrough */ 580 case OCT_DEV_PCI_ENABLE_DONE: 581 pci_clear_master(oct->pci_dev); 582 /* Disable the device, releasing the PCI INT */ 583 pci_disable_device(oct->pci_dev); 584 585 /* fallthrough */ 586 case OCT_DEV_BEGIN_STATE: 587 /* Nothing to be done here either */ 588 break; 589 } 590 } 591 592 /** 593 * \brief Send Rx control command 594 * @param lio per-network private data 595 * @param start_stop whether to start or stop 596 */ 597 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 598 { 599 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 600 struct octeon_soft_command *sc; 601 union octnet_cmd *ncmd; 602 int retval; 603 604 if (oct->props[lio->ifidx].rx_on == start_stop) 605 return; 606 607 sc = (struct octeon_soft_command *) 608 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 609 16, 0); 610 611 ncmd = (union octnet_cmd *)sc->virtdptr; 612 613 ncmd->u64 = 0; 614 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 615 ncmd->s.param1 = start_stop; 616 617 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 618 619 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 620 621 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 622 OPCODE_NIC_CMD, 0, 0, 0); 623 624 init_completion(&sc->complete); 625 sc->sc_status = OCTEON_REQUEST_PENDING; 626 627 retval = octeon_send_soft_command(oct, sc); 628 if (retval == IQ_SEND_FAILED) { 629 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 630 octeon_free_soft_command(oct, sc); 631 } else { 632 /* Sleep on a wait queue till the cond flag indicates that the 633 * response arrived or timed-out. 634 */ 635 retval = wait_for_sc_completion_timeout(oct, sc, 0); 636 if (retval) 637 return; 638 639 oct->props[lio->ifidx].rx_on = start_stop; 640 WRITE_ONCE(sc->caller_is_done, true); 641 } 642 } 643 644 /** 645 * \brief Destroy NIC device interface 646 * @param oct octeon device 647 * @param ifidx which interface to destroy 648 * 649 * Cleanup associated with each interface for an Octeon device when NIC 650 * module is being unloaded or if initialization fails during load. 651 */ 652 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 653 { 654 struct net_device *netdev = oct->props[ifidx].netdev; 655 struct napi_struct *napi, *n; 656 struct lio *lio; 657 658 if (!netdev) { 659 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 660 __func__, ifidx); 661 return; 662 } 663 664 lio = GET_LIO(netdev); 665 666 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 667 668 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 669 liquidio_stop(netdev); 670 671 if (oct->props[lio->ifidx].napi_enabled == 1) { 672 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 673 napi_disable(napi); 674 675 oct->props[lio->ifidx].napi_enabled = 0; 676 677 oct->droq[0]->ops.poll_mode = 0; 678 } 679 680 /* Delete NAPI */ 681 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 682 netif_napi_del(napi); 683 684 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 685 unregister_netdev(netdev); 686 687 cleanup_rx_oom_poll_fn(netdev); 688 689 cleanup_link_status_change_wq(netdev); 690 691 lio_delete_glists(lio); 692 693 free_netdev(netdev); 694 695 oct->props[ifidx].gmxport = -1; 696 697 oct->props[ifidx].netdev = NULL; 698 } 699 700 /** 701 * \brief Stop complete NIC functionality 702 * @param oct octeon device 703 */ 704 static int liquidio_stop_nic_module(struct octeon_device *oct) 705 { 706 struct lio *lio; 707 int i, j; 708 709 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 710 if (!oct->ifcount) { 711 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 712 return 1; 713 } 714 715 spin_lock_bh(&oct->cmd_resp_wqlock); 716 oct->cmd_resp_state = OCT_DRV_OFFLINE; 717 spin_unlock_bh(&oct->cmd_resp_wqlock); 718 719 for (i = 0; i < oct->ifcount; i++) { 720 lio = GET_LIO(oct->props[i].netdev); 721 for (j = 0; j < oct->num_oqs; j++) 722 octeon_unregister_droq_ops(oct, 723 lio->linfo.rxpciq[j].s.q_no); 724 } 725 726 for (i = 0; i < oct->ifcount; i++) 727 liquidio_destroy_nic_device(oct, i); 728 729 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 730 return 0; 731 } 732 733 /** 734 * \brief Cleans up resources at unload time 735 * @param pdev PCI device structure 736 */ 737 static void liquidio_vf_remove(struct pci_dev *pdev) 738 { 739 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 740 741 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 742 743 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 744 liquidio_stop_nic_module(oct_dev); 745 746 /* Reset the octeon device and cleanup all memory allocated for 747 * the octeon device by driver. 748 */ 749 octeon_destroy_resources(oct_dev); 750 751 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 752 753 /* This octeon device has been removed. Update the global 754 * data structure to reflect this. Free the device structure. 755 */ 756 octeon_free_device_mem(oct_dev); 757 } 758 759 /** 760 * \brief PCI initialization for each Octeon device. 761 * @param oct octeon device 762 */ 763 static int octeon_pci_os_setup(struct octeon_device *oct) 764 { 765 #ifdef CONFIG_PCI_IOV 766 /* setup PCI stuff first */ 767 if (!oct->pci_dev->physfn) 768 octeon_pci_flr(oct); 769 #endif 770 771 if (pci_enable_device(oct->pci_dev)) { 772 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 773 return 1; 774 } 775 776 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 777 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 778 pci_disable_device(oct->pci_dev); 779 return 1; 780 } 781 782 /* Enable PCI DMA Master. */ 783 pci_set_master(oct->pci_dev); 784 785 return 0; 786 } 787 788 /** 789 * \brief Unmap and free network buffer 790 * @param buf buffer 791 */ 792 static void free_netbuf(void *buf) 793 { 794 struct octnet_buf_free_info *finfo; 795 struct sk_buff *skb; 796 struct lio *lio; 797 798 finfo = (struct octnet_buf_free_info *)buf; 799 skb = finfo->skb; 800 lio = finfo->lio; 801 802 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 803 DMA_TO_DEVICE); 804 805 tx_buffer_free(skb); 806 } 807 808 /** 809 * \brief Unmap and free gather buffer 810 * @param buf buffer 811 */ 812 static void free_netsgbuf(void *buf) 813 { 814 struct octnet_buf_free_info *finfo; 815 struct octnic_gather *g; 816 struct sk_buff *skb; 817 int i, frags, iq; 818 struct lio *lio; 819 820 finfo = (struct octnet_buf_free_info *)buf; 821 skb = finfo->skb; 822 lio = finfo->lio; 823 g = finfo->g; 824 frags = skb_shinfo(skb)->nr_frags; 825 826 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 827 g->sg[0].ptr[0], (skb->len - skb->data_len), 828 DMA_TO_DEVICE); 829 830 i = 1; 831 while (frags--) { 832 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 833 834 pci_unmap_page((lio->oct_dev)->pci_dev, 835 g->sg[(i >> 2)].ptr[(i & 3)], 836 frag->size, DMA_TO_DEVICE); 837 i++; 838 } 839 840 iq = skb_iq(lio->oct_dev, skb); 841 842 spin_lock(&lio->glist_lock[iq]); 843 list_add_tail(&g->list, &lio->glist[iq]); 844 spin_unlock(&lio->glist_lock[iq]); 845 846 tx_buffer_free(skb); 847 } 848 849 /** 850 * \brief Unmap and free gather buffer with response 851 * @param buf buffer 852 */ 853 static void free_netsgbuf_with_resp(void *buf) 854 { 855 struct octnet_buf_free_info *finfo; 856 struct octeon_soft_command *sc; 857 struct octnic_gather *g; 858 struct sk_buff *skb; 859 int i, frags, iq; 860 struct lio *lio; 861 862 sc = (struct octeon_soft_command *)buf; 863 skb = (struct sk_buff *)sc->callback_arg; 864 finfo = (struct octnet_buf_free_info *)&skb->cb; 865 866 lio = finfo->lio; 867 g = finfo->g; 868 frags = skb_shinfo(skb)->nr_frags; 869 870 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 871 g->sg[0].ptr[0], (skb->len - skb->data_len), 872 DMA_TO_DEVICE); 873 874 i = 1; 875 while (frags--) { 876 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 877 878 pci_unmap_page((lio->oct_dev)->pci_dev, 879 g->sg[(i >> 2)].ptr[(i & 3)], 880 frag->size, DMA_TO_DEVICE); 881 i++; 882 } 883 884 iq = skb_iq(lio->oct_dev, skb); 885 886 spin_lock(&lio->glist_lock[iq]); 887 list_add_tail(&g->list, &lio->glist[iq]); 888 spin_unlock(&lio->glist_lock[iq]); 889 890 /* Don't free the skb yet */ 891 } 892 893 /** 894 * \brief Net device open for LiquidIO 895 * @param netdev network device 896 */ 897 static int liquidio_open(struct net_device *netdev) 898 { 899 struct lio *lio = GET_LIO(netdev); 900 struct octeon_device *oct = lio->oct_dev; 901 struct napi_struct *napi, *n; 902 903 if (!oct->props[lio->ifidx].napi_enabled) { 904 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 905 napi_enable(napi); 906 907 oct->props[lio->ifidx].napi_enabled = 1; 908 909 oct->droq[0]->ops.poll_mode = 1; 910 } 911 912 ifstate_set(lio, LIO_IFSTATE_RUNNING); 913 914 /* Ready for link status updates */ 915 lio->intf_open = 1; 916 917 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 918 start_txqs(netdev); 919 920 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); 921 lio->stats_wk.ctxptr = lio; 922 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies 923 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); 924 925 /* tell Octeon to start forwarding packets to host */ 926 send_rx_ctrl_cmd(lio, 1); 927 928 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); 929 930 return 0; 931 } 932 933 /** 934 * \brief Net device stop for LiquidIO 935 * @param netdev network device 936 */ 937 static int liquidio_stop(struct net_device *netdev) 938 { 939 struct lio *lio = GET_LIO(netdev); 940 struct octeon_device *oct = lio->oct_dev; 941 struct napi_struct *napi, *n; 942 943 /* tell Octeon to stop forwarding packets to host */ 944 send_rx_ctrl_cmd(lio, 0); 945 946 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 947 /* Inform that netif carrier is down */ 948 lio->intf_open = 0; 949 lio->linfo.link.s.link_up = 0; 950 951 netif_carrier_off(netdev); 952 lio->link_changes++; 953 954 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 955 956 stop_txqs(netdev); 957 958 /* Wait for any pending Rx descriptors */ 959 if (lio_wait_for_clean_oq(oct)) 960 netif_info(lio, rx_err, lio->netdev, 961 "Proceeding with stop interface after partial RX desc processing\n"); 962 963 if (oct->props[lio->ifidx].napi_enabled == 1) { 964 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 965 napi_disable(napi); 966 967 oct->props[lio->ifidx].napi_enabled = 0; 968 969 oct->droq[0]->ops.poll_mode = 0; 970 } 971 972 cancel_delayed_work_sync(&lio->stats_wk.work); 973 974 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 975 976 return 0; 977 } 978 979 /** 980 * \brief Converts a mask based on net device flags 981 * @param netdev network device 982 * 983 * This routine generates a octnet_ifflags mask from the net device flags 984 * received from the OS. 985 */ 986 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 987 { 988 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 989 990 if (netdev->flags & IFF_PROMISC) 991 f |= OCTNET_IFFLAG_PROMISC; 992 993 if (netdev->flags & IFF_ALLMULTI) 994 f |= OCTNET_IFFLAG_ALLMULTI; 995 996 if (netdev->flags & IFF_MULTICAST) { 997 f |= OCTNET_IFFLAG_MULTICAST; 998 999 /* Accept all multicast addresses if there are more than we 1000 * can handle 1001 */ 1002 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1003 f |= OCTNET_IFFLAG_ALLMULTI; 1004 } 1005 1006 if (netdev->flags & IFF_BROADCAST) 1007 f |= OCTNET_IFFLAG_BROADCAST; 1008 1009 return f; 1010 } 1011 1012 static void liquidio_set_uc_list(struct net_device *netdev) 1013 { 1014 struct lio *lio = GET_LIO(netdev); 1015 struct octeon_device *oct = lio->oct_dev; 1016 struct octnic_ctrl_pkt nctrl; 1017 struct netdev_hw_addr *ha; 1018 u64 *mac; 1019 1020 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1021 return; 1022 1023 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1024 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1025 return; 1026 } 1027 1028 lio->netdev_uc_count = netdev_uc_count(netdev); 1029 1030 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1031 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1032 nctrl.ncmd.s.more = lio->netdev_uc_count; 1033 nctrl.ncmd.s.param1 = oct->vf_num; 1034 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1035 nctrl.netpndev = (u64)netdev; 1036 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1037 1038 /* copy all the addresses into the udd */ 1039 mac = &nctrl.udd[0]; 1040 netdev_for_each_uc_addr(ha, netdev) { 1041 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1042 mac++; 1043 } 1044 1045 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1046 } 1047 1048 /** 1049 * \brief Net device set_multicast_list 1050 * @param netdev network device 1051 */ 1052 static void liquidio_set_mcast_list(struct net_device *netdev) 1053 { 1054 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1055 struct lio *lio = GET_LIO(netdev); 1056 struct octeon_device *oct = lio->oct_dev; 1057 struct octnic_ctrl_pkt nctrl; 1058 struct netdev_hw_addr *ha; 1059 u64 *mc; 1060 int ret; 1061 1062 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1063 1064 /* Create a ctrl pkt command to be sent to core app. */ 1065 nctrl.ncmd.u64 = 0; 1066 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1067 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1068 nctrl.ncmd.s.param2 = mc_count; 1069 nctrl.ncmd.s.more = mc_count; 1070 nctrl.netpndev = (u64)netdev; 1071 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1072 1073 /* copy all the addresses into the udd */ 1074 mc = &nctrl.udd[0]; 1075 netdev_for_each_mc_addr(ha, netdev) { 1076 *mc = 0; 1077 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1078 /* no need to swap bytes */ 1079 if (++mc > &nctrl.udd[mc_count]) 1080 break; 1081 } 1082 1083 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1084 1085 /* Apparently, any activity in this call from the kernel has to 1086 * be atomic. So we won't wait for response. 1087 */ 1088 1089 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1090 if (ret) { 1091 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1092 ret); 1093 } 1094 1095 liquidio_set_uc_list(netdev); 1096 } 1097 1098 /** 1099 * \brief Net device set_mac_address 1100 * @param netdev network device 1101 */ 1102 static int liquidio_set_mac(struct net_device *netdev, void *p) 1103 { 1104 struct sockaddr *addr = (struct sockaddr *)p; 1105 struct lio *lio = GET_LIO(netdev); 1106 struct octeon_device *oct = lio->oct_dev; 1107 struct octnic_ctrl_pkt nctrl; 1108 int ret = 0; 1109 1110 if (!is_valid_ether_addr(addr->sa_data)) 1111 return -EADDRNOTAVAIL; 1112 1113 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1114 return 0; 1115 1116 if (lio->linfo.macaddr_is_admin_asgnd) 1117 return -EPERM; 1118 1119 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1120 1121 nctrl.ncmd.u64 = 0; 1122 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1123 nctrl.ncmd.s.param1 = 0; 1124 nctrl.ncmd.s.more = 1; 1125 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1126 nctrl.netpndev = (u64)netdev; 1127 1128 nctrl.udd[0] = 0; 1129 /* The MAC Address is presented in network byte order. */ 1130 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1131 1132 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1133 if (ret < 0) { 1134 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1135 return -ENOMEM; 1136 } 1137 1138 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1139 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1140 1141 return 0; 1142 } 1143 1144 static void 1145 liquidio_get_stats64(struct net_device *netdev, 1146 struct rtnl_link_stats64 *lstats) 1147 { 1148 struct lio *lio = GET_LIO(netdev); 1149 struct octeon_device *oct; 1150 u64 pkts = 0, drop = 0, bytes = 0; 1151 struct oct_droq_stats *oq_stats; 1152 struct oct_iq_stats *iq_stats; 1153 int i, iq_no, oq_no; 1154 1155 oct = lio->oct_dev; 1156 1157 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1158 return; 1159 1160 for (i = 0; i < oct->num_iqs; i++) { 1161 iq_no = lio->linfo.txpciq[i].s.q_no; 1162 iq_stats = &oct->instr_queue[iq_no]->stats; 1163 pkts += iq_stats->tx_done; 1164 drop += iq_stats->tx_dropped; 1165 bytes += iq_stats->tx_tot_bytes; 1166 } 1167 1168 lstats->tx_packets = pkts; 1169 lstats->tx_bytes = bytes; 1170 lstats->tx_dropped = drop; 1171 1172 pkts = 0; 1173 drop = 0; 1174 bytes = 0; 1175 1176 for (i = 0; i < oct->num_oqs; i++) { 1177 oq_no = lio->linfo.rxpciq[i].s.q_no; 1178 oq_stats = &oct->droq[oq_no]->stats; 1179 pkts += oq_stats->rx_pkts_received; 1180 drop += (oq_stats->rx_dropped + 1181 oq_stats->dropped_nodispatch + 1182 oq_stats->dropped_toomany + 1183 oq_stats->dropped_nomem); 1184 bytes += oq_stats->rx_bytes_received; 1185 } 1186 1187 lstats->rx_bytes = bytes; 1188 lstats->rx_packets = pkts; 1189 lstats->rx_dropped = drop; 1190 1191 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 1192 1193 /* detailed rx_errors: */ 1194 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 1195 /* recved pkt with crc error */ 1196 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 1197 /* recv'd frame alignment error */ 1198 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 1199 1200 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + 1201 lstats->rx_frame_errors; 1202 1203 /* detailed tx_errors */ 1204 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 1205 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 1206 1207 lstats->tx_errors = lstats->tx_aborted_errors + 1208 lstats->tx_carrier_errors; 1209 } 1210 1211 /** 1212 * \brief Handler for SIOCSHWTSTAMP ioctl 1213 * @param netdev network device 1214 * @param ifr interface request 1215 * @param cmd command 1216 */ 1217 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 1218 { 1219 struct lio *lio = GET_LIO(netdev); 1220 struct hwtstamp_config conf; 1221 1222 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 1223 return -EFAULT; 1224 1225 if (conf.flags) 1226 return -EINVAL; 1227 1228 switch (conf.tx_type) { 1229 case HWTSTAMP_TX_ON: 1230 case HWTSTAMP_TX_OFF: 1231 break; 1232 default: 1233 return -ERANGE; 1234 } 1235 1236 switch (conf.rx_filter) { 1237 case HWTSTAMP_FILTER_NONE: 1238 break; 1239 case HWTSTAMP_FILTER_ALL: 1240 case HWTSTAMP_FILTER_SOME: 1241 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1242 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1243 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1244 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1245 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1246 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1247 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1248 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1249 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1250 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1251 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1252 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1253 case HWTSTAMP_FILTER_NTP_ALL: 1254 conf.rx_filter = HWTSTAMP_FILTER_ALL; 1255 break; 1256 default: 1257 return -ERANGE; 1258 } 1259 1260 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 1261 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1262 1263 else 1264 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1265 1266 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 1267 } 1268 1269 /** 1270 * \brief ioctl handler 1271 * @param netdev network device 1272 * @param ifr interface request 1273 * @param cmd command 1274 */ 1275 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1276 { 1277 switch (cmd) { 1278 case SIOCSHWTSTAMP: 1279 return hwtstamp_ioctl(netdev, ifr); 1280 default: 1281 return -EOPNOTSUPP; 1282 } 1283 } 1284 1285 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 1286 { 1287 struct sk_buff *skb = (struct sk_buff *)buf; 1288 struct octnet_buf_free_info *finfo; 1289 struct oct_timestamp_resp *resp; 1290 struct octeon_soft_command *sc; 1291 struct lio *lio; 1292 1293 finfo = (struct octnet_buf_free_info *)skb->cb; 1294 lio = finfo->lio; 1295 sc = finfo->sc; 1296 oct = lio->oct_dev; 1297 resp = (struct oct_timestamp_resp *)sc->virtrptr; 1298 1299 if (status != OCTEON_REQUEST_DONE) { 1300 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n", 1301 CVM_CAST64(status)); 1302 resp->timestamp = 0; 1303 } 1304 1305 octeon_swap_8B_data(&resp->timestamp, 1); 1306 1307 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 1308 struct skb_shared_hwtstamps ts; 1309 u64 ns = resp->timestamp; 1310 1311 netif_info(lio, tx_done, lio->netdev, 1312 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 1313 skb, (unsigned long long)ns); 1314 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 1315 skb_tstamp_tx(skb, &ts); 1316 } 1317 1318 octeon_free_soft_command(oct, sc); 1319 tx_buffer_free(skb); 1320 } 1321 1322 /* \brief Send a data packet that will be timestamped 1323 * @param oct octeon device 1324 * @param ndata pointer to network data 1325 * @param finfo pointer to private network data 1326 */ 1327 static int send_nic_timestamp_pkt(struct octeon_device *oct, 1328 struct octnic_data_pkt *ndata, 1329 struct octnet_buf_free_info *finfo, 1330 int xmit_more) 1331 { 1332 struct octeon_soft_command *sc; 1333 int ring_doorbell; 1334 struct lio *lio; 1335 int retval; 1336 u32 len; 1337 1338 lio = finfo->lio; 1339 1340 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 1341 sizeof(struct oct_timestamp_resp)); 1342 finfo->sc = sc; 1343 1344 if (!sc) { 1345 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 1346 return IQ_SEND_FAILED; 1347 } 1348 1349 if (ndata->reqtype == REQTYPE_NORESP_NET) 1350 ndata->reqtype = REQTYPE_RESP_NET; 1351 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 1352 ndata->reqtype = REQTYPE_RESP_NET_SG; 1353 1354 sc->callback = handle_timestamp; 1355 sc->callback_arg = finfo->skb; 1356 sc->iq_no = ndata->q_no; 1357 1358 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; 1359 1360 ring_doorbell = !xmit_more; 1361 1362 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 1363 sc, len, ndata->reqtype); 1364 1365 if (retval == IQ_SEND_FAILED) { 1366 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 1367 retval); 1368 octeon_free_soft_command(oct, sc); 1369 } else { 1370 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 1371 } 1372 1373 return retval; 1374 } 1375 1376 /** \brief Transmit networks packets to the Octeon interface 1377 * @param skbuff skbuff struct to be passed to network layer. 1378 * @param netdev pointer to network device 1379 * @returns whether the packet was transmitted to the device okay or not 1380 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 1381 */ 1382 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 1383 { 1384 struct octnet_buf_free_info *finfo; 1385 union octnic_cmd_setup cmdsetup; 1386 struct octnic_data_pkt ndata; 1387 struct octeon_instr_irh *irh; 1388 struct oct_iq_stats *stats; 1389 struct octeon_device *oct; 1390 int q_idx = 0, iq_no = 0; 1391 union tx_info *tx_info; 1392 int xmit_more = 0; 1393 struct lio *lio; 1394 int status = 0; 1395 u64 dptr = 0; 1396 u32 tag = 0; 1397 int j; 1398 1399 lio = GET_LIO(netdev); 1400 oct = lio->oct_dev; 1401 1402 q_idx = skb_iq(lio->oct_dev, skb); 1403 tag = q_idx; 1404 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 1405 1406 stats = &oct->instr_queue[iq_no]->stats; 1407 1408 /* Check for all conditions in which the current packet cannot be 1409 * transmitted. 1410 */ 1411 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 1412 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 1413 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 1414 lio->linfo.link.s.link_up); 1415 goto lio_xmit_failed; 1416 } 1417 1418 /* Use space in skb->cb to store info used to unmap and 1419 * free the buffers. 1420 */ 1421 finfo = (struct octnet_buf_free_info *)skb->cb; 1422 finfo->lio = lio; 1423 finfo->skb = skb; 1424 finfo->sc = NULL; 1425 1426 /* Prepare the attributes for the data to be passed to OSI. */ 1427 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 1428 1429 ndata.buf = finfo; 1430 1431 ndata.q_no = iq_no; 1432 1433 if (octnet_iq_is_full(oct, ndata.q_no)) { 1434 /* defer sending if queue is full */ 1435 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 1436 ndata.q_no); 1437 stats->tx_iq_busy++; 1438 return NETDEV_TX_BUSY; 1439 } 1440 1441 ndata.datasize = skb->len; 1442 1443 cmdsetup.u64 = 0; 1444 cmdsetup.s.iq_no = iq_no; 1445 1446 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1447 if (skb->encapsulation) { 1448 cmdsetup.s.tnl_csum = 1; 1449 stats->tx_vxlan++; 1450 } else { 1451 cmdsetup.s.transport_csum = 1; 1452 } 1453 } 1454 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1455 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1456 cmdsetup.s.timestamp = 1; 1457 } 1458 1459 if (!skb_shinfo(skb)->nr_frags) { 1460 cmdsetup.s.u.datasize = skb->len; 1461 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1462 /* Offload checksum calculation for TCP/UDP packets */ 1463 dptr = dma_map_single(&oct->pci_dev->dev, 1464 skb->data, 1465 skb->len, 1466 DMA_TO_DEVICE); 1467 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 1468 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 1469 __func__); 1470 return NETDEV_TX_BUSY; 1471 } 1472 1473 ndata.cmd.cmd3.dptr = dptr; 1474 finfo->dptr = dptr; 1475 ndata.reqtype = REQTYPE_NORESP_NET; 1476 1477 } else { 1478 struct skb_frag_struct *frag; 1479 struct octnic_gather *g; 1480 int i, frags; 1481 1482 spin_lock(&lio->glist_lock[q_idx]); 1483 g = (struct octnic_gather *) 1484 lio_list_delete_head(&lio->glist[q_idx]); 1485 spin_unlock(&lio->glist_lock[q_idx]); 1486 1487 if (!g) { 1488 netif_info(lio, tx_err, lio->netdev, 1489 "Transmit scatter gather: glist null!\n"); 1490 goto lio_xmit_failed; 1491 } 1492 1493 cmdsetup.s.gather = 1; 1494 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 1495 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1496 1497 memset(g->sg, 0, g->sg_size); 1498 1499 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 1500 skb->data, 1501 (skb->len - skb->data_len), 1502 DMA_TO_DEVICE); 1503 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 1504 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 1505 __func__); 1506 return NETDEV_TX_BUSY; 1507 } 1508 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 1509 1510 frags = skb_shinfo(skb)->nr_frags; 1511 i = 1; 1512 while (frags--) { 1513 frag = &skb_shinfo(skb)->frags[i - 1]; 1514 1515 g->sg[(i >> 2)].ptr[(i & 3)] = 1516 dma_map_page(&oct->pci_dev->dev, 1517 frag->page.p, 1518 frag->page_offset, 1519 frag->size, 1520 DMA_TO_DEVICE); 1521 if (dma_mapping_error(&oct->pci_dev->dev, 1522 g->sg[i >> 2].ptr[i & 3])) { 1523 dma_unmap_single(&oct->pci_dev->dev, 1524 g->sg[0].ptr[0], 1525 skb->len - skb->data_len, 1526 DMA_TO_DEVICE); 1527 for (j = 1; j < i; j++) { 1528 frag = &skb_shinfo(skb)->frags[j - 1]; 1529 dma_unmap_page(&oct->pci_dev->dev, 1530 g->sg[j >> 2].ptr[j & 3], 1531 frag->size, 1532 DMA_TO_DEVICE); 1533 } 1534 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 1535 __func__); 1536 return NETDEV_TX_BUSY; 1537 } 1538 1539 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 1540 i++; 1541 } 1542 1543 dptr = g->sg_dma_ptr; 1544 1545 ndata.cmd.cmd3.dptr = dptr; 1546 finfo->dptr = dptr; 1547 finfo->g = g; 1548 1549 ndata.reqtype = REQTYPE_NORESP_NET_SG; 1550 } 1551 1552 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 1553 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 1554 1555 if (skb_shinfo(skb)->gso_size) { 1556 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 1557 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 1558 } 1559 1560 /* HW insert VLAN tag */ 1561 if (skb_vlan_tag_present(skb)) { 1562 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 1563 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 1564 } 1565 1566 xmit_more = skb->xmit_more; 1567 1568 if (unlikely(cmdsetup.s.timestamp)) 1569 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 1570 else 1571 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 1572 if (status == IQ_SEND_FAILED) 1573 goto lio_xmit_failed; 1574 1575 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 1576 1577 if (status == IQ_SEND_STOP) { 1578 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 1579 iq_no); 1580 netif_stop_subqueue(netdev, q_idx); 1581 } 1582 1583 netif_trans_update(netdev); 1584 1585 if (tx_info->s.gso_segs) 1586 stats->tx_done += tx_info->s.gso_segs; 1587 else 1588 stats->tx_done++; 1589 stats->tx_tot_bytes += ndata.datasize; 1590 1591 return NETDEV_TX_OK; 1592 1593 lio_xmit_failed: 1594 stats->tx_dropped++; 1595 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 1596 iq_no, stats->tx_dropped); 1597 if (dptr) 1598 dma_unmap_single(&oct->pci_dev->dev, dptr, 1599 ndata.datasize, DMA_TO_DEVICE); 1600 1601 octeon_ring_doorbell_locked(oct, iq_no); 1602 1603 tx_buffer_free(skb); 1604 return NETDEV_TX_OK; 1605 } 1606 1607 /** \brief Network device Tx timeout 1608 * @param netdev pointer to network device 1609 */ 1610 static void liquidio_tx_timeout(struct net_device *netdev) 1611 { 1612 struct lio *lio; 1613 1614 lio = GET_LIO(netdev); 1615 1616 netif_info(lio, tx_err, lio->netdev, 1617 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 1618 netdev->stats.tx_dropped); 1619 netif_trans_update(netdev); 1620 wake_txqs(netdev); 1621 } 1622 1623 static int 1624 liquidio_vlan_rx_add_vid(struct net_device *netdev, 1625 __be16 proto __attribute__((unused)), u16 vid) 1626 { 1627 struct lio *lio = GET_LIO(netdev); 1628 struct octeon_device *oct = lio->oct_dev; 1629 struct octnic_ctrl_pkt nctrl; 1630 int ret = 0; 1631 1632 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1633 1634 nctrl.ncmd.u64 = 0; 1635 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 1636 nctrl.ncmd.s.param1 = vid; 1637 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1638 nctrl.netpndev = (u64)netdev; 1639 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1640 1641 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1642 if (ret) { 1643 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 1644 ret); 1645 return -EPERM; 1646 } 1647 1648 return 0; 1649 } 1650 1651 static int 1652 liquidio_vlan_rx_kill_vid(struct net_device *netdev, 1653 __be16 proto __attribute__((unused)), u16 vid) 1654 { 1655 struct lio *lio = GET_LIO(netdev); 1656 struct octeon_device *oct = lio->oct_dev; 1657 struct octnic_ctrl_pkt nctrl; 1658 int ret = 0; 1659 1660 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1661 1662 nctrl.ncmd.u64 = 0; 1663 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 1664 nctrl.ncmd.s.param1 = vid; 1665 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1666 nctrl.netpndev = (u64)netdev; 1667 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1668 1669 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1670 if (ret) { 1671 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 1672 ret); 1673 if (ret > 0) 1674 ret = -EIO; 1675 } 1676 return ret; 1677 } 1678 1679 /** Sending command to enable/disable RX checksum offload 1680 * @param netdev pointer to network device 1681 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 1682 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ 1683 * OCTNET_CMD_RXCSUM_DISABLE 1684 * @returns SUCCESS or FAILURE 1685 */ 1686 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 1687 u8 rx_cmd) 1688 { 1689 struct lio *lio = GET_LIO(netdev); 1690 struct octeon_device *oct = lio->oct_dev; 1691 struct octnic_ctrl_pkt nctrl; 1692 int ret = 0; 1693 1694 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1695 1696 nctrl.ncmd.u64 = 0; 1697 nctrl.ncmd.s.cmd = command; 1698 nctrl.ncmd.s.param1 = rx_cmd; 1699 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1700 nctrl.netpndev = (u64)netdev; 1701 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1702 1703 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1704 if (ret) { 1705 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", 1706 ret); 1707 if (ret > 0) 1708 ret = -EIO; 1709 } 1710 return ret; 1711 } 1712 1713 /** Sending command to add/delete VxLAN UDP port to firmware 1714 * @param netdev pointer to network device 1715 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 1716 * @param vxlan_port VxLAN port to be added or deleted 1717 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 1718 * OCTNET_CMD_VXLAN_PORT_DEL 1719 * @returns SUCCESS or FAILURE 1720 */ 1721 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 1722 u16 vxlan_port, u8 vxlan_cmd_bit) 1723 { 1724 struct lio *lio = GET_LIO(netdev); 1725 struct octeon_device *oct = lio->oct_dev; 1726 struct octnic_ctrl_pkt nctrl; 1727 int ret = 0; 1728 1729 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1730 1731 nctrl.ncmd.u64 = 0; 1732 nctrl.ncmd.s.cmd = command; 1733 nctrl.ncmd.s.more = vxlan_cmd_bit; 1734 nctrl.ncmd.s.param1 = vxlan_port; 1735 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1736 nctrl.netpndev = (u64)netdev; 1737 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1738 1739 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1740 if (ret) { 1741 dev_err(&oct->pci_dev->dev, 1742 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", 1743 ret); 1744 if (ret > 0) 1745 ret = -EIO; 1746 } 1747 return ret; 1748 } 1749 1750 /** \brief Net device fix features 1751 * @param netdev pointer to network device 1752 * @param request features requested 1753 * @returns updated features list 1754 */ 1755 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 1756 netdev_features_t request) 1757 { 1758 struct lio *lio = netdev_priv(netdev); 1759 1760 if ((request & NETIF_F_RXCSUM) && 1761 !(lio->dev_capability & NETIF_F_RXCSUM)) 1762 request &= ~NETIF_F_RXCSUM; 1763 1764 if ((request & NETIF_F_HW_CSUM) && 1765 !(lio->dev_capability & NETIF_F_HW_CSUM)) 1766 request &= ~NETIF_F_HW_CSUM; 1767 1768 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 1769 request &= ~NETIF_F_TSO; 1770 1771 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 1772 request &= ~NETIF_F_TSO6; 1773 1774 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 1775 request &= ~NETIF_F_LRO; 1776 1777 /* Disable LRO if RXCSUM is off */ 1778 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 1779 (lio->dev_capability & NETIF_F_LRO)) 1780 request &= ~NETIF_F_LRO; 1781 1782 return request; 1783 } 1784 1785 /** \brief Net device set features 1786 * @param netdev pointer to network device 1787 * @param features features to enable/disable 1788 */ 1789 static int liquidio_set_features(struct net_device *netdev, 1790 netdev_features_t features) 1791 { 1792 struct lio *lio = netdev_priv(netdev); 1793 1794 if (!((netdev->features ^ features) & NETIF_F_LRO)) 1795 return 0; 1796 1797 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 1798 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 1799 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1800 else if (!(features & NETIF_F_LRO) && 1801 (lio->dev_capability & NETIF_F_LRO)) 1802 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 1803 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1804 if (!(netdev->features & NETIF_F_RXCSUM) && 1805 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1806 (features & NETIF_F_RXCSUM)) 1807 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1808 OCTNET_CMD_RXCSUM_ENABLE); 1809 else if ((netdev->features & NETIF_F_RXCSUM) && 1810 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1811 !(features & NETIF_F_RXCSUM)) 1812 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1813 OCTNET_CMD_RXCSUM_DISABLE); 1814 1815 return 0; 1816 } 1817 1818 static void liquidio_add_vxlan_port(struct net_device *netdev, 1819 struct udp_tunnel_info *ti) 1820 { 1821 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 1822 return; 1823 1824 liquidio_vxlan_port_command(netdev, 1825 OCTNET_CMD_VXLAN_PORT_CONFIG, 1826 htons(ti->port), 1827 OCTNET_CMD_VXLAN_PORT_ADD); 1828 } 1829 1830 static void liquidio_del_vxlan_port(struct net_device *netdev, 1831 struct udp_tunnel_info *ti) 1832 { 1833 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 1834 return; 1835 1836 liquidio_vxlan_port_command(netdev, 1837 OCTNET_CMD_VXLAN_PORT_CONFIG, 1838 htons(ti->port), 1839 OCTNET_CMD_VXLAN_PORT_DEL); 1840 } 1841 1842 static const struct net_device_ops lionetdevops = { 1843 .ndo_open = liquidio_open, 1844 .ndo_stop = liquidio_stop, 1845 .ndo_start_xmit = liquidio_xmit, 1846 .ndo_get_stats64 = liquidio_get_stats64, 1847 .ndo_set_mac_address = liquidio_set_mac, 1848 .ndo_set_rx_mode = liquidio_set_mcast_list, 1849 .ndo_tx_timeout = liquidio_tx_timeout, 1850 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 1851 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 1852 .ndo_change_mtu = liquidio_change_mtu, 1853 .ndo_do_ioctl = liquidio_ioctl, 1854 .ndo_fix_features = liquidio_fix_features, 1855 .ndo_set_features = liquidio_set_features, 1856 .ndo_udp_tunnel_add = liquidio_add_vxlan_port, 1857 .ndo_udp_tunnel_del = liquidio_del_vxlan_port, 1858 }; 1859 1860 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 1861 { 1862 struct octeon_device *oct = (struct octeon_device *)buf; 1863 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 1864 union oct_link_status *ls; 1865 int gmxport = 0; 1866 int i; 1867 1868 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 1869 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 1870 recv_pkt->buffer_size[0], 1871 recv_pkt->rh.r_nic_info.gmxport); 1872 goto nic_info_err; 1873 } 1874 1875 gmxport = recv_pkt->rh.r_nic_info.gmxport; 1876 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 1877 OCT_DROQ_INFO_SIZE); 1878 1879 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 1880 1881 for (i = 0; i < oct->ifcount; i++) { 1882 if (oct->props[i].gmxport == gmxport) { 1883 update_link_status(oct->props[i].netdev, ls); 1884 break; 1885 } 1886 } 1887 1888 nic_info_err: 1889 for (i = 0; i < recv_pkt->buffer_count; i++) 1890 recv_buffer_free(recv_pkt->buffer_ptr[i]); 1891 octeon_free_recv_info(recv_info); 1892 return 0; 1893 } 1894 1895 /** 1896 * \brief Setup network interfaces 1897 * @param octeon_dev octeon device 1898 * 1899 * Called during init time for each device. It assumes the NIC 1900 * is already up and running. The link information for each 1901 * interface is passed in link_info. 1902 */ 1903 static int setup_nic_devices(struct octeon_device *octeon_dev) 1904 { 1905 int retval, num_iqueues, num_oqueues; 1906 u32 resp_size, data_size; 1907 struct liquidio_if_cfg_resp *resp; 1908 struct octeon_soft_command *sc; 1909 union oct_nic_if_cfg if_cfg; 1910 struct octdev_props *props; 1911 struct net_device *netdev; 1912 struct lio_version *vdata; 1913 struct lio *lio = NULL; 1914 u8 mac[ETH_ALEN], i, j; 1915 u32 ifidx_or_pfnum; 1916 1917 ifidx_or_pfnum = octeon_dev->pf_num; 1918 1919 /* This is to handle link status changes */ 1920 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, 1921 lio_nic_info, octeon_dev); 1922 1923 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 1924 * They are handled directly. 1925 */ 1926 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 1927 free_netbuf); 1928 1929 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 1930 free_netsgbuf); 1931 1932 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 1933 free_netsgbuf_with_resp); 1934 1935 for (i = 0; i < octeon_dev->ifcount; i++) { 1936 resp_size = sizeof(struct liquidio_if_cfg_resp); 1937 data_size = sizeof(struct lio_version); 1938 sc = (struct octeon_soft_command *) 1939 octeon_alloc_soft_command(octeon_dev, data_size, 1940 resp_size, 0); 1941 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1942 vdata = (struct lio_version *)sc->virtdptr; 1943 1944 *((u64 *)vdata) = 0; 1945 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 1946 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 1947 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 1948 1949 if_cfg.u64 = 0; 1950 1951 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; 1952 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; 1953 if_cfg.s.base_queue = 0; 1954 1955 sc->iq_no = 0; 1956 1957 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 1958 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 1959 0); 1960 1961 init_completion(&sc->complete); 1962 sc->sc_status = OCTEON_REQUEST_PENDING; 1963 1964 retval = octeon_send_soft_command(octeon_dev, sc); 1965 if (retval == IQ_SEND_FAILED) { 1966 dev_err(&octeon_dev->pci_dev->dev, 1967 "iq/oq config failed status: %x\n", retval); 1968 /* Soft instr is freed by driver in case of failure. */ 1969 octeon_free_soft_command(octeon_dev, sc); 1970 return(-EIO); 1971 } 1972 1973 /* Sleep on a wait queue till the cond flag indicates that the 1974 * response arrived or timed-out. 1975 */ 1976 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); 1977 if (retval) 1978 return retval; 1979 1980 retval = resp->status; 1981 if (retval) { 1982 dev_err(&octeon_dev->pci_dev->dev, 1983 "iq/oq config failed, retval = %d\n", retval); 1984 WRITE_ONCE(sc->caller_is_done, true); 1985 return -EIO; 1986 } 1987 1988 snprintf(octeon_dev->fw_info.liquidio_firmware_version, 1989 32, "%s", 1990 resp->cfg_info.liquidio_firmware_version); 1991 1992 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 1993 (sizeof(struct liquidio_if_cfg_info)) >> 3); 1994 1995 num_iqueues = hweight64(resp->cfg_info.iqmask); 1996 num_oqueues = hweight64(resp->cfg_info.oqmask); 1997 1998 if (!(num_iqueues) || !(num_oqueues)) { 1999 dev_err(&octeon_dev->pci_dev->dev, 2000 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 2001 resp->cfg_info.iqmask, resp->cfg_info.oqmask); 2002 WRITE_ONCE(sc->caller_is_done, true); 2003 goto setup_nic_dev_done; 2004 } 2005 dev_dbg(&octeon_dev->pci_dev->dev, 2006 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 2007 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 2008 num_iqueues, num_oqueues); 2009 2010 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 2011 2012 if (!netdev) { 2013 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 2014 WRITE_ONCE(sc->caller_is_done, true); 2015 goto setup_nic_dev_done; 2016 } 2017 2018 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 2019 2020 /* Associate the routines that will handle different 2021 * netdev tasks. 2022 */ 2023 netdev->netdev_ops = &lionetdevops; 2024 2025 lio = GET_LIO(netdev); 2026 2027 memset(lio, 0, sizeof(struct lio)); 2028 2029 lio->ifidx = ifidx_or_pfnum; 2030 2031 props = &octeon_dev->props[i]; 2032 props->gmxport = resp->cfg_info.linfo.gmxport; 2033 props->netdev = netdev; 2034 2035 lio->linfo.num_rxpciq = num_oqueues; 2036 lio->linfo.num_txpciq = num_iqueues; 2037 2038 for (j = 0; j < num_oqueues; j++) { 2039 lio->linfo.rxpciq[j].u64 = 2040 resp->cfg_info.linfo.rxpciq[j].u64; 2041 } 2042 for (j = 0; j < num_iqueues; j++) { 2043 lio->linfo.txpciq[j].u64 = 2044 resp->cfg_info.linfo.txpciq[j].u64; 2045 } 2046 2047 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 2048 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 2049 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 2050 lio->linfo.macaddr_is_admin_asgnd = 2051 resp->cfg_info.linfo.macaddr_is_admin_asgnd; 2052 2053 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 2054 2055 lio->dev_capability = NETIF_F_HIGHDMA 2056 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 2057 | NETIF_F_SG | NETIF_F_RXCSUM 2058 | NETIF_F_TSO | NETIF_F_TSO6 2059 | NETIF_F_GRO 2060 | NETIF_F_LRO; 2061 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 2062 2063 /* Copy of transmit encapsulation capabilities: 2064 * TSO, TSO6, Checksums for this device 2065 */ 2066 lio->enc_dev_capability = NETIF_F_IP_CSUM 2067 | NETIF_F_IPV6_CSUM 2068 | NETIF_F_GSO_UDP_TUNNEL 2069 | NETIF_F_HW_CSUM | NETIF_F_SG 2070 | NETIF_F_RXCSUM 2071 | NETIF_F_TSO | NETIF_F_TSO6 2072 | NETIF_F_LRO; 2073 2074 netdev->hw_enc_features = 2075 (lio->enc_dev_capability & ~NETIF_F_LRO); 2076 netdev->vlan_features = lio->dev_capability; 2077 /* Add any unchangeable hw features */ 2078 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 2079 NETIF_F_HW_VLAN_CTAG_RX | 2080 NETIF_F_HW_VLAN_CTAG_TX; 2081 2082 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 2083 2084 netdev->hw_features = lio->dev_capability; 2085 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; 2086 2087 /* MTU range: 68 - 16000 */ 2088 netdev->min_mtu = LIO_MIN_MTU_SIZE; 2089 netdev->max_mtu = LIO_MAX_MTU_SIZE; 2090 2091 WRITE_ONCE(sc->caller_is_done, true); 2092 2093 /* Point to the properties for octeon device to which this 2094 * interface belongs. 2095 */ 2096 lio->oct_dev = octeon_dev; 2097 lio->octprops = props; 2098 lio->netdev = netdev; 2099 2100 dev_dbg(&octeon_dev->pci_dev->dev, 2101 "if%d gmx: %d hw_addr: 0x%llx\n", i, 2102 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 2103 2104 /* 64-bit swap required on LE machines */ 2105 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 2106 for (j = 0; j < ETH_ALEN; j++) 2107 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 2108 2109 /* Copy MAC Address to OS network device structure */ 2110 ether_addr_copy(netdev->dev_addr, mac); 2111 2112 if (liquidio_setup_io_queues(octeon_dev, i, 2113 lio->linfo.num_txpciq, 2114 lio->linfo.num_rxpciq)) { 2115 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 2116 goto setup_nic_dev_free; 2117 } 2118 2119 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 2120 2121 /* For VFs, enable Octeon device interrupts here, 2122 * as this is contingent upon IO queue setup 2123 */ 2124 octeon_dev->fn_list.enable_interrupt(octeon_dev, 2125 OCTEON_ALL_INTR); 2126 2127 /* By default all interfaces on a single Octeon uses the same 2128 * tx and rx queues 2129 */ 2130 lio->txq = lio->linfo.txpciq[0].s.q_no; 2131 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 2132 2133 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 2134 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 2135 2136 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { 2137 dev_err(&octeon_dev->pci_dev->dev, 2138 "Gather list allocation failed\n"); 2139 goto setup_nic_dev_free; 2140 } 2141 2142 /* Register ethtool support */ 2143 liquidio_set_ethtool_ops(netdev); 2144 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID) 2145 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 2146 else 2147 octeon_dev->priv_flags = 0x0; 2148 2149 if (netdev->features & NETIF_F_LRO) 2150 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2151 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2152 2153 if (setup_link_status_change_wq(netdev)) 2154 goto setup_nic_dev_free; 2155 2156 if (setup_rx_oom_poll_fn(netdev)) 2157 goto setup_nic_dev_free; 2158 2159 /* Register the network device with the OS */ 2160 if (register_netdev(netdev)) { 2161 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 2162 goto setup_nic_dev_free; 2163 } 2164 2165 dev_dbg(&octeon_dev->pci_dev->dev, 2166 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 2167 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 2168 netif_carrier_off(netdev); 2169 lio->link_changes++; 2170 2171 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 2172 2173 /* Sending command to firmware to enable Rx checksum offload 2174 * by default at the time of setup of Liquidio driver for 2175 * this device 2176 */ 2177 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2178 OCTNET_CMD_RXCSUM_ENABLE); 2179 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 2180 OCTNET_CMD_TXCSUM_ENABLE); 2181 2182 dev_dbg(&octeon_dev->pci_dev->dev, 2183 "NIC ifidx:%d Setup successful\n", i); 2184 2185 octeon_dev->no_speed_setting = 1; 2186 } 2187 2188 return 0; 2189 2190 setup_nic_dev_free: 2191 2192 while (i--) { 2193 dev_err(&octeon_dev->pci_dev->dev, 2194 "NIC ifidx:%d Setup failed\n", i); 2195 liquidio_destroy_nic_device(octeon_dev, i); 2196 } 2197 2198 setup_nic_dev_done: 2199 2200 return -ENODEV; 2201 } 2202 2203 /** 2204 * \brief initialize the NIC 2205 * @param oct octeon device 2206 * 2207 * This initialization routine is called once the Octeon device application is 2208 * up and running 2209 */ 2210 static int liquidio_init_nic_module(struct octeon_device *oct) 2211 { 2212 int num_nic_ports = 1; 2213 int i, retval = 0; 2214 2215 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 2216 2217 /* only default iq and oq were initialized 2218 * initialize the rest as well run port_config command for each port 2219 */ 2220 oct->ifcount = num_nic_ports; 2221 memset(oct->props, 0, 2222 sizeof(struct octdev_props) * num_nic_ports); 2223 2224 for (i = 0; i < MAX_OCTEON_LINKS; i++) 2225 oct->props[i].gmxport = -1; 2226 2227 retval = setup_nic_devices(oct); 2228 if (retval) { 2229 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 2230 goto octnet_init_failure; 2231 } 2232 2233 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 2234 2235 return retval; 2236 2237 octnet_init_failure: 2238 2239 oct->ifcount = 0; 2240 2241 return retval; 2242 } 2243 2244 /** 2245 * \brief Device initialization for each Octeon device that is probed 2246 * @param octeon_dev octeon device 2247 */ 2248 static int octeon_device_init(struct octeon_device *oct) 2249 { 2250 u32 rev_id; 2251 int j; 2252 2253 atomic_set(&oct->status, OCT_DEV_BEGIN_STATE); 2254 2255 /* Enable access to the octeon device and make its DMA capability 2256 * known to the OS. 2257 */ 2258 if (octeon_pci_os_setup(oct)) 2259 return 1; 2260 atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE); 2261 2262 oct->chip_id = OCTEON_CN23XX_VF_VID; 2263 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 2264 oct->rev_id = rev_id & 0xff; 2265 2266 if (cn23xx_setup_octeon_vf_device(oct)) 2267 return 1; 2268 2269 atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE); 2270 2271 oct->app_mode = CVM_DRV_NIC_APP; 2272 2273 /* Initialize the dispatch mechanism used to push packets arriving on 2274 * Octeon Output queues. 2275 */ 2276 if (octeon_init_dispatch_list(oct)) 2277 return 1; 2278 2279 atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE); 2280 2281 if (octeon_set_io_queues_off(oct)) { 2282 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); 2283 return 1; 2284 } 2285 2286 if (oct->fn_list.setup_device_regs(oct)) { 2287 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n"); 2288 return 1; 2289 } 2290 2291 /* Initialize soft command buffer pool */ 2292 if (octeon_setup_sc_buffer_pool(oct)) { 2293 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n"); 2294 return 1; 2295 } 2296 atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 2297 2298 /* Setup the data structures that manage this Octeon's Input queues. */ 2299 if (octeon_setup_instr_queues(oct)) { 2300 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n"); 2301 return 1; 2302 } 2303 atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 2304 2305 /* Initialize lists to manage the requests of different types that 2306 * arrive from user & kernel applications for this octeon device. 2307 */ 2308 if (octeon_setup_response_list(oct)) { 2309 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n"); 2310 return 1; 2311 } 2312 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE); 2313 2314 if (octeon_setup_output_queues(oct)) { 2315 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n"); 2316 return 1; 2317 } 2318 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE); 2319 2320 if (oct->fn_list.setup_mbox(oct)) { 2321 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); 2322 return 1; 2323 } 2324 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); 2325 2326 if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) { 2327 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); 2328 return 1; 2329 } 2330 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 2331 2332 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n", 2333 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf); 2334 2335 /* Setup the interrupt handler and record the INT SUM register address*/ 2336 if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf)) 2337 return 1; 2338 2339 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); 2340 2341 /* *************************************************************** 2342 * The interrupts need to be enabled for the PF<-->VF handshake. 2343 * They are [re]-enabled after the PF<-->VF handshake so that the 2344 * correct OQ tick value is used (i.e. the value retrieved from 2345 * the PF as part of the handshake). 2346 */ 2347 2348 /* Enable Octeon device interrupts */ 2349 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 2350 2351 if (cn23xx_octeon_pfvf_handshake(oct)) 2352 return 1; 2353 2354 /* Here we [re]-enable the interrupts so that the correct OQ tick value 2355 * is used (i.e. the value that was retrieved during the handshake) 2356 */ 2357 2358 /* Enable Octeon device interrupts */ 2359 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 2360 /* *************************************************************** */ 2361 2362 /* Enable the input and output queues for this Octeon device */ 2363 if (oct->fn_list.enable_io_queues(oct)) { 2364 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n"); 2365 return 1; 2366 } 2367 2368 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE); 2369 2370 atomic_set(&oct->status, OCT_DEV_HOST_OK); 2371 2372 /* Send Credit for Octeon Output queues. Credits are always sent after 2373 * the output queue is enabled. 2374 */ 2375 for (j = 0; j < oct->num_oqs; j++) 2376 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); 2377 2378 /* Packets can start arriving on the output queues from this point. */ 2379 2380 atomic_set(&oct->status, OCT_DEV_CORE_OK); 2381 2382 atomic_set(&oct->status, OCT_DEV_RUNNING); 2383 2384 if (liquidio_init_nic_module(oct)) 2385 return 1; 2386 2387 return 0; 2388 } 2389 2390 static int __init liquidio_vf_init(void) 2391 { 2392 octeon_init_device_list(0); 2393 return pci_register_driver(&liquidio_vf_pci_driver); 2394 } 2395 2396 static void __exit liquidio_vf_exit(void) 2397 { 2398 pci_unregister_driver(&liquidio_vf_pci_driver); 2399 2400 pr_info("LiquidIO_VF network module is now unloaded\n"); 2401 } 2402 2403 module_init(liquidio_vf_init); 2404 module_exit(liquidio_vf_exit); 2405