/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn23xx_vf_device.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

struct octnic_gather {
	/* List manipulation. Next and prev pointers. */
	struct list_head list;

	/* Size of the gather component at sg in bytes. */
	int sg_size;

	/* Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/* Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};

struct octeon_device_priv {
	/* Tasklet structures for this device.
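	 * The droq_tasklet is scheduled from lio_wait_for_oq_pkts() and from
	 * the MSI-X interrupt path when a DROQ has packets but NAPI poll
	 * mode is off.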
*/ 112 struct tasklet_struct droq_tasklet; 113 unsigned long napi_mask; 114 }; 115 116 static int 117 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 118 static void liquidio_vf_remove(struct pci_dev *pdev); 119 static int octeon_device_init(struct octeon_device *oct); 120 static int liquidio_stop(struct net_device *netdev); 121 122 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 123 { 124 struct octeon_device_priv *oct_priv = 125 (struct octeon_device_priv *)oct->priv; 126 int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT; 127 int pkt_cnt = 0, pending_pkts; 128 int i; 129 130 do { 131 pending_pkts = 0; 132 133 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 134 if (!(oct->io_qmask.oq & BIT_ULL(i))) 135 continue; 136 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 137 } 138 if (pkt_cnt > 0) { 139 pending_pkts += pkt_cnt; 140 tasklet_schedule(&oct_priv->droq_tasklet); 141 } 142 pkt_cnt = 0; 143 schedule_timeout_uninterruptible(1); 144 145 } while (retry-- && pending_pkts); 146 147 return pkt_cnt; 148 } 149 150 /** 151 * \brief wait for all pending requests to complete 152 * @param oct Pointer to Octeon device 153 * 154 * Called during shutdown sequence 155 */ 156 static int wait_for_pending_requests(struct octeon_device *oct) 157 { 158 int i, pcount = 0; 159 160 for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) { 161 pcount = atomic_read( 162 &oct->response_list[OCTEON_ORDERED_SC_LIST] 163 .pending_req_count); 164 if (pcount) 165 schedule_timeout_uninterruptible(HZ / 10); 166 else 167 break; 168 } 169 170 if (pcount) 171 return 1; 172 173 return 0; 174 } 175 176 /** 177 * \brief Cause device to go quiet so it can be safely removed/reset/etc 178 * @param oct Pointer to Octeon device 179 */ 180 static void pcierror_quiesce_device(struct octeon_device *oct) 181 { 182 int i; 183 184 /* Disable the input and output queues now. No more packets will 185 * arrive from Octeon, but we should wait for all packet processing 186 * to finish. 187 */ 188 189 /* To allow for in-flight requests */ 190 schedule_timeout_uninterruptible(100); 191 192 if (wait_for_pending_requests(oct)) 193 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 194 195 /* Force all requests waiting to be fetched by OCTEON to complete. */ 196 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 197 struct octeon_instr_queue *iq; 198 199 if (!(oct->io_qmask.iq & BIT_ULL(i))) 200 continue; 201 iq = oct->instr_queue[i]; 202 203 if (atomic_read(&iq->instr_pending)) { 204 spin_lock_bh(&iq->lock); 205 iq->fill_cnt = 0; 206 iq->octeon_read_index = iq->host_write_index; 207 iq->stats.instr_processed += 208 atomic_read(&iq->instr_pending); 209 lio_process_iq_request_list(oct, iq, 0); 210 spin_unlock_bh(&iq->lock); 211 } 212 } 213 214 /* Force all pending ordered list requests to time out. */ 215 lio_process_ordered_list(oct, 1); 216 217 /* We do not need to wait for output queue packets to be processed. 
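	 * They are consumed on the host by NAPI or the droq tasklet, so no
	 * further interaction with the device should be required for them.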
*/ 218 } 219 220 /** 221 * \brief Cleanup PCI AER uncorrectable error status 222 * @param dev Pointer to PCI device 223 */ 224 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 225 { 226 u32 status, mask; 227 int pos = 0x100; 228 229 pr_info("%s :\n", __func__); 230 231 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 232 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 233 if (dev->error_state == pci_channel_io_normal) 234 status &= ~mask; /* Clear corresponding nonfatal bits */ 235 else 236 status &= mask; /* Clear corresponding fatal bits */ 237 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 238 } 239 240 /** 241 * \brief Stop all PCI IO to a given device 242 * @param dev Pointer to Octeon device 243 */ 244 static void stop_pci_io(struct octeon_device *oct) 245 { 246 struct msix_entry *msix_entries; 247 int i; 248 249 /* No more instructions will be forwarded. */ 250 atomic_set(&oct->status, OCT_DEV_IN_RESET); 251 252 for (i = 0; i < oct->ifcount; i++) 253 netif_device_detach(oct->props[i].netdev); 254 255 /* Disable interrupts */ 256 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 257 258 pcierror_quiesce_device(oct); 259 if (oct->msix_on) { 260 msix_entries = (struct msix_entry *)oct->msix_entries; 261 for (i = 0; i < oct->num_msix_irqs; i++) { 262 /* clear the affinity_cpumask */ 263 irq_set_affinity_hint(msix_entries[i].vector, 264 NULL); 265 free_irq(msix_entries[i].vector, 266 &oct->ioq_vector[i]); 267 } 268 pci_disable_msix(oct->pci_dev); 269 kfree(oct->msix_entries); 270 oct->msix_entries = NULL; 271 octeon_free_ioq_vector(oct); 272 } 273 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 274 lio_get_state_string(&oct->status)); 275 276 /* making it a common function for all OCTEON models */ 277 cleanup_aer_uncorrect_error_status(oct->pci_dev); 278 279 pci_disable_device(oct->pci_dev); 280 } 281 282 /** 283 * \brief called when PCI error is detected 284 * @param pdev Pointer to PCI device 285 * @param state The current pci connection state 286 * 287 * This function is called after a PCI bus error affecting 288 * this device has been detected. 
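 * Returns PCI_ERS_RESULT_CAN_RECOVER for non-fatal errors (after clearing
 * the AER uncorrectable-error status) and PCI_ERS_RESULT_DISCONNECT for
 * fatal errors (after stopping all PCI I/O to the device).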
289 */ 290 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 291 pci_channel_state_t state) 292 { 293 struct octeon_device *oct = pci_get_drvdata(pdev); 294 295 /* Non-correctable Non-fatal errors */ 296 if (state == pci_channel_io_normal) { 297 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 298 cleanup_aer_uncorrect_error_status(oct->pci_dev); 299 return PCI_ERS_RESULT_CAN_RECOVER; 300 } 301 302 /* Non-correctable Fatal errors */ 303 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 304 stop_pci_io(oct); 305 306 return PCI_ERS_RESULT_DISCONNECT; 307 } 308 309 /* For PCI-E Advanced Error Recovery (AER) Interface */ 310 static const struct pci_error_handlers liquidio_vf_err_handler = { 311 .error_detected = liquidio_pcie_error_detected, 312 }; 313 314 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 315 { 316 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 317 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 318 }, 319 { 320 0, 0, 0, 0, 0, 0, 0 321 } 322 }; 323 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 324 325 static struct pci_driver liquidio_vf_pci_driver = { 326 .name = "LiquidIO_VF", 327 .id_table = liquidio_vf_pci_tbl, 328 .probe = liquidio_vf_probe, 329 .remove = liquidio_vf_remove, 330 .err_handler = &liquidio_vf_err_handler, /* For AER */ 331 }; 332 333 /** 334 * \brief Stop Tx queues 335 * @param netdev network device 336 */ 337 static void txqs_stop(struct net_device *netdev) 338 { 339 if (netif_is_multiqueue(netdev)) { 340 int i; 341 342 for (i = 0; i < netdev->num_tx_queues; i++) 343 netif_stop_subqueue(netdev, i); 344 } else { 345 netif_stop_queue(netdev); 346 } 347 } 348 349 /** 350 * \brief Start Tx queues 351 * @param netdev network device 352 */ 353 static void txqs_start(struct net_device *netdev) 354 { 355 if (netif_is_multiqueue(netdev)) { 356 int i; 357 358 for (i = 0; i < netdev->num_tx_queues; i++) 359 netif_start_subqueue(netdev, i); 360 } else { 361 netif_start_queue(netdev); 362 } 363 } 364 365 /** 366 * \brief Wake Tx queues 367 * @param netdev network device 368 */ 369 static void txqs_wake(struct net_device *netdev) 370 { 371 struct lio *lio = GET_LIO(netdev); 372 373 if (netif_is_multiqueue(netdev)) { 374 int i; 375 376 for (i = 0; i < netdev->num_tx_queues; i++) { 377 int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)] 378 .s.q_no; 379 if (__netif_subqueue_stopped(netdev, i)) { 380 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, 381 tx_restart, 1); 382 netif_wake_subqueue(netdev, i); 383 } 384 } 385 } else { 386 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, 387 tx_restart, 1); 388 netif_wake_queue(netdev); 389 } 390 } 391 392 /** 393 * \brief Start Tx queue 394 * @param netdev network device 395 */ 396 static void start_txq(struct net_device *netdev) 397 { 398 struct lio *lio = GET_LIO(netdev); 399 400 if (lio->linfo.link.s.link_up) { 401 txqs_start(netdev); 402 return; 403 } 404 } 405 406 /** 407 * \brief Wake a queue 408 * @param netdev network device 409 * @param q which queue to wake 410 */ 411 static void wake_q(struct net_device *netdev, int q) 412 { 413 if (netif_is_multiqueue(netdev)) 414 netif_wake_subqueue(netdev, q); 415 else 416 netif_wake_queue(netdev); 417 } 418 419 /** 420 * \brief Stop a queue 421 * @param netdev network device 422 * @param q which queue to stop 423 */ 424 static void stop_q(struct net_device *netdev, int q) 425 { 426 if (netif_is_multiqueue(netdev)) 427 netif_stop_subqueue(netdev, q); 428 else 429 netif_stop_queue(netdev); 430 } 431 432 /** 
433 * Remove the node at the head of the list. The list would be empty at 434 * the end of this call if there are no more nodes in the list. 435 */ 436 static struct list_head *list_delete_head(struct list_head *root) 437 { 438 struct list_head *node; 439 440 if ((root->prev == root) && (root->next == root)) 441 node = NULL; 442 else 443 node = root->next; 444 445 if (node) 446 list_del(node); 447 448 return node; 449 } 450 451 /** 452 * \brief Delete gather lists 453 * @param lio per-network private data 454 */ 455 static void delete_glists(struct lio *lio) 456 { 457 struct octnic_gather *g; 458 int i; 459 460 kfree(lio->glist_lock); 461 lio->glist_lock = NULL; 462 463 if (!lio->glist) 464 return; 465 466 for (i = 0; i < lio->linfo.num_txpciq; i++) { 467 do { 468 g = (struct octnic_gather *) 469 list_delete_head(&lio->glist[i]); 470 if (g) 471 kfree(g); 472 } while (g); 473 474 if (lio->glists_virt_base && lio->glists_virt_base[i] && 475 lio->glists_dma_base && lio->glists_dma_base[i]) { 476 lio_dma_free(lio->oct_dev, 477 lio->glist_entry_size * lio->tx_qsize, 478 lio->glists_virt_base[i], 479 lio->glists_dma_base[i]); 480 } 481 } 482 483 kfree(lio->glists_virt_base); 484 lio->glists_virt_base = NULL; 485 486 kfree(lio->glists_dma_base); 487 lio->glists_dma_base = NULL; 488 489 kfree(lio->glist); 490 lio->glist = NULL; 491 } 492 493 /** 494 * \brief Setup gather lists 495 * @param lio per-network private data 496 */ 497 static int setup_glists(struct lio *lio, int num_iqs) 498 { 499 struct octnic_gather *g; 500 int i, j; 501 502 lio->glist_lock = 503 kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); 504 if (!lio->glist_lock) 505 return -ENOMEM; 506 507 lio->glist = 508 kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); 509 if (!lio->glist) { 510 kfree(lio->glist_lock); 511 lio->glist_lock = NULL; 512 return -ENOMEM; 513 } 514 515 lio->glist_entry_size = 516 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); 517 518 /* allocate memory to store virtual and dma base address of 519 * per glist consistent memory 520 */ 521 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), 522 GFP_KERNEL); 523 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), 524 GFP_KERNEL); 525 526 if (!lio->glists_virt_base || !lio->glists_dma_base) { 527 delete_glists(lio); 528 return -ENOMEM; 529 } 530 531 for (i = 0; i < num_iqs; i++) { 532 spin_lock_init(&lio->glist_lock[i]); 533 534 INIT_LIST_HEAD(&lio->glist[i]); 535 536 lio->glists_virt_base[i] = 537 lio_dma_alloc(lio->oct_dev, 538 lio->glist_entry_size * lio->tx_qsize, 539 &lio->glists_dma_base[i]); 540 541 if (!lio->glists_virt_base[i]) { 542 delete_glists(lio); 543 return -ENOMEM; 544 } 545 546 for (j = 0; j < lio->tx_qsize; j++) { 547 g = kzalloc(sizeof(*g), GFP_KERNEL); 548 if (!g) 549 break; 550 551 g->sg = lio->glists_virt_base[i] + 552 (j * lio->glist_entry_size); 553 554 g->sg_dma_ptr = lio->glists_dma_base[i] + 555 (j * lio->glist_entry_size); 556 557 list_add_tail(&g->list, &lio->glist[i]); 558 } 559 560 if (j != lio->tx_qsize) { 561 delete_glists(lio); 562 return -ENOMEM; 563 } 564 } 565 566 return 0; 567 } 568 569 /** 570 * \brief Print link information 571 * @param netdev network device 572 */ 573 static void print_link_info(struct net_device *netdev) 574 { 575 struct lio *lio = GET_LIO(netdev); 576 577 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { 578 struct oct_link_info *linfo = &lio->linfo; 579 580 if (linfo->link.s.link_up) { 581 netif_info(lio, link, lio->netdev, "%d Mbps 
%s Duplex UP\n", 582 linfo->link.s.speed, 583 (linfo->link.s.duplex) ? "Full" : "Half"); 584 } else { 585 netif_info(lio, link, lio->netdev, "Link Down\n"); 586 } 587 } 588 } 589 590 /** 591 * \brief Routine to notify MTU change 592 * @param work work_struct data structure 593 */ 594 static void octnet_link_status_change(struct work_struct *work) 595 { 596 struct cavium_wk *wk = (struct cavium_wk *)work; 597 struct lio *lio = (struct lio *)wk->ctxptr; 598 599 rtnl_lock(); 600 call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev); 601 rtnl_unlock(); 602 } 603 604 /** 605 * \brief Sets up the mtu status change work 606 * @param netdev network device 607 */ 608 static int setup_link_status_change_wq(struct net_device *netdev) 609 { 610 struct lio *lio = GET_LIO(netdev); 611 struct octeon_device *oct = lio->oct_dev; 612 613 lio->link_status_wq.wq = alloc_workqueue("link-status", 614 WQ_MEM_RECLAIM, 0); 615 if (!lio->link_status_wq.wq) { 616 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 617 return -1; 618 } 619 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 620 octnet_link_status_change); 621 lio->link_status_wq.wk.ctxptr = lio; 622 623 return 0; 624 } 625 626 static void cleanup_link_status_change_wq(struct net_device *netdev) 627 { 628 struct lio *lio = GET_LIO(netdev); 629 630 if (lio->link_status_wq.wq) { 631 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 632 destroy_workqueue(lio->link_status_wq.wq); 633 } 634 } 635 636 /** 637 * \brief Update link status 638 * @param netdev network device 639 * @param ls link status structure 640 * 641 * Called on receipt of a link status response from the core application to 642 * update each interface's link status. 643 */ 644 static void update_link_status(struct net_device *netdev, 645 union oct_link_status *ls) 646 { 647 struct lio *lio = GET_LIO(netdev); 648 struct octeon_device *oct = lio->oct_dev; 649 650 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 651 lio->linfo.link.u64 = ls->u64; 652 653 print_link_info(netdev); 654 lio->link_changes++; 655 656 if (lio->linfo.link.s.link_up) { 657 netif_carrier_on(netdev); 658 txqs_wake(netdev); 659 } else { 660 netif_carrier_off(netdev); 661 txqs_stop(netdev); 662 } 663 664 if (lio->linfo.link.s.mtu < netdev->mtu) { 665 dev_warn(&oct->pci_dev->dev, 666 "PF has changed the MTU for gmx port. 
Reducing the mtu from %d to %d\n", 667 netdev->mtu, lio->linfo.link.s.mtu); 668 lio->mtu = lio->linfo.link.s.mtu; 669 netdev->mtu = lio->linfo.link.s.mtu; 670 queue_delayed_work(lio->link_status_wq.wq, 671 &lio->link_status_wq.wk.work, 0); 672 } 673 } 674 } 675 676 static void update_txq_status(struct octeon_device *oct, int iq_num) 677 { 678 struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; 679 struct net_device *netdev; 680 struct lio *lio; 681 682 netdev = oct->props[iq->ifidx].netdev; 683 lio = GET_LIO(netdev); 684 if (netif_is_multiqueue(netdev)) { 685 if (__netif_subqueue_stopped(netdev, iq->q_index) && 686 lio->linfo.link.s.link_up && 687 (!octnet_iq_is_full(oct, iq_num))) { 688 netif_wake_subqueue(netdev, iq->q_index); 689 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, 690 tx_restart, 1); 691 } 692 } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up && 693 (!octnet_iq_is_full(oct, lio->txq))) { 694 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, 695 lio->txq, tx_restart, 1); 696 netif_wake_queue(netdev); 697 } 698 } 699 700 static 701 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) 702 { 703 struct octeon_device *oct = droq->oct_dev; 704 struct octeon_device_priv *oct_priv = 705 (struct octeon_device_priv *)oct->priv; 706 707 if (droq->ops.poll_mode) { 708 droq->ops.napi_fn(droq); 709 } else { 710 if (ret & MSIX_PO_INT) { 711 dev_err(&oct->pci_dev->dev, 712 "should not come here should not get rx when poll mode = 0 for vf\n"); 713 tasklet_schedule(&oct_priv->droq_tasklet); 714 return 1; 715 } 716 /* this will be flushed periodically by check iq db */ 717 if (ret & MSIX_PI_INT) 718 return 0; 719 } 720 return 0; 721 } 722 723 static irqreturn_t 724 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) 725 { 726 struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; 727 struct octeon_device *oct = ioq_vector->oct_dev; 728 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; 729 u64 ret; 730 731 ret = oct->fn_list.msix_interrupt_handler(ioq_vector); 732 733 if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT)) 734 liquidio_schedule_msix_droq_pkt_handler(droq, ret); 735 736 return IRQ_HANDLED; 737 } 738 739 /** 740 * \brief Setup interrupt for octeon device 741 * @param oct octeon device 742 * 743 * Enable interrupt in Octeon device as given in the PCI interrupt mask. 
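 * The VF uses MSI-X only: one vector is requested per ring (rings_per_vf),
 * each is named "LiquidIO<dev>-vf<num>-rxtx-<ring>" and is given a CPU
 * affinity hint for its ioq_vector.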
744 */ 745 static int octeon_setup_interrupt(struct octeon_device *oct) 746 { 747 struct msix_entry *msix_entries; 748 char *queue_irq_names = NULL; 749 int num_alloc_ioq_vectors; 750 int num_ioq_vectors; 751 int irqret; 752 int i; 753 754 if (oct->msix_on) { 755 oct->num_msix_irqs = oct->sriov_info.rings_per_vf; 756 757 /* allocate storage for the names assigned to each irq */ 758 oct->irq_name_storage = 759 kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ, 760 GFP_KERNEL); 761 if (!oct->irq_name_storage) { 762 dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); 763 return -ENOMEM; 764 } 765 766 queue_irq_names = oct->irq_name_storage; 767 768 oct->msix_entries = kcalloc( 769 oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); 770 if (!oct->msix_entries) { 771 dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); 772 kfree(oct->irq_name_storage); 773 oct->irq_name_storage = NULL; 774 return -ENOMEM; 775 } 776 777 msix_entries = (struct msix_entry *)oct->msix_entries; 778 779 for (i = 0; i < oct->num_msix_irqs; i++) 780 msix_entries[i].entry = i; 781 num_alloc_ioq_vectors = pci_enable_msix_range( 782 oct->pci_dev, msix_entries, 783 oct->num_msix_irqs, 784 oct->num_msix_irqs); 785 if (num_alloc_ioq_vectors < 0) { 786 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); 787 kfree(oct->msix_entries); 788 oct->msix_entries = NULL; 789 kfree(oct->irq_name_storage); 790 oct->irq_name_storage = NULL; 791 return num_alloc_ioq_vectors; 792 } 793 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); 794 795 num_ioq_vectors = oct->num_msix_irqs; 796 797 for (i = 0; i < num_ioq_vectors; i++) { 798 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ, 799 "LiquidIO%u-vf%u-rxtx-%u", 800 oct->octeon_id, oct->vf_num, i); 801 802 irqret = request_irq(msix_entries[i].vector, 803 liquidio_msix_intr_handler, 0, 804 &queue_irq_names[IRQ_NAME_OFF(i)], 805 &oct->ioq_vector[i]); 806 if (irqret) { 807 dev_err(&oct->pci_dev->dev, 808 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", 809 irqret); 810 811 while (i) { 812 i--; 813 irq_set_affinity_hint( 814 msix_entries[i].vector, NULL); 815 free_irq(msix_entries[i].vector, 816 &oct->ioq_vector[i]); 817 } 818 pci_disable_msix(oct->pci_dev); 819 kfree(oct->msix_entries); 820 oct->msix_entries = NULL; 821 kfree(oct->irq_name_storage); 822 oct->irq_name_storage = NULL; 823 return irqret; 824 } 825 oct->ioq_vector[i].vector = msix_entries[i].vector; 826 /* assign the cpu mask for this msix interrupt vector */ 827 irq_set_affinity_hint( 828 msix_entries[i].vector, 829 (&oct->ioq_vector[i].affinity_mask)); 830 } 831 dev_dbg(&oct->pci_dev->dev, 832 "OCTEON[%d]: MSI-X enabled\n", oct->octeon_id); 833 } 834 return 0; 835 } 836 837 /** 838 * \brief PCI probe handler 839 * @param pdev PCI device structure 840 * @param ent unused 841 */ 842 static int 843 liquidio_vf_probe(struct pci_dev *pdev, 844 const struct pci_device_id *ent __attribute__((unused))) 845 { 846 struct octeon_device *oct_dev = NULL; 847 848 oct_dev = octeon_allocate_device(pdev->device, 849 sizeof(struct octeon_device_priv)); 850 851 if (!oct_dev) { 852 dev_err(&pdev->dev, "Unable to allocate device\n"); 853 return -ENOMEM; 854 } 855 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 856 857 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 858 (u32)pdev->vendor, (u32)pdev->device); 859 860 /* Assign octeon_device for this device to the private data area. 
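	 * It is retrieved with pci_get_drvdata() in liquidio_vf_remove() and
	 * in the AER error-detected handler.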
 */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = pdev;

	if (octeon_device_init(oct_dev)) {
		liquidio_vf_remove(pdev);
		return -ENOMEM;
	}

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	u16 status;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	/* Wait for Transaction Pending bit to clear */
	msleep(100);
	pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status);
	if (status & PCI_EXP_DEVSTA_TRPND) {
		dev_info(&oct->pci_dev->dev, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
		ssleep(5);
		pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA,
					  &status);
		if (status & PCI_EXP_DEVSTA_TRPND)
			dev_info(&oct->pci_dev->dev, "Function reset still incomplete after 5s, reset anyway\n");
	}
	pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL,
				 PCI_EXP_DEVCTL_BCR_FLR);
	mdelay(100);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	struct msix_entry *msix_entries;
	int i;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:
		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs; i++) {
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
			}
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
		}
		/* Soft reset the octeon device before exiting */
		if (oct->pci_dev->reset_fn)
			octeon_pci_flr(oct);
		else
			cn23xx_vf_ask_pf_to_do_flr(oct);

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}
}

/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. 
Status: %llx\n", 1050 CVM_CAST64(status)); 1051 WRITE_ONCE(ctx->cond, 1); 1052 1053 /* This barrier is required to be sure that the response has been 1054 * written fully before waking up the handler 1055 */ 1056 wmb(); 1057 1058 wake_up_interruptible(&ctx->wc); 1059 } 1060 1061 /** 1062 * \brief Send Rx control command 1063 * @param lio per-network private data 1064 * @param start_stop whether to start or stop 1065 */ 1066 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1067 { 1068 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1069 int ctx_size = sizeof(struct liquidio_rx_ctl_context); 1070 struct liquidio_rx_ctl_context *ctx; 1071 struct octeon_soft_command *sc; 1072 union octnet_cmd *ncmd; 1073 int retval; 1074 1075 if (oct->props[lio->ifidx].rx_on == start_stop) 1076 return; 1077 1078 sc = (struct octeon_soft_command *) 1079 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 1080 16, ctx_size); 1081 1082 ncmd = (union octnet_cmd *)sc->virtdptr; 1083 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 1084 1085 WRITE_ONCE(ctx->cond, 0); 1086 ctx->octeon_id = lio_get_device_id(oct); 1087 init_waitqueue_head(&ctx->wc); 1088 1089 ncmd->u64 = 0; 1090 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 1091 ncmd->s.param1 = start_stop; 1092 1093 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 1094 1095 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1096 1097 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1098 OPCODE_NIC_CMD, 0, 0, 0); 1099 1100 sc->callback = rx_ctl_callback; 1101 sc->callback_arg = sc; 1102 sc->wait_time = 5000; 1103 1104 retval = octeon_send_soft_command(oct, sc); 1105 if (retval == IQ_SEND_FAILED) { 1106 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1107 } else { 1108 /* Sleep on a wait queue till the cond flag indicates that the 1109 * response arrived or timed-out. 1110 */ 1111 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) 1112 return; 1113 oct->props[lio->ifidx].rx_on = start_stop; 1114 } 1115 1116 octeon_free_soft_command(oct, sc); 1117 } 1118 1119 /** 1120 * \brief Destroy NIC device interface 1121 * @param oct octeon device 1122 * @param ifidx which interface to destroy 1123 * 1124 * Cleanup associated with each interface for an Octeon device when NIC 1125 * module is being unloaded or if initialization fails during load. 
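 * Stops the interface if it is running, disables NAPI, unregisters the
 * netdev, tears down the rx-oom poll and link-status workqueues, frees the
 * gather lists and finally frees the netdev itself.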
1126 */ 1127 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1128 { 1129 struct net_device *netdev = oct->props[ifidx].netdev; 1130 struct napi_struct *napi, *n; 1131 struct lio *lio; 1132 1133 if (!netdev) { 1134 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1135 __func__, ifidx); 1136 return; 1137 } 1138 1139 lio = GET_LIO(netdev); 1140 1141 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1142 1143 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1144 liquidio_stop(netdev); 1145 1146 if (oct->props[lio->ifidx].napi_enabled == 1) { 1147 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1148 napi_disable(napi); 1149 1150 oct->props[lio->ifidx].napi_enabled = 0; 1151 1152 oct->droq[0]->ops.poll_mode = 0; 1153 } 1154 1155 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1156 unregister_netdev(netdev); 1157 1158 cleanup_rx_oom_poll_fn(netdev); 1159 1160 cleanup_link_status_change_wq(netdev); 1161 1162 delete_glists(lio); 1163 1164 free_netdev(netdev); 1165 1166 oct->props[ifidx].gmxport = -1; 1167 1168 oct->props[ifidx].netdev = NULL; 1169 } 1170 1171 /** 1172 * \brief Stop complete NIC functionality 1173 * @param oct octeon device 1174 */ 1175 static int liquidio_stop_nic_module(struct octeon_device *oct) 1176 { 1177 struct lio *lio; 1178 int i, j; 1179 1180 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1181 if (!oct->ifcount) { 1182 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1183 return 1; 1184 } 1185 1186 spin_lock_bh(&oct->cmd_resp_wqlock); 1187 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1188 spin_unlock_bh(&oct->cmd_resp_wqlock); 1189 1190 for (i = 0; i < oct->ifcount; i++) { 1191 lio = GET_LIO(oct->props[i].netdev); 1192 for (j = 0; j < lio->linfo.num_rxpciq; j++) 1193 octeon_unregister_droq_ops(oct, 1194 lio->linfo.rxpciq[j].s.q_no); 1195 } 1196 1197 for (i = 0; i < oct->ifcount; i++) 1198 liquidio_destroy_nic_device(oct, i); 1199 1200 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1201 return 0; 1202 } 1203 1204 /** 1205 * \brief Cleans up resources at unload time 1206 * @param pdev PCI device structure 1207 */ 1208 static void liquidio_vf_remove(struct pci_dev *pdev) 1209 { 1210 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1211 1212 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1213 1214 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 1215 liquidio_stop_nic_module(oct_dev); 1216 1217 /* Reset the octeon device and cleanup all memory allocated for 1218 * the octeon device by driver. 1219 */ 1220 octeon_destroy_resources(oct_dev); 1221 1222 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1223 1224 /* This octeon device has been removed. Update the global 1225 * data structure to reflect this. Free the device structure. 1226 */ 1227 octeon_free_device_mem(oct_dev); 1228 } 1229 1230 /** 1231 * \brief PCI initialization for each Octeon device. 
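 * Issues an FLR when the VF has no associated physfn visible to this
 * kernel, enables the PCI device, sets a 64-bit DMA mask and enables bus
 * mastering.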
1232 * @param oct octeon device 1233 */ 1234 static int octeon_pci_os_setup(struct octeon_device *oct) 1235 { 1236 #ifdef CONFIG_PCI_IOV 1237 /* setup PCI stuff first */ 1238 if (!oct->pci_dev->physfn) 1239 octeon_pci_flr(oct); 1240 #endif 1241 1242 if (pci_enable_device(oct->pci_dev)) { 1243 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1244 return 1; 1245 } 1246 1247 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1248 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1249 pci_disable_device(oct->pci_dev); 1250 return 1; 1251 } 1252 1253 /* Enable PCI DMA Master. */ 1254 pci_set_master(oct->pci_dev); 1255 1256 return 0; 1257 } 1258 1259 static int skb_iq(struct lio *lio, struct sk_buff *skb) 1260 { 1261 int q = 0; 1262 1263 if (netif_is_multiqueue(lio->netdev)) 1264 q = skb->queue_mapping % lio->linfo.num_txpciq; 1265 1266 return q; 1267 } 1268 1269 /** 1270 * \brief Check Tx queue state for a given network buffer 1271 * @param lio per-network private data 1272 * @param skb network buffer 1273 */ 1274 static int check_txq_state(struct lio *lio, struct sk_buff *skb) 1275 { 1276 int q = 0, iq = 0; 1277 1278 if (netif_is_multiqueue(lio->netdev)) { 1279 q = skb->queue_mapping; 1280 iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; 1281 } else { 1282 iq = lio->txq; 1283 q = iq; 1284 } 1285 1286 if (octnet_iq_is_full(lio->oct_dev, iq)) 1287 return 0; 1288 1289 if (__netif_subqueue_stopped(lio->netdev, q)) { 1290 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); 1291 wake_q(lio->netdev, q); 1292 } 1293 1294 return 1; 1295 } 1296 1297 /** 1298 * \brief Unmap and free network buffer 1299 * @param buf buffer 1300 */ 1301 static void free_netbuf(void *buf) 1302 { 1303 struct octnet_buf_free_info *finfo; 1304 struct sk_buff *skb; 1305 struct lio *lio; 1306 1307 finfo = (struct octnet_buf_free_info *)buf; 1308 skb = finfo->skb; 1309 lio = finfo->lio; 1310 1311 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1312 DMA_TO_DEVICE); 1313 1314 check_txq_state(lio, skb); 1315 1316 tx_buffer_free(skb); 1317 } 1318 1319 /** 1320 * \brief Unmap and free gather buffer 1321 * @param buf buffer 1322 */ 1323 static void free_netsgbuf(void *buf) 1324 { 1325 struct octnet_buf_free_info *finfo; 1326 struct octnic_gather *g; 1327 struct sk_buff *skb; 1328 int i, frags, iq; 1329 struct lio *lio; 1330 1331 finfo = (struct octnet_buf_free_info *)buf; 1332 skb = finfo->skb; 1333 lio = finfo->lio; 1334 g = finfo->g; 1335 frags = skb_shinfo(skb)->nr_frags; 1336 1337 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1338 g->sg[0].ptr[0], (skb->len - skb->data_len), 1339 DMA_TO_DEVICE); 1340 1341 i = 1; 1342 while (frags--) { 1343 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1344 1345 pci_unmap_page((lio->oct_dev)->pci_dev, 1346 g->sg[(i >> 2)].ptr[(i & 3)], 1347 frag->size, DMA_TO_DEVICE); 1348 i++; 1349 } 1350 1351 iq = skb_iq(lio, skb); 1352 1353 spin_lock(&lio->glist_lock[iq]); 1354 list_add_tail(&g->list, &lio->glist[iq]); 1355 spin_unlock(&lio->glist_lock[iq]); 1356 1357 check_txq_state(lio, skb); /* mq support: sub-queue state check */ 1358 1359 tx_buffer_free(skb); 1360 } 1361 1362 /** 1363 * \brief Unmap and free gather buffer with response 1364 * @param buf buffer 1365 */ 1366 static void free_netsgbuf_with_resp(void *buf) 1367 { 1368 struct octnet_buf_free_info *finfo; 1369 struct octeon_soft_command *sc; 1370 struct octnic_gather *g; 1371 struct sk_buff *skb; 1372 int i, frags, iq; 1373 struct lio *lio; 
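	/* For the response path, the completion buffer is the soft command
	 * itself; the skb hangs off its callback_arg and the per-packet
	 * octnet_buf_free_info lives in the skb's cb area.
	 */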
1374 1375 sc = (struct octeon_soft_command *)buf; 1376 skb = (struct sk_buff *)sc->callback_arg; 1377 finfo = (struct octnet_buf_free_info *)&skb->cb; 1378 1379 lio = finfo->lio; 1380 g = finfo->g; 1381 frags = skb_shinfo(skb)->nr_frags; 1382 1383 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1384 g->sg[0].ptr[0], (skb->len - skb->data_len), 1385 DMA_TO_DEVICE); 1386 1387 i = 1; 1388 while (frags--) { 1389 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1390 1391 pci_unmap_page((lio->oct_dev)->pci_dev, 1392 g->sg[(i >> 2)].ptr[(i & 3)], 1393 frag->size, DMA_TO_DEVICE); 1394 i++; 1395 } 1396 1397 iq = skb_iq(lio, skb); 1398 1399 spin_lock(&lio->glist_lock[iq]); 1400 list_add_tail(&g->list, &lio->glist[iq]); 1401 spin_unlock(&lio->glist_lock[iq]); 1402 1403 /* Don't free the skb yet */ 1404 1405 check_txq_state(lio, skb); 1406 } 1407 1408 /** 1409 * \brief Setup output queue 1410 * @param oct octeon device 1411 * @param q_no which queue 1412 * @param num_descs how many descriptors 1413 * @param desc_size size of each descriptor 1414 * @param app_ctx application context 1415 */ 1416 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, 1417 int desc_size, void *app_ctx) 1418 { 1419 int ret_val; 1420 1421 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 1422 /* droq creation and local register settings. */ 1423 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 1424 if (ret_val < 0) 1425 return ret_val; 1426 1427 if (ret_val == 1) { 1428 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); 1429 return 0; 1430 } 1431 1432 /* Enable the droq queues */ 1433 octeon_set_droq_pkt_op(oct, q_no, 1); 1434 1435 /* Send Credit for Octeon Output queues. Credits are always 1436 * sent after the output queue is enabled. 1437 */ 1438 writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); 1439 1440 return ret_val; 1441 } 1442 1443 /** 1444 * \brief Callback for getting interface configuration 1445 * @param status status of request 1446 * @param buf pointer to resp structure 1447 */ 1448 static void if_cfg_callback(struct octeon_device *oct, 1449 u32 status __attribute__((unused)), void *buf) 1450 { 1451 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1452 struct liquidio_if_cfg_context *ctx; 1453 struct liquidio_if_cfg_resp *resp; 1454 1455 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1456 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 1457 1458 oct = lio_get_device(ctx->octeon_id); 1459 if (resp->status) 1460 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", 1461 CVM_CAST64(resp->status)); 1462 WRITE_ONCE(ctx->cond, 1); 1463 1464 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", 1465 resp->cfg_info.liquidio_firmware_version); 1466 1467 /* This barrier is required to be sure that the response has been 1468 * written fully before waking up the handler 1469 */ 1470 wmb(); 1471 1472 wake_up_interruptible(&ctx->wc); 1473 } 1474 1475 /** Routine to push packets arriving on Octeon interface upto network layer. 1476 * @param oct_id - octeon device id. 1477 * @param skbuff - skbuff struct to be passed to network layer. 1478 * @param len - size of total data received. 
1479 * @param rh - Control header associated with the packet 1480 * @param param - additional control data with the packet 1481 * @param arg - farg registered in droq_ops 1482 */ 1483 static void 1484 liquidio_push_packet(u32 octeon_id __attribute__((unused)), 1485 void *skbuff, 1486 u32 len, 1487 union octeon_rh *rh, 1488 void *param, 1489 void *arg) 1490 { 1491 struct napi_struct *napi = param; 1492 struct octeon_droq *droq = 1493 container_of(param, struct octeon_droq, napi); 1494 struct net_device *netdev = (struct net_device *)arg; 1495 struct sk_buff *skb = (struct sk_buff *)skbuff; 1496 u16 vtag = 0; 1497 u32 r_dh_off; 1498 1499 if (netdev) { 1500 struct lio *lio = GET_LIO(netdev); 1501 int packet_was_received; 1502 1503 /* Do not proceed if the interface is not in RUNNING state. */ 1504 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { 1505 recv_buffer_free(skb); 1506 droq->stats.rx_dropped++; 1507 return; 1508 } 1509 1510 skb->dev = netdev; 1511 1512 skb_record_rx_queue(skb, droq->q_no); 1513 if (likely(len > MIN_SKB_SIZE)) { 1514 struct octeon_skb_page_info *pg_info; 1515 unsigned char *va; 1516 1517 pg_info = ((struct octeon_skb_page_info *)(skb->cb)); 1518 if (pg_info->page) { 1519 /* For Paged allocation use the frags */ 1520 va = page_address(pg_info->page) + 1521 pg_info->page_offset; 1522 memcpy(skb->data, va, MIN_SKB_SIZE); 1523 skb_put(skb, MIN_SKB_SIZE); 1524 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 1525 pg_info->page, 1526 pg_info->page_offset + 1527 MIN_SKB_SIZE, 1528 len - MIN_SKB_SIZE, 1529 LIO_RXBUFFER_SZ); 1530 } 1531 } else { 1532 struct octeon_skb_page_info *pg_info = 1533 ((struct octeon_skb_page_info *)(skb->cb)); 1534 skb_copy_to_linear_data(skb, 1535 page_address(pg_info->page) + 1536 pg_info->page_offset, len); 1537 skb_put(skb, len); 1538 put_page(pg_info->page); 1539 } 1540 1541 r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; 1542 1543 if (rh->r_dh.has_hwtstamp) 1544 r_dh_off -= BYTES_PER_DHLEN_UNIT; 1545 1546 if (rh->r_dh.has_hash) { 1547 __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); 1548 u32 hash = be32_to_cpu(*hash_be); 1549 1550 skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); 1551 r_dh_off -= BYTES_PER_DHLEN_UNIT; 1552 } 1553 1554 skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); 1555 skb->protocol = eth_type_trans(skb, skb->dev); 1556 1557 if ((netdev->features & NETIF_F_RXCSUM) && 1558 (((rh->r_dh.encap_on) && 1559 (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || 1560 (!(rh->r_dh.encap_on) && 1561 (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) 1562 /* checksum has already been verified */ 1563 skb->ip_summed = CHECKSUM_UNNECESSARY; 1564 else 1565 skb->ip_summed = CHECKSUM_NONE; 1566 1567 /* Setting Encapsulation field on basis of status received 1568 * from the firmware 1569 */ 1570 if (rh->r_dh.encap_on) { 1571 skb->encapsulation = 1; 1572 skb->csum_level = 1; 1573 droq->stats.rx_vxlan++; 1574 } 1575 1576 /* inbound VLAN tag */ 1577 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1578 rh->r_dh.vlan) { 1579 u16 priority = rh->r_dh.priority; 1580 u16 vid = rh->r_dh.vlan; 1581 1582 vtag = (priority << VLAN_PRIO_SHIFT) | vid; 1583 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); 1584 } 1585 1586 packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP); 1587 1588 if (packet_was_received) { 1589 droq->stats.rx_bytes_received += len; 1590 droq->stats.rx_pkts_received++; 1591 } else { 1592 droq->stats.rx_dropped++; 1593 netif_info(lio, rx_err, lio->netdev, 1594 "droq:%d error rx_dropped:%llu\n", 1595 droq->q_no, 
droq->stats.rx_dropped); 1596 } 1597 1598 } else { 1599 recv_buffer_free(skb); 1600 } 1601 } 1602 1603 /** 1604 * \brief callback when receive interrupt occurs and we are in NAPI mode 1605 * @param arg pointer to octeon output queue 1606 */ 1607 static void liquidio_vf_napi_drv_callback(void *arg) 1608 { 1609 struct octeon_droq *droq = arg; 1610 1611 napi_schedule_irqoff(&droq->napi); 1612 } 1613 1614 /** 1615 * \brief Entry point for NAPI polling 1616 * @param napi NAPI structure 1617 * @param budget maximum number of items to process 1618 */ 1619 static int liquidio_napi_poll(struct napi_struct *napi, int budget) 1620 { 1621 struct octeon_instr_queue *iq; 1622 struct octeon_device *oct; 1623 struct octeon_droq *droq; 1624 int tx_done = 0, iq_no; 1625 int work_done; 1626 1627 droq = container_of(napi, struct octeon_droq, napi); 1628 oct = droq->oct_dev; 1629 iq_no = droq->q_no; 1630 1631 /* Handle Droq descriptors */ 1632 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, 1633 POLL_EVENT_PROCESS_PKTS, 1634 budget); 1635 1636 /* Flush the instruction queue */ 1637 iq = oct->instr_queue[iq_no]; 1638 if (iq) { 1639 if (atomic_read(&iq->instr_pending)) 1640 /* Process iq buffers with in the budget limits */ 1641 tx_done = octeon_flush_iq(oct, iq, budget); 1642 else 1643 tx_done = 1; 1644 1645 /* Update iq read-index rather than waiting for next interrupt. 1646 * Return back if tx_done is false. 1647 */ 1648 update_txq_status(oct, iq_no); 1649 } else { 1650 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", 1651 __func__, iq_no); 1652 } 1653 1654 /* force enable interrupt if reg cnts are high to avoid wraparound */ 1655 if ((work_done < budget && tx_done) || 1656 (iq && iq->pkt_in_done >= MAX_REG_CNT) || 1657 (droq->pkt_count >= MAX_REG_CNT)) { 1658 tx_done = 1; 1659 napi_complete_done(napi, work_done); 1660 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, 1661 POLL_EVENT_ENABLE_INTR, 0); 1662 return 0; 1663 } 1664 1665 return (!tx_done) ? (budget) : (work_done); 1666 } 1667 1668 /** 1669 * \brief Setup input and output queues 1670 * @param octeon_dev octeon device 1671 * @param ifidx Interface index 1672 * 1673 * Note: Queues are with respect to the octeon device. Thus 1674 * an input queue is for egress packets, and output queues 1675 * are for ingress packets. 1676 */ 1677 static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx) 1678 { 1679 struct octeon_droq_ops droq_ops; 1680 struct net_device *netdev; 1681 static int cpu_id_modulus; 1682 struct octeon_droq *droq; 1683 struct napi_struct *napi; 1684 static int cpu_id; 1685 int num_tx_descs; 1686 struct lio *lio; 1687 int retval = 0; 1688 int q, q_no; 1689 1690 netdev = octeon_dev->props[ifidx].netdev; 1691 1692 lio = GET_LIO(netdev); 1693 1694 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); 1695 1696 droq_ops.fptr = liquidio_push_packet; 1697 droq_ops.farg = netdev; 1698 1699 droq_ops.poll_mode = 1; 1700 droq_ops.napi_fn = liquidio_vf_napi_drv_callback; 1701 cpu_id = 0; 1702 cpu_id_modulus = num_present_cpus(); 1703 1704 /* set up DROQs. 
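	 * Each RX queue gets a DROQ, a NAPI context and a designated CPU;
	 * the push-packet/NAPI callbacks set up above are then attached via
	 * octeon_register_droq_ops().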
*/ 1705 for (q = 0; q < lio->linfo.num_rxpciq; q++) { 1706 q_no = lio->linfo.rxpciq[q].s.q_no; 1707 1708 retval = octeon_setup_droq( 1709 octeon_dev, q_no, 1710 CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev), 1711 lio->ifidx), 1712 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev), 1713 lio->ifidx), 1714 NULL); 1715 if (retval) { 1716 dev_err(&octeon_dev->pci_dev->dev, 1717 "%s : Runtime DROQ(RxQ) creation failed.\n", 1718 __func__); 1719 return 1; 1720 } 1721 1722 droq = octeon_dev->droq[q_no]; 1723 napi = &droq->napi; 1724 netif_napi_add(netdev, napi, liquidio_napi_poll, 64); 1725 1726 /* designate a CPU for this droq */ 1727 droq->cpu_id = cpu_id; 1728 cpu_id++; 1729 if (cpu_id >= cpu_id_modulus) 1730 cpu_id = 0; 1731 1732 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); 1733 } 1734 1735 /* 23XX VF can send/recv control messages (via the first VF-owned 1736 * droq) from the firmware even if the ethX interface is down, 1737 * so that's why poll_mode must be off for the first droq. 1738 */ 1739 octeon_dev->droq[0]->ops.poll_mode = 0; 1740 1741 /* set up IQs. */ 1742 for (q = 0; q < lio->linfo.num_txpciq; q++) { 1743 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( 1744 octeon_get_conf(octeon_dev), lio->ifidx); 1745 retval = octeon_setup_iq(octeon_dev, ifidx, q, 1746 lio->linfo.txpciq[q], num_tx_descs, 1747 netdev_get_tx_queue(netdev, q)); 1748 if (retval) { 1749 dev_err(&octeon_dev->pci_dev->dev, 1750 " %s : Runtime IQ(TxQ) creation failed.\n", 1751 __func__); 1752 return 1; 1753 } 1754 } 1755 1756 return 0; 1757 } 1758 1759 /** 1760 * \brief Net device open for LiquidIO 1761 * @param netdev network device 1762 */ 1763 static int liquidio_open(struct net_device *netdev) 1764 { 1765 struct lio *lio = GET_LIO(netdev); 1766 struct octeon_device *oct = lio->oct_dev; 1767 struct napi_struct *napi, *n; 1768 1769 if (!oct->props[lio->ifidx].napi_enabled) { 1770 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1771 napi_enable(napi); 1772 1773 oct->props[lio->ifidx].napi_enabled = 1; 1774 1775 oct->droq[0]->ops.poll_mode = 1; 1776 } 1777 1778 ifstate_set(lio, LIO_IFSTATE_RUNNING); 1779 1780 /* Ready for link status updates */ 1781 lio->intf_open = 1; 1782 1783 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 1784 start_txq(netdev); 1785 1786 /* tell Octeon to start forwarding packets to host */ 1787 send_rx_ctrl_cmd(lio, 1); 1788 1789 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); 1790 1791 return 0; 1792 } 1793 1794 /** 1795 * \brief Net device stop for LiquidIO 1796 * @param netdev network device 1797 */ 1798 static int liquidio_stop(struct net_device *netdev) 1799 { 1800 struct lio *lio = GET_LIO(netdev); 1801 struct octeon_device *oct = lio->oct_dev; 1802 1803 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 1804 /* Inform that netif carrier is down */ 1805 lio->intf_open = 0; 1806 lio->linfo.link.s.link_up = 0; 1807 1808 netif_carrier_off(netdev); 1809 lio->link_changes++; 1810 1811 /* tell Octeon to stop forwarding packets to host */ 1812 send_rx_ctrl_cmd(lio, 0); 1813 1814 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 1815 1816 txqs_stop(netdev); 1817 1818 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 1819 1820 return 0; 1821 } 1822 1823 /** 1824 * \brief Converts a mask based on net device flags 1825 * @param netdev network device 1826 * 1827 * This routine generates a octnet_ifflags mask from the net device flags 1828 * received from the OS. 
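 * For example, IFF_PROMISC maps to OCTNET_IFFLAG_PROMISC, and IFF_ALLMULTI
 * (or a multicast list longer than MAX_OCTEON_MULTICAST_ADDR) results in
 * OCTNET_IFFLAG_ALLMULTI.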
1829 */ 1830 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 1831 { 1832 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1833 1834 if (netdev->flags & IFF_PROMISC) 1835 f |= OCTNET_IFFLAG_PROMISC; 1836 1837 if (netdev->flags & IFF_ALLMULTI) 1838 f |= OCTNET_IFFLAG_ALLMULTI; 1839 1840 if (netdev->flags & IFF_MULTICAST) { 1841 f |= OCTNET_IFFLAG_MULTICAST; 1842 1843 /* Accept all multicast addresses if there are more than we 1844 * can handle 1845 */ 1846 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1847 f |= OCTNET_IFFLAG_ALLMULTI; 1848 } 1849 1850 if (netdev->flags & IFF_BROADCAST) 1851 f |= OCTNET_IFFLAG_BROADCAST; 1852 1853 return f; 1854 } 1855 1856 static void liquidio_set_uc_list(struct net_device *netdev) 1857 { 1858 struct lio *lio = GET_LIO(netdev); 1859 struct octeon_device *oct = lio->oct_dev; 1860 struct octnic_ctrl_pkt nctrl; 1861 struct netdev_hw_addr *ha; 1862 u64 *mac; 1863 1864 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1865 return; 1866 1867 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1868 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1869 return; 1870 } 1871 1872 lio->netdev_uc_count = netdev_uc_count(netdev); 1873 1874 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1875 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1876 nctrl.ncmd.s.more = lio->netdev_uc_count; 1877 nctrl.ncmd.s.param1 = oct->vf_num; 1878 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1879 nctrl.netpndev = (u64)netdev; 1880 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1881 1882 /* copy all the addresses into the udd */ 1883 mac = &nctrl.udd[0]; 1884 netdev_for_each_uc_addr(ha, netdev) { 1885 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1886 mac++; 1887 } 1888 1889 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1890 } 1891 1892 /** 1893 * \brief Net device set_multicast_list 1894 * @param netdev network device 1895 */ 1896 static void liquidio_set_mcast_list(struct net_device *netdev) 1897 { 1898 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1899 struct lio *lio = GET_LIO(netdev); 1900 struct octeon_device *oct = lio->oct_dev; 1901 struct octnic_ctrl_pkt nctrl; 1902 struct netdev_hw_addr *ha; 1903 u64 *mc; 1904 int ret; 1905 1906 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1907 1908 /* Create a ctrl pkt command to be sent to core app. */ 1909 nctrl.ncmd.u64 = 0; 1910 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1911 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1912 nctrl.ncmd.s.param2 = mc_count; 1913 nctrl.ncmd.s.more = mc_count; 1914 nctrl.netpndev = (u64)netdev; 1915 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1916 1917 /* copy all the addresses into the udd */ 1918 mc = &nctrl.udd[0]; 1919 netdev_for_each_mc_addr(ha, netdev) { 1920 *mc = 0; 1921 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1922 /* no need to swap bytes */ 1923 if (++mc > &nctrl.udd[mc_count]) 1924 break; 1925 } 1926 1927 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1928 1929 /* Apparently, any activity in this call from the kernel has to 1930 * be atomic. So we won't wait for response. 
1931 */ 1932 nctrl.wait_time = 0; 1933 1934 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1935 if (ret < 0) { 1936 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1937 ret); 1938 } 1939 1940 liquidio_set_uc_list(netdev); 1941 } 1942 1943 /** 1944 * \brief Net device set_mac_address 1945 * @param netdev network device 1946 */ 1947 static int liquidio_set_mac(struct net_device *netdev, void *p) 1948 { 1949 struct sockaddr *addr = (struct sockaddr *)p; 1950 struct lio *lio = GET_LIO(netdev); 1951 struct octeon_device *oct = lio->oct_dev; 1952 struct octnic_ctrl_pkt nctrl; 1953 int ret = 0; 1954 1955 if (!is_valid_ether_addr(addr->sa_data)) 1956 return -EADDRNOTAVAIL; 1957 1958 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1959 return 0; 1960 1961 if (lio->linfo.macaddr_is_admin_asgnd) 1962 return -EPERM; 1963 1964 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1965 1966 nctrl.ncmd.u64 = 0; 1967 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1968 nctrl.ncmd.s.param1 = 0; 1969 nctrl.ncmd.s.more = 1; 1970 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1971 nctrl.netpndev = (u64)netdev; 1972 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1973 nctrl.wait_time = 100; 1974 1975 nctrl.udd[0] = 0; 1976 /* The MAC Address is presented in network byte order. */ 1977 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1978 1979 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1980 if (ret < 0) { 1981 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1982 return -ENOMEM; 1983 } 1984 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1985 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1986 1987 return 0; 1988 } 1989 1990 /** 1991 * \brief Net device get_stats 1992 * @param netdev network device 1993 */ 1994 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) 1995 { 1996 struct lio *lio = GET_LIO(netdev); 1997 struct net_device_stats *stats = &netdev->stats; 1998 u64 pkts = 0, drop = 0, bytes = 0; 1999 struct oct_droq_stats *oq_stats; 2000 struct oct_iq_stats *iq_stats; 2001 struct octeon_device *oct; 2002 int i, iq_no, oq_no; 2003 2004 oct = lio->oct_dev; 2005 2006 for (i = 0; i < lio->linfo.num_txpciq; i++) { 2007 iq_no = lio->linfo.txpciq[i].s.q_no; 2008 iq_stats = &oct->instr_queue[iq_no]->stats; 2009 pkts += iq_stats->tx_done; 2010 drop += iq_stats->tx_dropped; 2011 bytes += iq_stats->tx_tot_bytes; 2012 } 2013 2014 stats->tx_packets = pkts; 2015 stats->tx_bytes = bytes; 2016 stats->tx_dropped = drop; 2017 2018 pkts = 0; 2019 drop = 0; 2020 bytes = 0; 2021 2022 for (i = 0; i < lio->linfo.num_rxpciq; i++) { 2023 oq_no = lio->linfo.rxpciq[i].s.q_no; 2024 oq_stats = &oct->droq[oq_no]->stats; 2025 pkts += oq_stats->rx_pkts_received; 2026 drop += (oq_stats->rx_dropped + 2027 oq_stats->dropped_nodispatch + 2028 oq_stats->dropped_toomany + 2029 oq_stats->dropped_nomem); 2030 bytes += oq_stats->rx_bytes_received; 2031 } 2032 2033 stats->rx_bytes = bytes; 2034 stats->rx_packets = pkts; 2035 stats->rx_dropped = drop; 2036 2037 return stats; 2038 } 2039 2040 /** 2041 * \brief Net device change_mtu 2042 * @param netdev network device 2043 */ 2044 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) 2045 { 2046 struct lio *lio = GET_LIO(netdev); 2047 struct octeon_device *oct = lio->oct_dev; 2048 2049 lio->mtu = new_mtu; 2050 2051 netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", 2052 netdev->mtu, new_mtu); 2053 dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d 
to %d\n", 2054 netdev->name, netdev->mtu, new_mtu); 2055 2056 netdev->mtu = new_mtu; 2057 2058 return 0; 2059 } 2060 2061 /** 2062 * \brief Handler for SIOCSHWTSTAMP ioctl 2063 * @param netdev network device 2064 * @param ifr interface request 2065 * @param cmd command 2066 */ 2067 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2068 { 2069 struct lio *lio = GET_LIO(netdev); 2070 struct hwtstamp_config conf; 2071 2072 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2073 return -EFAULT; 2074 2075 if (conf.flags) 2076 return -EINVAL; 2077 2078 switch (conf.tx_type) { 2079 case HWTSTAMP_TX_ON: 2080 case HWTSTAMP_TX_OFF: 2081 break; 2082 default: 2083 return -ERANGE; 2084 } 2085 2086 switch (conf.rx_filter) { 2087 case HWTSTAMP_FILTER_NONE: 2088 break; 2089 case HWTSTAMP_FILTER_ALL: 2090 case HWTSTAMP_FILTER_SOME: 2091 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2092 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2093 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2094 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2095 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2096 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2097 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2098 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2099 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2100 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2101 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2102 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2103 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2104 break; 2105 default: 2106 return -ERANGE; 2107 } 2108 2109 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2110 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2111 2112 else 2113 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2114 2115 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2116 } 2117 2118 /** 2119 * \brief ioctl handler 2120 * @param netdev network device 2121 * @param ifr interface request 2122 * @param cmd command 2123 */ 2124 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2125 { 2126 switch (cmd) { 2127 case SIOCSHWTSTAMP: 2128 return hwtstamp_ioctl(netdev, ifr); 2129 default: 2130 return -EOPNOTSUPP; 2131 } 2132 } 2133 2134 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 2135 { 2136 struct sk_buff *skb = (struct sk_buff *)buf; 2137 struct octnet_buf_free_info *finfo; 2138 struct oct_timestamp_resp *resp; 2139 struct octeon_soft_command *sc; 2140 struct lio *lio; 2141 2142 finfo = (struct octnet_buf_free_info *)skb->cb; 2143 lio = finfo->lio; 2144 sc = finfo->sc; 2145 oct = lio->oct_dev; 2146 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2147 2148 if (status != OCTEON_REQUEST_DONE) { 2149 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", 2150 CVM_CAST64(status)); 2151 resp->timestamp = 0; 2152 } 2153 2154 octeon_swap_8B_data(&resp->timestamp, 1); 2155 2156 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2157 struct skb_shared_hwtstamps ts; 2158 u64 ns = resp->timestamp; 2159 2160 netif_info(lio, tx_done, lio->netdev, 2161 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2162 skb, (unsigned long long)ns); 2163 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2164 skb_tstamp_tx(skb, &ts); 2165 } 2166 2167 octeon_free_soft_command(oct, sc); 2168 tx_buffer_free(skb); 2169 } 2170 2171 /* \brief Send a data packet that will be timestamped 2172 * @param oct octeon device 2173 * @param ndata pointer to network data 2174 * @param finfo pointer to private network data 2175 */ 2176 static int send_nic_timestamp_pkt(struct octeon_device *oct, 2177 struct octnic_data_pkt *ndata, 2178 struct octnet_buf_free_info *finfo) 2179 { 2180 struct octeon_soft_command *sc; 2181 int ring_doorbell; 2182 struct lio *lio; 2183 int retval; 2184 u32 len; 2185 2186 lio = finfo->lio; 2187 2188 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2189 sizeof(struct oct_timestamp_resp)); 2190 finfo->sc = sc; 2191 2192 if (!sc) { 2193 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2194 return IQ_SEND_FAILED; 2195 } 2196 2197 if (ndata->reqtype == REQTYPE_NORESP_NET) 2198 ndata->reqtype = REQTYPE_RESP_NET; 2199 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2200 ndata->reqtype = REQTYPE_RESP_NET_SG; 2201 2202 sc->callback = handle_timestamp; 2203 sc->callback_arg = finfo->skb; 2204 sc->iq_no = ndata->q_no; 2205 2206 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; 2207 2208 ring_doorbell = 1; 2209 2210 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2211 sc, len, ndata->reqtype); 2212 2213 if (retval == IQ_SEND_FAILED) { 2214 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2215 retval); 2216 octeon_free_soft_command(oct, sc); 2217 } else { 2218 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2219 } 2220 2221 return retval; 2222 } 2223 2224 /** \brief Transmit networks packets to the Octeon interface 2225 * @param skbuff skbuff struct to be passed to network layer. 2226 * @param netdev pointer to network device 2227 * @returns whether the packet was transmitted to the device okay or not 2228 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2229 */ 2230 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2231 { 2232 struct octnet_buf_free_info *finfo; 2233 union octnic_cmd_setup cmdsetup; 2234 struct octnic_data_pkt ndata; 2235 struct octeon_instr_irh *irh; 2236 struct oct_iq_stats *stats; 2237 struct octeon_device *oct; 2238 int q_idx = 0, iq_no = 0; 2239 union tx_info *tx_info; 2240 struct lio *lio; 2241 int status = 0; 2242 u64 dptr = 0; 2243 u32 tag = 0; 2244 int j; 2245 2246 lio = GET_LIO(netdev); 2247 oct = lio->oct_dev; 2248 2249 if (netif_is_multiqueue(netdev)) { 2250 q_idx = skb->queue_mapping; 2251 q_idx = (q_idx % (lio->linfo.num_txpciq)); 2252 tag = q_idx; 2253 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2254 } else { 2255 iq_no = lio->txq; 2256 } 2257 2258 stats = &oct->instr_queue[iq_no]->stats; 2259 2260 /* Check for all conditions in which the current packet cannot be 2261 * transmitted. 
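	 * When one of these checks fails the packet is dropped: lio_xmit_failed
	 * frees the skb and returns NETDEV_TX_OK so the stack does not retry
	 * it.  NETDEV_TX_BUSY is returned only on the paths that do not consume
	 * the skb (instruction queue full, or a DMA mapping error further
	 * down).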
2262 */ 2263 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2264 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 2265 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 2266 lio->linfo.link.s.link_up); 2267 goto lio_xmit_failed; 2268 } 2269 2270 /* Use space in skb->cb to store info used to unmap and 2271 * free the buffers. 2272 */ 2273 finfo = (struct octnet_buf_free_info *)skb->cb; 2274 finfo->lio = lio; 2275 finfo->skb = skb; 2276 finfo->sc = NULL; 2277 2278 /* Prepare the attributes for the data to be passed to OSI. */ 2279 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2280 2281 ndata.buf = finfo; 2282 2283 ndata.q_no = iq_no; 2284 2285 if (netif_is_multiqueue(netdev)) { 2286 if (octnet_iq_is_full(oct, ndata.q_no)) { 2287 /* defer sending if queue is full */ 2288 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2289 ndata.q_no); 2290 stats->tx_iq_busy++; 2291 return NETDEV_TX_BUSY; 2292 } 2293 } else { 2294 if (octnet_iq_is_full(oct, lio->txq)) { 2295 /* defer sending if queue is full */ 2296 stats->tx_iq_busy++; 2297 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2298 ndata.q_no); 2299 return NETDEV_TX_BUSY; 2300 } 2301 } 2302 2303 ndata.datasize = skb->len; 2304 2305 cmdsetup.u64 = 0; 2306 cmdsetup.s.iq_no = iq_no; 2307 2308 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2309 if (skb->encapsulation) { 2310 cmdsetup.s.tnl_csum = 1; 2311 stats->tx_vxlan++; 2312 } else { 2313 cmdsetup.s.transport_csum = 1; 2314 } 2315 } 2316 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2317 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2318 cmdsetup.s.timestamp = 1; 2319 } 2320 2321 if (!skb_shinfo(skb)->nr_frags) { 2322 cmdsetup.s.u.datasize = skb->len; 2323 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2324 /* Offload checksum calculation for TCP/UDP packets */ 2325 dptr = dma_map_single(&oct->pci_dev->dev, 2326 skb->data, 2327 skb->len, 2328 DMA_TO_DEVICE); 2329 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2330 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2331 __func__); 2332 return NETDEV_TX_BUSY; 2333 } 2334 2335 ndata.cmd.cmd3.dptr = dptr; 2336 finfo->dptr = dptr; 2337 ndata.reqtype = REQTYPE_NORESP_NET; 2338 2339 } else { 2340 struct skb_frag_struct *frag; 2341 struct octnic_gather *g; 2342 int i, frags; 2343 2344 spin_lock(&lio->glist_lock[q_idx]); 2345 g = (struct octnic_gather *)list_delete_head( 2346 &lio->glist[q_idx]); 2347 spin_unlock(&lio->glist_lock[q_idx]); 2348 2349 if (!g) { 2350 netif_info(lio, tx_err, lio->netdev, 2351 "Transmit scatter gather: glist null!\n"); 2352 goto lio_xmit_failed; 2353 } 2354 2355 cmdsetup.s.gather = 1; 2356 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2357 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2358 2359 memset(g->sg, 0, g->sg_size); 2360 2361 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2362 skb->data, 2363 (skb->len - skb->data_len), 2364 DMA_TO_DEVICE); 2365 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2366 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2367 __func__); 2368 return NETDEV_TX_BUSY; 2369 } 2370 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2371 2372 frags = skb_shinfo(skb)->nr_frags; 2373 i = 1; 2374 while (frags--) { 2375 frag = &skb_shinfo(skb)->frags[i - 1]; 2376 2377 g->sg[(i >> 2)].ptr[(i & 3)] = 2378 dma_map_page(&oct->pci_dev->dev, 2379 frag->page.p, 2380 frag->page_offset, 2381 frag->size, 2382 DMA_TO_DEVICE); 2383 if 
(dma_mapping_error(&oct->pci_dev->dev, 2384 g->sg[i >> 2].ptr[i & 3])) { 2385 dma_unmap_single(&oct->pci_dev->dev, 2386 g->sg[0].ptr[0], 2387 skb->len - skb->data_len, 2388 DMA_TO_DEVICE); 2389 for (j = 1; j < i; j++) { 2390 frag = &skb_shinfo(skb)->frags[j - 1]; 2391 dma_unmap_page(&oct->pci_dev->dev, 2392 g->sg[j >> 2].ptr[j & 3], 2393 frag->size, 2394 DMA_TO_DEVICE); 2395 } 2396 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2397 __func__); 2398 return NETDEV_TX_BUSY; 2399 } 2400 2401 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 2402 i++; 2403 } 2404 2405 dptr = g->sg_dma_ptr; 2406 2407 ndata.cmd.cmd3.dptr = dptr; 2408 finfo->dptr = dptr; 2409 finfo->g = g; 2410 2411 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2412 } 2413 2414 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2415 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2416 2417 if (skb_shinfo(skb)->gso_size) { 2418 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2419 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2420 } 2421 2422 /* HW insert VLAN tag */ 2423 if (skb_vlan_tag_present(skb)) { 2424 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 2425 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 2426 } 2427 2428 if (unlikely(cmdsetup.s.timestamp)) 2429 status = send_nic_timestamp_pkt(oct, &ndata, finfo); 2430 else 2431 status = octnet_send_nic_data_pkt(oct, &ndata); 2432 if (status == IQ_SEND_FAILED) 2433 goto lio_xmit_failed; 2434 2435 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2436 2437 if (status == IQ_SEND_STOP) { 2438 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 2439 iq_no); 2440 stop_q(lio->netdev, q_idx); 2441 } 2442 2443 netif_trans_update(netdev); 2444 2445 if (tx_info->s.gso_segs) 2446 stats->tx_done += tx_info->s.gso_segs; 2447 else 2448 stats->tx_done++; 2449 stats->tx_tot_bytes += ndata.datasize; 2450 2451 return NETDEV_TX_OK; 2452 2453 lio_xmit_failed: 2454 stats->tx_dropped++; 2455 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2456 iq_no, stats->tx_dropped); 2457 if (dptr) 2458 dma_unmap_single(&oct->pci_dev->dev, dptr, 2459 ndata.datasize, DMA_TO_DEVICE); 2460 tx_buffer_free(skb); 2461 return NETDEV_TX_OK; 2462 } 2463 2464 /** \brief Network device Tx timeout 2465 * @param netdev pointer to network device 2466 */ 2467 static void liquidio_tx_timeout(struct net_device *netdev) 2468 { 2469 struct lio *lio; 2470 2471 lio = GET_LIO(netdev); 2472 2473 netif_info(lio, tx_err, lio->netdev, 2474 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2475 netdev->stats.tx_dropped); 2476 netif_trans_update(netdev); 2477 txqs_wake(netdev); 2478 } 2479 2480 static int 2481 liquidio_vlan_rx_add_vid(struct net_device *netdev, 2482 __be16 proto __attribute__((unused)), u16 vid) 2483 { 2484 struct lio *lio = GET_LIO(netdev); 2485 struct octeon_device *oct = lio->oct_dev; 2486 struct octnic_ctrl_pkt nctrl; 2487 struct completion compl; 2488 u16 response_code; 2489 int ret = 0; 2490 2491 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2492 2493 nctrl.ncmd.u64 = 0; 2494 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2495 nctrl.ncmd.s.param1 = vid; 2496 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2497 nctrl.wait_time = 100; 2498 nctrl.netpndev = (u64)netdev; 2499 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2500 init_completion(&compl); 2501 nctrl.completion = &compl; 2502 nctrl.response_code = &response_code; 2503 2504 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2505 if (ret < 0) { 2506 
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2507 			ret);
2508 		return -EIO;
2509 	}
2510 
2511 	if (!wait_for_completion_timeout(&compl,
2512 					 msecs_to_jiffies(nctrl.wait_time)))
2513 		return -EPERM;
2514 
2515 	if (READ_ONCE(response_code))
2516 		return -EPERM;
2517 
2518 	return 0;
2519 }
2520 
2521 static int
2522 liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2523 			  __be16 proto __attribute__((unused)), u16 vid)
2524 {
2525 	struct lio *lio = GET_LIO(netdev);
2526 	struct octeon_device *oct = lio->oct_dev;
2527 	struct octnic_ctrl_pkt nctrl;
2528 	int ret = 0;
2529 
2530 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2531 
2532 	nctrl.ncmd.u64 = 0;
2533 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2534 	nctrl.ncmd.s.param1 = vid;
2535 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2536 	nctrl.wait_time = 100;
2537 	nctrl.netpndev = (u64)netdev;
2538 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2539 
2540 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2541 	if (ret < 0) {
2542 		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2543 			ret);
2544 	}
2545 	return ret;
2546 }
2547 
2548 /** Send a command to enable/disable RX checksum offload
2549  * @param netdev pointer to network device
2550  * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
2551  * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
2552  *               OCTNET_CMD_RXCSUM_DISABLE
2553  * @returns SUCCESS or FAILURE
2554  */
2555 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2556 					u8 rx_cmd)
2557 {
2558 	struct lio *lio = GET_LIO(netdev);
2559 	struct octeon_device *oct = lio->oct_dev;
2560 	struct octnic_ctrl_pkt nctrl;
2561 	int ret = 0;
2562 
2563 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2564 
2565 	nctrl.ncmd.u64 = 0;
2566 	nctrl.ncmd.s.cmd = command;
2567 	nctrl.ncmd.s.param1 = rx_cmd;
2568 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2569 	nctrl.wait_time = 100;
2570 	nctrl.netpndev = (u64)netdev;
2571 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2572 
2573 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2574 	if (ret < 0) {
2575 		dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
2576 			ret);
2577 	}
2578 	return ret;
2579 }
2580 
2581 /** Send a command to add/delete a VxLAN UDP port to the firmware
2582  * @param netdev pointer to network device
2583  * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
2584  * @param vxlan_port VxLAN port to be added or deleted
2585  * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
2586  *                      OCTNET_CMD_VXLAN_PORT_DEL
2587  * @returns SUCCESS or FAILURE
2588  */
2589 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2590 				       u16 vxlan_port, u8 vxlan_cmd_bit)
2591 {
2592 	struct lio *lio = GET_LIO(netdev);
2593 	struct octeon_device *oct = lio->oct_dev;
2594 	struct octnic_ctrl_pkt nctrl;
2595 	int ret = 0;
2596 
2597 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2598 
2599 	nctrl.ncmd.u64 = 0;
2600 	nctrl.ncmd.s.cmd = command;
2601 	nctrl.ncmd.s.more = vxlan_cmd_bit;
2602 	nctrl.ncmd.s.param1 = vxlan_port;
2603 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2604 	nctrl.wait_time = 100;
2605 	nctrl.netpndev = (u64)netdev;
2606 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2607 
2608 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2609 	if (ret < 0) {
2610 		dev_err(&oct->pci_dev->dev,
2611 			"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
2612 			ret);
2613 	}
2614 	return ret;
2615 }
2616 
2617 /** \brief Net device fix features
2618  * @param netdev pointer to network device
2619  * @param request
features requested 2620 * @returns updated features list 2621 */ 2622 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2623 netdev_features_t request) 2624 { 2625 struct lio *lio = netdev_priv(netdev); 2626 2627 if ((request & NETIF_F_RXCSUM) && 2628 !(lio->dev_capability & NETIF_F_RXCSUM)) 2629 request &= ~NETIF_F_RXCSUM; 2630 2631 if ((request & NETIF_F_HW_CSUM) && 2632 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2633 request &= ~NETIF_F_HW_CSUM; 2634 2635 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2636 request &= ~NETIF_F_TSO; 2637 2638 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2639 request &= ~NETIF_F_TSO6; 2640 2641 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2642 request &= ~NETIF_F_LRO; 2643 2644 /* Disable LRO if RXCSUM is off */ 2645 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2646 (lio->dev_capability & NETIF_F_LRO)) 2647 request &= ~NETIF_F_LRO; 2648 2649 return request; 2650 } 2651 2652 /** \brief Net device set features 2653 * @param netdev pointer to network device 2654 * @param features features to enable/disable 2655 */ 2656 static int liquidio_set_features(struct net_device *netdev, 2657 netdev_features_t features) 2658 { 2659 struct lio *lio = netdev_priv(netdev); 2660 2661 if (!((netdev->features ^ features) & NETIF_F_LRO)) 2662 return 0; 2663 2664 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 2665 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2666 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2667 else if (!(features & NETIF_F_LRO) && 2668 (lio->dev_capability & NETIF_F_LRO)) 2669 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 2670 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2671 if (!(netdev->features & NETIF_F_RXCSUM) && 2672 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2673 (features & NETIF_F_RXCSUM)) 2674 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2675 OCTNET_CMD_RXCSUM_ENABLE); 2676 else if ((netdev->features & NETIF_F_RXCSUM) && 2677 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2678 !(features & NETIF_F_RXCSUM)) 2679 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2680 OCTNET_CMD_RXCSUM_DISABLE); 2681 2682 return 0; 2683 } 2684 2685 static void liquidio_add_vxlan_port(struct net_device *netdev, 2686 struct udp_tunnel_info *ti) 2687 { 2688 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2689 return; 2690 2691 liquidio_vxlan_port_command(netdev, 2692 OCTNET_CMD_VXLAN_PORT_CONFIG, 2693 htons(ti->port), 2694 OCTNET_CMD_VXLAN_PORT_ADD); 2695 } 2696 2697 static void liquidio_del_vxlan_port(struct net_device *netdev, 2698 struct udp_tunnel_info *ti) 2699 { 2700 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2701 return; 2702 2703 liquidio_vxlan_port_command(netdev, 2704 OCTNET_CMD_VXLAN_PORT_CONFIG, 2705 htons(ti->port), 2706 OCTNET_CMD_VXLAN_PORT_DEL); 2707 } 2708 2709 static const struct net_device_ops lionetdevops = { 2710 .ndo_open = liquidio_open, 2711 .ndo_stop = liquidio_stop, 2712 .ndo_start_xmit = liquidio_xmit, 2713 .ndo_get_stats = liquidio_get_stats, 2714 .ndo_set_mac_address = liquidio_set_mac, 2715 .ndo_set_rx_mode = liquidio_set_mcast_list, 2716 .ndo_tx_timeout = liquidio_tx_timeout, 2717 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 2718 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 2719 .ndo_change_mtu = liquidio_change_mtu, 2720 .ndo_do_ioctl = liquidio_ioctl, 2721 .ndo_fix_features = liquidio_fix_features, 2722 .ndo_set_features = liquidio_set_features, 2723 .ndo_udp_tunnel_add = 
liquidio_add_vxlan_port, 2724 .ndo_udp_tunnel_del = liquidio_del_vxlan_port, 2725 }; 2726 2727 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 2728 { 2729 struct octeon_device *oct = (struct octeon_device *)buf; 2730 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 2731 union oct_link_status *ls; 2732 int gmxport = 0; 2733 int i; 2734 2735 if (recv_pkt->buffer_size[0] != sizeof(*ls)) { 2736 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 2737 recv_pkt->buffer_size[0], 2738 recv_pkt->rh.r_nic_info.gmxport); 2739 goto nic_info_err; 2740 } 2741 2742 gmxport = recv_pkt->rh.r_nic_info.gmxport; 2743 ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]); 2744 2745 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 2746 2747 for (i = 0; i < oct->ifcount; i++) { 2748 if (oct->props[i].gmxport == gmxport) { 2749 update_link_status(oct->props[i].netdev, ls); 2750 break; 2751 } 2752 } 2753 2754 nic_info_err: 2755 for (i = 0; i < recv_pkt->buffer_count; i++) 2756 recv_buffer_free(recv_pkt->buffer_ptr[i]); 2757 octeon_free_recv_info(recv_info); 2758 return 0; 2759 } 2760 2761 /** 2762 * \brief Setup network interfaces 2763 * @param octeon_dev octeon device 2764 * 2765 * Called during init time for each device. It assumes the NIC 2766 * is already up and running. The link information for each 2767 * interface is passed in link_info. 2768 */ 2769 static int setup_nic_devices(struct octeon_device *octeon_dev) 2770 { 2771 int retval, num_iqueues, num_oqueues; 2772 struct liquidio_if_cfg_context *ctx; 2773 u32 resp_size, ctx_size, data_size; 2774 struct liquidio_if_cfg_resp *resp; 2775 struct octeon_soft_command *sc; 2776 union oct_nic_if_cfg if_cfg; 2777 struct octdev_props *props; 2778 struct net_device *netdev; 2779 struct lio_version *vdata; 2780 struct lio *lio = NULL; 2781 u8 mac[ETH_ALEN], i, j; 2782 u32 ifidx_or_pfnum; 2783 2784 ifidx_or_pfnum = octeon_dev->pf_num; 2785 2786 /* This is to handle link status changes */ 2787 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, 2788 lio_nic_info, octeon_dev); 2789 2790 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 2791 * They are handled directly. 
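	 *
	 * The request types registered below cover the buffers queued by
	 * liquidio_xmit(): plain skbs (REQTYPE_NORESP_NET), scatter/gather
	 * skbs (REQTYPE_NORESP_NET_SG) and timestamped scatter/gather skbs
	 * (REQTYPE_RESP_NET_SG).  The registered callbacks are expected to
	 * unmap the DMA buffers and release the skb (and the gather entry,
	 * where one was used) once the instruction completes.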
2792 */ 2793 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 2794 free_netbuf); 2795 2796 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 2797 free_netsgbuf); 2798 2799 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 2800 free_netsgbuf_with_resp); 2801 2802 for (i = 0; i < octeon_dev->ifcount; i++) { 2803 resp_size = sizeof(struct liquidio_if_cfg_resp); 2804 ctx_size = sizeof(struct liquidio_if_cfg_context); 2805 data_size = sizeof(struct lio_version); 2806 sc = (struct octeon_soft_command *) 2807 octeon_alloc_soft_command(octeon_dev, data_size, 2808 resp_size, ctx_size); 2809 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 2810 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 2811 vdata = (struct lio_version *)sc->virtdptr; 2812 2813 *((u64 *)vdata) = 0; 2814 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 2815 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 2816 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 2817 2818 WRITE_ONCE(ctx->cond, 0); 2819 ctx->octeon_id = lio_get_device_id(octeon_dev); 2820 init_waitqueue_head(&ctx->wc); 2821 2822 if_cfg.u64 = 0; 2823 2824 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; 2825 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; 2826 if_cfg.s.base_queue = 0; 2827 2828 sc->iq_no = 0; 2829 2830 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 2831 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 2832 0); 2833 2834 sc->callback = if_cfg_callback; 2835 sc->callback_arg = sc; 2836 sc->wait_time = 5000; 2837 2838 retval = octeon_send_soft_command(octeon_dev, sc); 2839 if (retval == IQ_SEND_FAILED) { 2840 dev_err(&octeon_dev->pci_dev->dev, 2841 "iq/oq config failed status: %x\n", retval); 2842 /* Soft instr is freed by driver in case of failure. */ 2843 goto setup_nic_dev_fail; 2844 } 2845 2846 /* Sleep on a wait queue till the cond flag indicates that the 2847 * response arrived or timed-out. 2848 */ 2849 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 2850 dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); 2851 goto setup_nic_wait_intr; 2852 } 2853 2854 retval = resp->status; 2855 if (retval) { 2856 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 2857 goto setup_nic_dev_fail; 2858 } 2859 2860 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 2861 (sizeof(struct liquidio_if_cfg_info)) >> 3); 2862 2863 num_iqueues = hweight64(resp->cfg_info.iqmask); 2864 num_oqueues = hweight64(resp->cfg_info.oqmask); 2865 2866 if (!(num_iqueues) || !(num_oqueues)) { 2867 dev_err(&octeon_dev->pci_dev->dev, 2868 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 2869 resp->cfg_info.iqmask, resp->cfg_info.oqmask); 2870 goto setup_nic_dev_fail; 2871 } 2872 dev_dbg(&octeon_dev->pci_dev->dev, 2873 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 2874 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 2875 num_iqueues, num_oqueues); 2876 2877 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 2878 2879 if (!netdev) { 2880 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 2881 goto setup_nic_dev_fail; 2882 } 2883 2884 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 2885 2886 /* Associate the routines that will handle different 2887 * netdev tasks. 
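		 * lionetdevops (defined above) supplies the open/stop,
		 * transmit, rx-mode, MAC/MTU, ioctl, VLAN-filter and
		 * VxLAN-port handlers for every interface created here.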
2888 */ 2889 netdev->netdev_ops = &lionetdevops; 2890 2891 lio = GET_LIO(netdev); 2892 2893 memset(lio, 0, sizeof(struct lio)); 2894 2895 lio->ifidx = ifidx_or_pfnum; 2896 2897 props = &octeon_dev->props[i]; 2898 props->gmxport = resp->cfg_info.linfo.gmxport; 2899 props->netdev = netdev; 2900 2901 lio->linfo.num_rxpciq = num_oqueues; 2902 lio->linfo.num_txpciq = num_iqueues; 2903 2904 for (j = 0; j < num_oqueues; j++) { 2905 lio->linfo.rxpciq[j].u64 = 2906 resp->cfg_info.linfo.rxpciq[j].u64; 2907 } 2908 for (j = 0; j < num_iqueues; j++) { 2909 lio->linfo.txpciq[j].u64 = 2910 resp->cfg_info.linfo.txpciq[j].u64; 2911 } 2912 2913 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 2914 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 2915 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 2916 lio->linfo.macaddr_is_admin_asgnd = 2917 resp->cfg_info.linfo.macaddr_is_admin_asgnd; 2918 2919 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 2920 2921 lio->dev_capability = NETIF_F_HIGHDMA 2922 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 2923 | NETIF_F_SG | NETIF_F_RXCSUM 2924 | NETIF_F_TSO | NETIF_F_TSO6 2925 | NETIF_F_GRO 2926 | NETIF_F_LRO; 2927 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 2928 2929 /* Copy of transmit encapsulation capabilities: 2930 * TSO, TSO6, Checksums for this device 2931 */ 2932 lio->enc_dev_capability = NETIF_F_IP_CSUM 2933 | NETIF_F_IPV6_CSUM 2934 | NETIF_F_GSO_UDP_TUNNEL 2935 | NETIF_F_HW_CSUM | NETIF_F_SG 2936 | NETIF_F_RXCSUM 2937 | NETIF_F_TSO | NETIF_F_TSO6 2938 | NETIF_F_LRO; 2939 2940 netdev->hw_enc_features = 2941 (lio->enc_dev_capability & ~NETIF_F_LRO); 2942 netdev->vlan_features = lio->dev_capability; 2943 /* Add any unchangeable hw features */ 2944 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 2945 NETIF_F_HW_VLAN_CTAG_RX | 2946 NETIF_F_HW_VLAN_CTAG_TX; 2947 2948 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 2949 2950 netdev->hw_features = lio->dev_capability; 2951 2952 /* MTU range: 68 - 16000 */ 2953 netdev->min_mtu = LIO_MIN_MTU_SIZE; 2954 netdev->max_mtu = LIO_MAX_MTU_SIZE; 2955 2956 /* Point to the properties for octeon device to which this 2957 * interface belongs. 
2958 */ 2959 lio->oct_dev = octeon_dev; 2960 lio->octprops = props; 2961 lio->netdev = netdev; 2962 2963 dev_dbg(&octeon_dev->pci_dev->dev, 2964 "if%d gmx: %d hw_addr: 0x%llx\n", i, 2965 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 2966 2967 /* 64-bit swap required on LE machines */ 2968 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 2969 for (j = 0; j < ETH_ALEN; j++) 2970 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 2971 2972 /* Copy MAC Address to OS network device structure */ 2973 ether_addr_copy(netdev->dev_addr, mac); 2974 2975 if (setup_io_queues(octeon_dev, i)) { 2976 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 2977 goto setup_nic_dev_fail; 2978 } 2979 2980 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 2981 2982 /* For VFs, enable Octeon device interrupts here, 2983 * as this is contingent upon IO queue setup 2984 */ 2985 octeon_dev->fn_list.enable_interrupt(octeon_dev, 2986 OCTEON_ALL_INTR); 2987 2988 /* By default all interfaces on a single Octeon uses the same 2989 * tx and rx queues 2990 */ 2991 lio->txq = lio->linfo.txpciq[0].s.q_no; 2992 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 2993 2994 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 2995 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 2996 2997 if (setup_glists(lio, num_iqueues)) { 2998 dev_err(&octeon_dev->pci_dev->dev, 2999 "Gather list allocation failed\n"); 3000 goto setup_nic_dev_fail; 3001 } 3002 3003 /* Register ethtool support */ 3004 liquidio_set_ethtool_ops(netdev); 3005 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID) 3006 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 3007 else 3008 octeon_dev->priv_flags = 0x0; 3009 3010 if (netdev->features & NETIF_F_LRO) 3011 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3012 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3013 3014 if ((debug != -1) && (debug & NETIF_MSG_HW)) 3015 liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE, 3016 0); 3017 3018 if (setup_link_status_change_wq(netdev)) 3019 goto setup_nic_dev_fail; 3020 3021 if (setup_rx_oom_poll_fn(netdev)) 3022 goto setup_nic_dev_fail; 3023 3024 /* Register the network device with the OS */ 3025 if (register_netdev(netdev)) { 3026 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3027 goto setup_nic_dev_fail; 3028 } 3029 3030 dev_dbg(&octeon_dev->pci_dev->dev, 3031 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3032 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3033 netif_carrier_off(netdev); 3034 lio->link_changes++; 3035 3036 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3037 3038 /* Sending command to firmware to enable Rx checksum offload 3039 * by default at the time of setup of Liquidio driver for 3040 * this device 3041 */ 3042 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3043 OCTNET_CMD_RXCSUM_ENABLE); 3044 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 3045 OCTNET_CMD_TXCSUM_ENABLE); 3046 3047 dev_dbg(&octeon_dev->pci_dev->dev, 3048 "NIC ifidx:%d Setup successful\n", i); 3049 3050 octeon_free_soft_command(octeon_dev, sc); 3051 } 3052 3053 return 0; 3054 3055 setup_nic_dev_fail: 3056 3057 octeon_free_soft_command(octeon_dev, sc); 3058 3059 setup_nic_wait_intr: 3060 3061 while (i--) { 3062 dev_err(&octeon_dev->pci_dev->dev, 3063 "NIC ifidx:%d Setup failed\n", i); 3064 liquidio_destroy_nic_device(octeon_dev, i); 3065 } 3066 return -ENODEV; 3067 } 3068 3069 /** 3070 * \brief initialize the NIC 3071 * @param oct octeon device 3072 * 3073 * This initialization routine is called once the Octeon device 
application is 3074 * up and running 3075 */ 3076 static int liquidio_init_nic_module(struct octeon_device *oct) 3077 { 3078 int num_nic_ports = 1; 3079 int i, retval = 0; 3080 3081 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3082 3083 /* only default iq and oq were initialized 3084 * initialize the rest as well run port_config command for each port 3085 */ 3086 oct->ifcount = num_nic_ports; 3087 memset(oct->props, 0, 3088 sizeof(struct octdev_props) * num_nic_ports); 3089 3090 for (i = 0; i < MAX_OCTEON_LINKS; i++) 3091 oct->props[i].gmxport = -1; 3092 3093 retval = setup_nic_devices(oct); 3094 if (retval) { 3095 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3096 goto octnet_init_failure; 3097 } 3098 3099 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3100 3101 return retval; 3102 3103 octnet_init_failure: 3104 3105 oct->ifcount = 0; 3106 3107 return retval; 3108 } 3109 3110 /** 3111 * \brief Device initialization for each Octeon device that is probed 3112 * @param octeon_dev octeon device 3113 */ 3114 static int octeon_device_init(struct octeon_device *oct) 3115 { 3116 u32 rev_id; 3117 int j; 3118 3119 atomic_set(&oct->status, OCT_DEV_BEGIN_STATE); 3120 3121 /* Enable access to the octeon device and make its DMA capability 3122 * known to the OS. 3123 */ 3124 if (octeon_pci_os_setup(oct)) 3125 return 1; 3126 atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE); 3127 3128 oct->chip_id = OCTEON_CN23XX_VF_VID; 3129 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 3130 oct->rev_id = rev_id & 0xff; 3131 3132 if (cn23xx_setup_octeon_vf_device(oct)) 3133 return 1; 3134 3135 atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE); 3136 3137 oct->app_mode = CVM_DRV_NIC_APP; 3138 3139 /* Initialize the dispatch mechanism used to push packets arriving on 3140 * Octeon Output queues. 3141 */ 3142 if (octeon_init_dispatch_list(oct)) 3143 return 1; 3144 3145 atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE); 3146 3147 if (octeon_set_io_queues_off(oct)) { 3148 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); 3149 return 1; 3150 } 3151 3152 if (oct->fn_list.setup_device_regs(oct)) { 3153 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n"); 3154 return 1; 3155 } 3156 3157 /* Initialize soft command buffer pool */ 3158 if (octeon_setup_sc_buffer_pool(oct)) { 3159 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n"); 3160 return 1; 3161 } 3162 atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 3163 3164 /* Setup the data structures that manage this Octeon's Input queues. */ 3165 if (octeon_setup_instr_queues(oct)) { 3166 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n"); 3167 return 1; 3168 } 3169 atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 3170 3171 /* Initialize lists to manage the requests of different types that 3172 * arrive from user & kernel applications for this octeon device. 
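	 * Soft commands that expect a reply (such as the IF_CFG request sent
	 * later from setup_nic_devices()) are presumably tracked on these
	 * response lists until the firmware answers or the request times out.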
3173 */ 3174 if (octeon_setup_response_list(oct)) { 3175 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n"); 3176 return 1; 3177 } 3178 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE); 3179 3180 if (octeon_setup_output_queues(oct)) { 3181 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n"); 3182 return 1; 3183 } 3184 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE); 3185 3186 if (oct->fn_list.setup_mbox(oct)) { 3187 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); 3188 return 1; 3189 } 3190 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); 3191 3192 if (octeon_allocate_ioq_vector(oct)) { 3193 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); 3194 return 1; 3195 } 3196 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 3197 3198 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n", 3199 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf); 3200 3201 /* Setup the interrupt handler and record the INT SUM register address*/ 3202 if (octeon_setup_interrupt(oct)) 3203 return 1; 3204 3205 if (cn23xx_octeon_pfvf_handshake(oct)) 3206 return 1; 3207 3208 /* Enable Octeon device interrupts */ 3209 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 3210 3211 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); 3212 3213 /* Enable the input and output queues for this Octeon device */ 3214 if (oct->fn_list.enable_io_queues(oct)) { 3215 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n"); 3216 return 1; 3217 } 3218 3219 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE); 3220 3221 atomic_set(&oct->status, OCT_DEV_HOST_OK); 3222 3223 /* Send Credit for Octeon Output queues. Credits are always sent after 3224 * the output queue is enabled. 3225 */ 3226 for (j = 0; j < oct->num_oqs; j++) 3227 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); 3228 3229 /* Packets can start arriving on the output queues from this point. */ 3230 3231 atomic_set(&oct->status, OCT_DEV_CORE_OK); 3232 3233 atomic_set(&oct->status, OCT_DEV_RUNNING); 3234 3235 if (liquidio_init_nic_module(oct)) 3236 return 1; 3237 3238 return 0; 3239 } 3240 3241 static int __init liquidio_vf_init(void) 3242 { 3243 octeon_init_device_list(0); 3244 return pci_register_driver(&liquidio_vf_pci_driver); 3245 } 3246 3247 static void __exit liquidio_vf_exit(void) 3248 { 3249 pci_unregister_driver(&liquidio_vf_pci_driver); 3250 3251 pr_info("LiquidIO_VF network module is now unloaded\n"); 3252 } 3253 3254 module_init(liquidio_vf_init); 3255 module_exit(liquidio_vf_exit); 3256
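
/* Bring-up overview: liquidio_vf_init() registers liquidio_vf_pci_driver with
 * the PCI core.  When a CN23XX VF is bound, the probe path is expected to run
 * octeon_device_init(), which steps through the OCT_DEV_* states set above and
 * finishes by calling liquidio_init_nic_module() to create and register the
 * network interfaces.
 */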