1 /****************************************************************************** 2 3 Copyright (c) 2001-2015, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 36 #ifndef IXGBE_STANDALONE_BUILD 37 #include "opt_inet.h" 38 #include "opt_inet6.h" 39 #endif 40 41 #include "ixgbe.h" 42 43 /********************************************************************* 44 * Driver version 45 *********************************************************************/ 46 char ixv_driver_version[] = "1.4.0"; 47 48 /********************************************************************* 49 * PCI Device ID Table 50 * 51 * Used by probe to select devices to load on 52 * Last field stores an index into ixv_strings 53 * Last entry must be all 0s 54 * 55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 56 *********************************************************************/ 57 58 static ixgbe_vendor_info_t ixv_vendor_info_array[] = 59 { 60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, 61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, 62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0}, 63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0}, 64 /* required last entry */ 65 {0, 0, 0, 0, 0} 66 }; 67 68 /********************************************************************* 69 * Table of branding strings 70 *********************************************************************/ 71 72 static char *ixv_strings[] = { 73 "Intel(R) PRO/10GbE Virtual Function Network Driver" 74 }; 75 76 /********************************************************************* 77 * Function prototypes 78 *********************************************************************/ 79 static int ixv_probe(device_t); 80 static int ixv_attach(device_t); 81 static int ixv_detach(device_t); 82 static int ixv_shutdown(device_t); 83 static int ixv_ioctl(struct ifnet *, u_long, caddr_t); 84 static void ixv_init(void *); 85 static void ixv_init_locked(struct adapter *); 86 static void ixv_stop(void *); 87 static void ixv_media_status(struct ifnet *, struct ifmediareq *); 88 static int ixv_media_change(struct ifnet *); 89 static void ixv_identify_hardware(struct adapter *); 90 static int ixv_allocate_pci_resources(struct adapter *); 91 static int ixv_allocate_msix(struct adapter *); 92 static int ixv_setup_msix(struct adapter *); 93 static void ixv_free_pci_resources(struct adapter *); 94 static void ixv_local_timer(void *); 95 static void ixv_setup_interface(device_t, struct adapter *); 96 static void ixv_config_link(struct adapter *); 97 98 static void ixv_initialize_transmit_units(struct adapter *); 99 static void ixv_initialize_receive_units(struct adapter *); 100 101 static void ixv_enable_intr(struct adapter *); 102 static void ixv_disable_intr(struct adapter *); 103 static void ixv_set_multi(struct adapter *); 104 static void ixv_update_link_status(struct adapter *); 105 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS); 106 static void ixv_set_ivar(struct adapter *, u8, u8, s8); 107 static void ixv_configure_ivars(struct adapter *); 108 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 109 110 static void ixv_setup_vlan_support(struct adapter *); 111 static void ixv_register_vlan(void *, struct ifnet *, u16); 112 static void ixv_unregister_vlan(void *, struct ifnet *, u16); 113 114 static void ixv_save_stats(struct adapter *); 115 static void ixv_init_stats(struct adapter *); 116 static void ixv_update_stats(struct adapter *); 117 static void ixv_add_stats_sysctls(struct adapter *); 118 119 /* The MSI/X Interrupt handlers */ 120 static void ixv_msix_que(void *); 121 static void ixv_msix_mbx(void *); 122 123 /* Deferred interrupt tasklets */ 124 static void ixv_handle_que(void *, int); 125 static void ixv_handle_mbx(void *, int); 126 127 /********************************************************************* 128 * FreeBSD Device Interface Entry Points 129 *********************************************************************/ 130 131 static device_method_t ixv_methods[] = { 132 /* Device interface */ 133 DEVMETHOD(device_probe, ixv_probe), 134 DEVMETHOD(device_attach, ixv_attach), 135 DEVMETHOD(device_detach, ixv_detach), 136 DEVMETHOD(device_shutdown, ixv_shutdown), 137 DEVMETHOD_END 138 }; 139 140 static driver_t ixv_driver = { 141 "ixv", ixv_methods, sizeof(struct adapter), 142 }; 143 144 devclass_t ixv_devclass; 145 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); 146 MODULE_DEPEND(ixv, pci, 1, 1, 1); 147 MODULE_DEPEND(ixv, ether, 1, 1, 1); 148 /* XXX depend on 'ix' ? */ 149 150 /* 151 ** TUNEABLE PARAMETERS: 152 */ 153 154 /* Number of Queues - do not exceed MSIX vectors - 1 */ 155 static int ixv_num_queues = 1; 156 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues); 157 158 /* 159 ** AIM: Adaptive Interrupt Moderation 160 ** which means that the interrupt rate 161 ** is varied over time based on the 162 ** traffic for that interrupt vector 163 */ 164 static int ixv_enable_aim = FALSE; 165 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim); 166 167 /* How many packets rxeof tries to clean at a time */ 168 static int ixv_rx_process_limit = 256; 169 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit); 170 171 /* How many packets txeof tries to clean at a time */ 172 static int ixv_tx_process_limit = 256; 173 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit); 174 175 /* Flow control setting, default to full */ 176 static int ixv_flow_control = ixgbe_fc_full; 177 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control); 178 179 /* 180 * Header split: this causes the hardware to DMA 181 * the header into a seperate mbuf from the payload, 182 * it can be a performance win in some workloads, but 183 * in others it actually hurts, its off by default. 184 */ 185 static int ixv_header_split = FALSE; 186 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split); 187 188 /* 189 ** Number of TX descriptors per ring, 190 ** setting higher than RX as this seems 191 ** the better performing choice. 192 */ 193 static int ixv_txd = DEFAULT_TXD; 194 TUNABLE_INT("hw.ixv.txd", &ixv_txd); 195 196 /* Number of RX descriptors per ring */ 197 static int ixv_rxd = DEFAULT_RXD; 198 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); 199 200 /* 201 ** Shadow VFTA table, this is needed because 202 ** the real filter table gets cleared during 203 ** a soft reset and we need to repopulate it. 204 */ 205 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE]; 206 207 /********************************************************************* 208 * Device identification routine 209 * 210 * ixv_probe determines if the driver should be loaded on 211 * adapter based on PCI vendor/device id of the adapter. 212 * 213 * return BUS_PROBE_DEFAULT on success, positive on failure 214 *********************************************************************/ 215 216 static int 217 ixv_probe(device_t dev) 218 { 219 ixgbe_vendor_info_t *ent; 220 221 u16 pci_vendor_id = 0; 222 u16 pci_device_id = 0; 223 u16 pci_subvendor_id = 0; 224 u16 pci_subdevice_id = 0; 225 char adapter_name[256]; 226 227 228 pci_vendor_id = pci_get_vendor(dev); 229 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID) 230 return (ENXIO); 231 232 pci_device_id = pci_get_device(dev); 233 pci_subvendor_id = pci_get_subvendor(dev); 234 pci_subdevice_id = pci_get_subdevice(dev); 235 236 ent = ixv_vendor_info_array; 237 while (ent->vendor_id != 0) { 238 if ((pci_vendor_id == ent->vendor_id) && 239 (pci_device_id == ent->device_id) && 240 241 ((pci_subvendor_id == ent->subvendor_id) || 242 (ent->subvendor_id == 0)) && 243 244 ((pci_subdevice_id == ent->subdevice_id) || 245 (ent->subdevice_id == 0))) { 246 sprintf(adapter_name, "%s, Version - %s", 247 ixv_strings[ent->index], 248 ixv_driver_version); 249 device_set_desc_copy(dev, adapter_name); 250 return (BUS_PROBE_DEFAULT); 251 } 252 ent++; 253 } 254 return (ENXIO); 255 } 256 257 /********************************************************************* 258 * Device initialization routine 259 * 260 * The attach entry point is called when the driver is being loaded. 261 * This routine identifies the type of hardware, allocates all resources 262 * and initializes the hardware. 263 * 264 * return 0 on success, positive on failure 265 *********************************************************************/ 266 267 static int 268 ixv_attach(device_t dev) 269 { 270 struct adapter *adapter; 271 struct ixgbe_hw *hw; 272 int error = 0; 273 274 INIT_DEBUGOUT("ixv_attach: begin"); 275 276 /* Allocate, clear, and link in our adapter structure */ 277 adapter = device_get_softc(dev); 278 adapter->dev = adapter->osdep.dev = dev; 279 hw = &adapter->hw; 280 281 /* Core Lock Init*/ 282 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); 283 284 /* SYSCTL APIs */ 285 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 286 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 287 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, 288 adapter, 0, ixv_sysctl_debug, "I", "Debug Info"); 289 290 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 291 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 292 OID_AUTO, "enable_aim", CTLFLAG_RW, 293 &ixv_enable_aim, 1, "Interrupt Moderation"); 294 295 /* Set up the timer callout */ 296 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); 297 298 /* Determine hardware revision */ 299 ixv_identify_hardware(adapter); 300 301 /* Do base PCI setup - map BAR0 */ 302 if (ixv_allocate_pci_resources(adapter)) { 303 device_printf(dev, "Allocation of PCI resources failed\n"); 304 error = ENXIO; 305 goto err_out; 306 } 307 308 /* Do descriptor calc and sanity checks */ 309 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 310 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { 311 device_printf(dev, "TXD config issue, using default!\n"); 312 adapter->num_tx_desc = DEFAULT_TXD; 313 } else 314 adapter->num_tx_desc = ixv_txd; 315 316 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 317 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { 318 device_printf(dev, "RXD config issue, using default!\n"); 319 adapter->num_rx_desc = DEFAULT_RXD; 320 } else 321 adapter->num_rx_desc = ixv_rxd; 322 323 /* Allocate our TX/RX Queues */ 324 if (ixgbe_allocate_queues(adapter)) { 325 error = ENOMEM; 326 goto err_out; 327 } 328 329 /* 330 ** Initialize the shared code: its 331 ** at this point the mac type is set. 332 */ 333 error = ixgbe_init_shared_code(hw); 334 if (error) { 335 device_printf(dev,"Shared Code Initialization Failure\n"); 336 error = EIO; 337 goto err_late; 338 } 339 340 /* Setup the mailbox */ 341 ixgbe_init_mbx_params_vf(hw); 342 343 ixgbe_reset_hw(hw); 344 345 /* Get the Mailbox API version */ 346 device_printf(dev,"MBX API %d negotiation: %d\n", 347 ixgbe_mbox_api_11, 348 ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11)); 349 350 error = ixgbe_init_hw(hw); 351 if (error) { 352 device_printf(dev,"Hardware Initialization Failure\n"); 353 error = EIO; 354 goto err_late; 355 } 356 357 error = ixv_allocate_msix(adapter); 358 if (error) 359 goto err_late; 360 361 /* If no mac address was assigned, make a random one */ 362 if (!ixv_check_ether_addr(hw->mac.addr)) { 363 u8 addr[ETHER_ADDR_LEN]; 364 arc4rand(&addr, sizeof(addr), 0); 365 addr[0] &= 0xFE; 366 addr[0] |= 0x02; 367 bcopy(addr, hw->mac.addr, sizeof(addr)); 368 } 369 370 /* Setup OS specific network interface */ 371 ixv_setup_interface(dev, adapter); 372 373 /* Do the stats setup */ 374 ixv_save_stats(adapter); 375 ixv_init_stats(adapter); 376 ixv_add_stats_sysctls(adapter); 377 378 /* Register for VLAN events */ 379 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 380 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); 381 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 382 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 383 384 INIT_DEBUGOUT("ixv_attach: end"); 385 return (0); 386 387 err_late: 388 ixgbe_free_transmit_structures(adapter); 389 ixgbe_free_receive_structures(adapter); 390 err_out: 391 ixv_free_pci_resources(adapter); 392 return (error); 393 394 } 395 396 /********************************************************************* 397 * Device removal routine 398 * 399 * The detach entry point is called when the driver is being removed. 400 * This routine stops the adapter and deallocates all the resources 401 * that were allocated for driver operation. 402 * 403 * return 0 on success, positive on failure 404 *********************************************************************/ 405 406 static int 407 ixv_detach(device_t dev) 408 { 409 struct adapter *adapter = device_get_softc(dev); 410 struct ix_queue *que = adapter->queues; 411 412 INIT_DEBUGOUT("ixv_detach: begin"); 413 414 /* Make sure VLANS are not using driver */ 415 if (adapter->ifp->if_vlantrunk != NULL) { 416 device_printf(dev,"Vlan in use, detach first\n"); 417 return (EBUSY); 418 } 419 420 IXGBE_CORE_LOCK(adapter); 421 ixv_stop(adapter); 422 IXGBE_CORE_UNLOCK(adapter); 423 424 for (int i = 0; i < adapter->num_queues; i++, que++) { 425 if (que->tq) { 426 struct tx_ring *txr = que->txr; 427 taskqueue_drain(que->tq, &txr->txq_task); 428 taskqueue_drain(que->tq, &que->que_task); 429 taskqueue_free(que->tq); 430 } 431 } 432 433 /* Drain the Mailbox(link) queue */ 434 if (adapter->tq) { 435 taskqueue_drain(adapter->tq, &adapter->link_task); 436 taskqueue_free(adapter->tq); 437 } 438 439 /* Unregister VLAN events */ 440 if (adapter->vlan_attach != NULL) 441 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); 442 if (adapter->vlan_detach != NULL) 443 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); 444 445 ether_ifdetach(adapter->ifp); 446 callout_drain(&adapter->timer); 447 ixv_free_pci_resources(adapter); 448 bus_generic_detach(dev); 449 if_free(adapter->ifp); 450 451 ixgbe_free_transmit_structures(adapter); 452 ixgbe_free_receive_structures(adapter); 453 454 IXGBE_CORE_LOCK_DESTROY(adapter); 455 return (0); 456 } 457 458 /********************************************************************* 459 * 460 * Shutdown entry point 461 * 462 **********************************************************************/ 463 static int 464 ixv_shutdown(device_t dev) 465 { 466 struct adapter *adapter = device_get_softc(dev); 467 IXGBE_CORE_LOCK(adapter); 468 ixv_stop(adapter); 469 IXGBE_CORE_UNLOCK(adapter); 470 return (0); 471 } 472 473 474 /********************************************************************* 475 * Ioctl entry point 476 * 477 * ixv_ioctl is called when the user wants to configure the 478 * interface. 479 * 480 * return 0 on success, positive on failure 481 **********************************************************************/ 482 483 static int 484 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data) 485 { 486 struct adapter *adapter = ifp->if_softc; 487 struct ifreq *ifr = (struct ifreq *) data; 488 #if defined(INET) || defined(INET6) 489 struct ifaddr *ifa = (struct ifaddr *) data; 490 bool avoid_reset = FALSE; 491 #endif 492 int error = 0; 493 494 switch (command) { 495 496 case SIOCSIFADDR: 497 #ifdef INET 498 if (ifa->ifa_addr->sa_family == AF_INET) 499 avoid_reset = TRUE; 500 #endif 501 #ifdef INET6 502 if (ifa->ifa_addr->sa_family == AF_INET6) 503 avoid_reset = TRUE; 504 #endif 505 #if defined(INET) || defined(INET6) 506 /* 507 ** Calling init results in link renegotiation, 508 ** so we avoid doing it when possible. 509 */ 510 if (avoid_reset) { 511 ifp->if_flags |= IFF_UP; 512 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 513 ixv_init(adapter); 514 if (!(ifp->if_flags & IFF_NOARP)) 515 arp_ifinit(ifp, ifa); 516 } else 517 error = ether_ioctl(ifp, command, data); 518 break; 519 #endif 520 case SIOCSIFMTU: 521 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 522 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) { 523 error = EINVAL; 524 } else { 525 IXGBE_CORE_LOCK(adapter); 526 ifp->if_mtu = ifr->ifr_mtu; 527 adapter->max_frame_size = 528 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 529 ixv_init_locked(adapter); 530 IXGBE_CORE_UNLOCK(adapter); 531 } 532 break; 533 case SIOCSIFFLAGS: 534 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 535 IXGBE_CORE_LOCK(adapter); 536 if (ifp->if_flags & IFF_UP) { 537 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 538 ixv_init_locked(adapter); 539 } else 540 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 541 ixv_stop(adapter); 542 adapter->if_flags = ifp->if_flags; 543 IXGBE_CORE_UNLOCK(adapter); 544 break; 545 case SIOCADDMULTI: 546 case SIOCDELMULTI: 547 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); 548 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 549 IXGBE_CORE_LOCK(adapter); 550 ixv_disable_intr(adapter); 551 ixv_set_multi(adapter); 552 ixv_enable_intr(adapter); 553 IXGBE_CORE_UNLOCK(adapter); 554 } 555 break; 556 case SIOCSIFMEDIA: 557 case SIOCGIFMEDIA: 558 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 559 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 560 break; 561 case SIOCSIFCAP: 562 { 563 int mask = ifr->ifr_reqcap ^ ifp->if_capenable; 564 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 565 if (mask & IFCAP_HWCSUM) 566 ifp->if_capenable ^= IFCAP_HWCSUM; 567 if (mask & IFCAP_TSO4) 568 ifp->if_capenable ^= IFCAP_TSO4; 569 if (mask & IFCAP_LRO) 570 ifp->if_capenable ^= IFCAP_LRO; 571 if (mask & IFCAP_VLAN_HWTAGGING) 572 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 573 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 574 IXGBE_CORE_LOCK(adapter); 575 ixv_init_locked(adapter); 576 IXGBE_CORE_UNLOCK(adapter); 577 } 578 VLAN_CAPABILITIES(ifp); 579 break; 580 } 581 582 default: 583 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command); 584 error = ether_ioctl(ifp, command, data); 585 break; 586 } 587 588 return (error); 589 } 590 591 /********************************************************************* 592 * Init entry point 593 * 594 * This routine is used in two ways. It is used by the stack as 595 * init entry point in network interface structure. It is also used 596 * by the driver as a hw/sw initialization routine to get to a 597 * consistent state. 598 * 599 * return 0 on success, positive on failure 600 **********************************************************************/ 601 #define IXGBE_MHADD_MFS_SHIFT 16 602 603 static void 604 ixv_init_locked(struct adapter *adapter) 605 { 606 struct ifnet *ifp = adapter->ifp; 607 device_t dev = adapter->dev; 608 struct ixgbe_hw *hw = &adapter->hw; 609 u32 mhadd, gpie; 610 611 INIT_DEBUGOUT("ixv_init: begin"); 612 mtx_assert(&adapter->core_mtx, MA_OWNED); 613 hw->adapter_stopped = FALSE; 614 ixgbe_stop_adapter(hw); 615 callout_stop(&adapter->timer); 616 617 /* reprogram the RAR[0] in case user changed it. */ 618 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 619 620 /* Get the latest mac address, User can use a LAA */ 621 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr, 622 IXGBE_ETH_LENGTH_OF_ADDRESS); 623 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1); 624 hw->addr_ctrl.rar_used_count = 1; 625 626 /* Prepare transmit descriptors and buffers */ 627 if (ixgbe_setup_transmit_structures(adapter)) { 628 device_printf(dev,"Could not setup transmit structures\n"); 629 ixv_stop(adapter); 630 return; 631 } 632 633 ixgbe_reset_hw(hw); 634 ixv_initialize_transmit_units(adapter); 635 636 /* Setup Multicast table */ 637 ixv_set_multi(adapter); 638 639 /* 640 ** Determine the correct mbuf pool 641 ** for doing jumbo/headersplit 642 */ 643 if (ifp->if_mtu > ETHERMTU) 644 adapter->rx_mbuf_sz = MJUMPAGESIZE; 645 else 646 adapter->rx_mbuf_sz = MCLBYTES; 647 648 /* Prepare receive descriptors and buffers */ 649 if (ixgbe_setup_receive_structures(adapter)) { 650 device_printf(dev,"Could not setup receive structures\n"); 651 ixv_stop(adapter); 652 return; 653 } 654 655 /* Configure RX settings */ 656 ixv_initialize_receive_units(adapter); 657 658 /* Enable Enhanced MSIX mode */ 659 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); 660 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME; 661 gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD; 662 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 663 664 /* Set the various hardware offload abilities */ 665 ifp->if_hwassist = 0; 666 if (ifp->if_capenable & IFCAP_TSO4) 667 ifp->if_hwassist |= CSUM_TSO; 668 if (ifp->if_capenable & IFCAP_TXCSUM) { 669 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); 670 #if __FreeBSD_version >= 800000 671 ifp->if_hwassist |= CSUM_SCTP; 672 #endif 673 } 674 675 /* Set MTU size */ 676 if (ifp->if_mtu > ETHERMTU) { 677 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 678 mhadd &= ~IXGBE_MHADD_MFS_MASK; 679 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 680 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 681 } 682 683 /* Set up VLAN offload and filter */ 684 ixv_setup_vlan_support(adapter); 685 686 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); 687 688 /* Set up MSI/X routing */ 689 ixv_configure_ivars(adapter); 690 691 /* Set up auto-mask */ 692 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE); 693 694 /* Set moderation on the Link interrupt */ 695 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR); 696 697 /* Stats init */ 698 ixv_init_stats(adapter); 699 700 /* Config/Enable Link */ 701 ixv_config_link(adapter); 702 703 /* And now turn on interrupts */ 704 ixv_enable_intr(adapter); 705 706 /* Now inform the stack we're ready */ 707 ifp->if_drv_flags |= IFF_DRV_RUNNING; 708 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 709 710 return; 711 } 712 713 static void 714 ixv_init(void *arg) 715 { 716 struct adapter *adapter = arg; 717 718 IXGBE_CORE_LOCK(adapter); 719 ixv_init_locked(adapter); 720 IXGBE_CORE_UNLOCK(adapter); 721 return; 722 } 723 724 725 /* 726 ** 727 ** MSIX Interrupt Handlers and Tasklets 728 ** 729 */ 730 731 static inline void 732 ixv_enable_queue(struct adapter *adapter, u32 vector) 733 { 734 struct ixgbe_hw *hw = &adapter->hw; 735 u32 queue = 1 << vector; 736 u32 mask; 737 738 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 739 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 740 } 741 742 static inline void 743 ixv_disable_queue(struct adapter *adapter, u32 vector) 744 { 745 struct ixgbe_hw *hw = &adapter->hw; 746 u64 queue = (u64)(1 << vector); 747 u32 mask; 748 749 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 750 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); 751 } 752 753 static inline void 754 ixv_rearm_queues(struct adapter *adapter, u64 queues) 755 { 756 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues); 757 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask); 758 } 759 760 761 static void 762 ixv_handle_que(void *context, int pending) 763 { 764 struct ix_queue *que = context; 765 struct adapter *adapter = que->adapter; 766 struct tx_ring *txr = que->txr; 767 struct ifnet *ifp = adapter->ifp; 768 bool more; 769 770 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 771 more = ixgbe_rxeof(que); 772 IXGBE_TX_LOCK(txr); 773 ixgbe_txeof(txr); 774 #if __FreeBSD_version >= 800000 775 if (!drbr_empty(ifp, txr->br)) 776 ixgbe_mq_start_locked(ifp, txr); 777 #else 778 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 779 ixgbe_start_locked(txr, ifp); 780 #endif 781 IXGBE_TX_UNLOCK(txr); 782 if (more) { 783 taskqueue_enqueue(que->tq, &que->que_task); 784 return; 785 } 786 } 787 788 /* Reenable this interrupt */ 789 ixv_enable_queue(adapter, que->msix); 790 return; 791 } 792 793 /********************************************************************* 794 * 795 * MSI Queue Interrupt Service routine 796 * 797 **********************************************************************/ 798 void 799 ixv_msix_que(void *arg) 800 { 801 struct ix_queue *que = arg; 802 struct adapter *adapter = que->adapter; 803 struct ifnet *ifp = adapter->ifp; 804 struct tx_ring *txr = que->txr; 805 struct rx_ring *rxr = que->rxr; 806 bool more; 807 u32 newitr = 0; 808 809 ixv_disable_queue(adapter, que->msix); 810 ++que->irqs; 811 812 more = ixgbe_rxeof(que); 813 814 IXGBE_TX_LOCK(txr); 815 ixgbe_txeof(txr); 816 /* 817 ** Make certain that if the stack 818 ** has anything queued the task gets 819 ** scheduled to handle it. 820 */ 821 #ifdef IXGBE_LEGACY_TX 822 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd)) 823 ixgbe_start_locked(txr, ifp); 824 #else 825 if (!drbr_empty(adapter->ifp, txr->br)) 826 ixgbe_mq_start_locked(ifp, txr); 827 #endif 828 IXGBE_TX_UNLOCK(txr); 829 830 /* Do AIM now? */ 831 832 if (ixv_enable_aim == FALSE) 833 goto no_calc; 834 /* 835 ** Do Adaptive Interrupt Moderation: 836 ** - Write out last calculated setting 837 ** - Calculate based on average size over 838 ** the last interval. 839 */ 840 if (que->eitr_setting) 841 IXGBE_WRITE_REG(&adapter->hw, 842 IXGBE_VTEITR(que->msix), 843 que->eitr_setting); 844 845 que->eitr_setting = 0; 846 847 /* Idle, do nothing */ 848 if ((txr->bytes == 0) && (rxr->bytes == 0)) 849 goto no_calc; 850 851 if ((txr->bytes) && (txr->packets)) 852 newitr = txr->bytes/txr->packets; 853 if ((rxr->bytes) && (rxr->packets)) 854 newitr = max(newitr, 855 (rxr->bytes / rxr->packets)); 856 newitr += 24; /* account for hardware frame, crc */ 857 858 /* set an upper boundary */ 859 newitr = min(newitr, 3000); 860 861 /* Be nice to the mid range */ 862 if ((newitr > 300) && (newitr < 1200)) 863 newitr = (newitr / 3); 864 else 865 newitr = (newitr / 2); 866 867 newitr |= newitr << 16; 868 869 /* save for next interrupt */ 870 que->eitr_setting = newitr; 871 872 /* Reset state */ 873 txr->bytes = 0; 874 txr->packets = 0; 875 rxr->bytes = 0; 876 rxr->packets = 0; 877 878 no_calc: 879 if (more) 880 taskqueue_enqueue(que->tq, &que->que_task); 881 else /* Reenable this interrupt */ 882 ixv_enable_queue(adapter, que->msix); 883 return; 884 } 885 886 static void 887 ixv_msix_mbx(void *arg) 888 { 889 struct adapter *adapter = arg; 890 struct ixgbe_hw *hw = &adapter->hw; 891 u32 reg; 892 893 ++adapter->link_irq; 894 895 /* First get the cause */ 896 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS); 897 /* Clear interrupt with write */ 898 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg); 899 900 /* Link status change */ 901 if (reg & IXGBE_EICR_LSC) 902 taskqueue_enqueue(adapter->tq, &adapter->link_task); 903 904 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER); 905 return; 906 } 907 908 /********************************************************************* 909 * 910 * Media Ioctl callback 911 * 912 * This routine is called whenever the user queries the status of 913 * the interface using ifconfig. 914 * 915 **********************************************************************/ 916 static void 917 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) 918 { 919 struct adapter *adapter = ifp->if_softc; 920 921 INIT_DEBUGOUT("ixv_media_status: begin"); 922 IXGBE_CORE_LOCK(adapter); 923 ixv_update_link_status(adapter); 924 925 ifmr->ifm_status = IFM_AVALID; 926 ifmr->ifm_active = IFM_ETHER; 927 928 if (!adapter->link_active) { 929 IXGBE_CORE_UNLOCK(adapter); 930 return; 931 } 932 933 ifmr->ifm_status |= IFM_ACTIVE; 934 935 switch (adapter->link_speed) { 936 case IXGBE_LINK_SPEED_1GB_FULL: 937 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 938 break; 939 case IXGBE_LINK_SPEED_10GB_FULL: 940 ifmr->ifm_active |= IFM_FDX; 941 break; 942 } 943 944 IXGBE_CORE_UNLOCK(adapter); 945 946 return; 947 } 948 949 /********************************************************************* 950 * 951 * Media Ioctl callback 952 * 953 * This routine is called when the user changes speed/duplex using 954 * media/mediopt option with ifconfig. 955 * 956 **********************************************************************/ 957 static int 958 ixv_media_change(struct ifnet * ifp) 959 { 960 struct adapter *adapter = ifp->if_softc; 961 struct ifmedia *ifm = &adapter->media; 962 963 INIT_DEBUGOUT("ixv_media_change: begin"); 964 965 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 966 return (EINVAL); 967 968 switch (IFM_SUBTYPE(ifm->ifm_media)) { 969 case IFM_AUTO: 970 break; 971 default: 972 device_printf(adapter->dev, "Only auto media type\n"); 973 return (EINVAL); 974 } 975 976 return (0); 977 } 978 979 980 /********************************************************************* 981 * Multicast Update 982 * 983 * This routine is called whenever multicast address list is updated. 984 * 985 **********************************************************************/ 986 #define IXGBE_RAR_ENTRIES 16 987 988 static void 989 ixv_set_multi(struct adapter *adapter) 990 { 991 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; 992 u8 *update_ptr; 993 struct ifmultiaddr *ifma; 994 int mcnt = 0; 995 struct ifnet *ifp = adapter->ifp; 996 997 IOCTL_DEBUGOUT("ixv_set_multi: begin"); 998 999 #if __FreeBSD_version < 800000 1000 IF_ADDR_LOCK(ifp); 1001 #else 1002 if_maddr_rlock(ifp); 1003 #endif 1004 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1005 if (ifma->ifma_addr->sa_family != AF_LINK) 1006 continue; 1007 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 1008 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], 1009 IXGBE_ETH_LENGTH_OF_ADDRESS); 1010 mcnt++; 1011 } 1012 #if __FreeBSD_version < 800000 1013 IF_ADDR_UNLOCK(ifp); 1014 #else 1015 if_maddr_runlock(ifp); 1016 #endif 1017 1018 update_ptr = mta; 1019 1020 ixgbe_update_mc_addr_list(&adapter->hw, 1021 update_ptr, mcnt, ixv_mc_array_itr, TRUE); 1022 1023 return; 1024 } 1025 1026 /* 1027 * This is an iterator function now needed by the multicast 1028 * shared code. It simply feeds the shared code routine the 1029 * addresses in the array of ixv_set_multi() one by one. 1030 */ 1031 static u8 * 1032 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 1033 { 1034 u8 *addr = *update_ptr; 1035 u8 *newptr; 1036 *vmdq = 0; 1037 1038 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 1039 *update_ptr = newptr; 1040 return addr; 1041 } 1042 1043 /********************************************************************* 1044 * Timer routine 1045 * 1046 * This routine checks for link status,updates statistics, 1047 * and runs the watchdog check. 1048 * 1049 **********************************************************************/ 1050 1051 static void 1052 ixv_local_timer(void *arg) 1053 { 1054 struct adapter *adapter = arg; 1055 device_t dev = adapter->dev; 1056 struct ix_queue *que = adapter->queues; 1057 u64 queues = 0; 1058 int hung = 0; 1059 1060 mtx_assert(&adapter->core_mtx, MA_OWNED); 1061 1062 ixv_update_link_status(adapter); 1063 1064 /* Stats Update */ 1065 ixv_update_stats(adapter); 1066 1067 /* 1068 ** Check the TX queues status 1069 ** - mark hung queues so we don't schedule on them 1070 ** - watchdog only if all queues show hung 1071 */ 1072 for (int i = 0; i < adapter->num_queues; i++, que++) { 1073 /* Keep track of queues with work for soft irq */ 1074 if (que->txr->busy) 1075 queues |= ((u64)1 << que->me); 1076 /* 1077 ** Each time txeof runs without cleaning, but there 1078 ** are uncleaned descriptors it increments busy. If 1079 ** we get to the MAX we declare it hung. 1080 */ 1081 if (que->busy == IXGBE_QUEUE_HUNG) { 1082 ++hung; 1083 /* Mark the queue as inactive */ 1084 adapter->active_queues &= ~((u64)1 << que->me); 1085 continue; 1086 } else { 1087 /* Check if we've come back from hung */ 1088 if ((adapter->active_queues & ((u64)1 << que->me)) == 0) 1089 adapter->active_queues |= ((u64)1 << que->me); 1090 } 1091 if (que->busy >= IXGBE_MAX_TX_BUSY) { 1092 device_printf(dev,"Warning queue %d " 1093 "appears to be hung!\n", i); 1094 que->txr->busy = IXGBE_QUEUE_HUNG; 1095 ++hung; 1096 } 1097 1098 } 1099 1100 /* Only truely watchdog if all queues show hung */ 1101 if (hung == adapter->num_queues) 1102 goto watchdog; 1103 else if (queues != 0) { /* Force an IRQ on queues with work */ 1104 ixv_rearm_queues(adapter, queues); 1105 } 1106 1107 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); 1108 return; 1109 1110 watchdog: 1111 device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); 1112 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1113 adapter->watchdog_events++; 1114 ixv_init_locked(adapter); 1115 } 1116 1117 /* 1118 ** Note: this routine updates the OS on the link state 1119 ** the real check of the hardware only happens with 1120 ** a link interrupt. 1121 */ 1122 static void 1123 ixv_update_link_status(struct adapter *adapter) 1124 { 1125 struct ifnet *ifp = adapter->ifp; 1126 device_t dev = adapter->dev; 1127 1128 if (adapter->link_up){ 1129 if (adapter->link_active == FALSE) { 1130 if (bootverbose) 1131 device_printf(dev,"Link is up %d Gbps %s \n", 1132 ((adapter->link_speed == 128)? 10:1), 1133 "Full Duplex"); 1134 adapter->link_active = TRUE; 1135 if_link_state_change(ifp, LINK_STATE_UP); 1136 } 1137 } else { /* Link down */ 1138 if (adapter->link_active == TRUE) { 1139 if (bootverbose) 1140 device_printf(dev,"Link is Down\n"); 1141 if_link_state_change(ifp, LINK_STATE_DOWN); 1142 adapter->link_active = FALSE; 1143 } 1144 } 1145 1146 return; 1147 } 1148 1149 1150 /********************************************************************* 1151 * 1152 * This routine disables all traffic on the adapter by issuing a 1153 * global reset on the MAC and deallocates TX/RX buffers. 1154 * 1155 **********************************************************************/ 1156 1157 static void 1158 ixv_stop(void *arg) 1159 { 1160 struct ifnet *ifp; 1161 struct adapter *adapter = arg; 1162 struct ixgbe_hw *hw = &adapter->hw; 1163 ifp = adapter->ifp; 1164 1165 mtx_assert(&adapter->core_mtx, MA_OWNED); 1166 1167 INIT_DEBUGOUT("ixv_stop: begin\n"); 1168 ixv_disable_intr(adapter); 1169 1170 /* Tell the stack that the interface is no longer active */ 1171 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1172 1173 ixgbe_reset_hw(hw); 1174 adapter->hw.adapter_stopped = FALSE; 1175 ixgbe_stop_adapter(hw); 1176 callout_stop(&adapter->timer); 1177 1178 /* reprogram the RAR[0] in case user changed it. */ 1179 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1180 1181 return; 1182 } 1183 1184 1185 /********************************************************************* 1186 * 1187 * Determine hardware revision. 1188 * 1189 **********************************************************************/ 1190 static void 1191 ixv_identify_hardware(struct adapter *adapter) 1192 { 1193 device_t dev = adapter->dev; 1194 struct ixgbe_hw *hw = &adapter->hw; 1195 1196 /* 1197 ** Make sure BUSMASTER is set, on a VM under 1198 ** KVM it may not be and will break things. 1199 */ 1200 pci_enable_busmaster(dev); 1201 1202 /* Save off the information about this board */ 1203 hw->vendor_id = pci_get_vendor(dev); 1204 hw->device_id = pci_get_device(dev); 1205 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); 1206 hw->subsystem_vendor_id = 1207 pci_read_config(dev, PCIR_SUBVEND_0, 2); 1208 hw->subsystem_device_id = 1209 pci_read_config(dev, PCIR_SUBDEV_0, 2); 1210 1211 /* We need this to determine device-specific things */ 1212 ixgbe_set_mac_type(hw); 1213 1214 /* Set the right number of segments */ 1215 adapter->num_segs = IXGBE_82599_SCATTER; 1216 1217 return; 1218 } 1219 1220 /********************************************************************* 1221 * 1222 * Setup MSIX Interrupt resources and handlers 1223 * 1224 **********************************************************************/ 1225 static int 1226 ixv_allocate_msix(struct adapter *adapter) 1227 { 1228 device_t dev = adapter->dev; 1229 struct ix_queue *que = adapter->queues; 1230 struct tx_ring *txr = adapter->tx_rings; 1231 int error, rid, vector = 0; 1232 1233 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { 1234 rid = vector + 1; 1235 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1236 RF_SHAREABLE | RF_ACTIVE); 1237 if (que->res == NULL) { 1238 device_printf(dev,"Unable to allocate" 1239 " bus resource: que interrupt [%d]\n", vector); 1240 return (ENXIO); 1241 } 1242 /* Set the handler function */ 1243 error = bus_setup_intr(dev, que->res, 1244 INTR_TYPE_NET | INTR_MPSAFE, NULL, 1245 ixv_msix_que, que, &que->tag); 1246 if (error) { 1247 que->res = NULL; 1248 device_printf(dev, "Failed to register QUE handler"); 1249 return (error); 1250 } 1251 #if __FreeBSD_version >= 800504 1252 bus_describe_intr(dev, que->res, que->tag, "que %d", i); 1253 #endif 1254 que->msix = vector; 1255 adapter->active_queues |= (u64)(1 << que->msix); 1256 /* 1257 ** Bind the msix vector, and thus the 1258 ** ring to the corresponding cpu. 1259 */ 1260 if (adapter->num_queues > 1) 1261 bus_bind_intr(dev, que->res, i); 1262 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); 1263 TASK_INIT(&que->que_task, 0, ixv_handle_que, que); 1264 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT, 1265 taskqueue_thread_enqueue, &que->tq); 1266 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", 1267 device_get_nameunit(adapter->dev)); 1268 } 1269 1270 /* and Mailbox */ 1271 rid = vector + 1; 1272 adapter->res = bus_alloc_resource_any(dev, 1273 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 1274 if (!adapter->res) { 1275 device_printf(dev,"Unable to allocate" 1276 " bus resource: MBX interrupt [%d]\n", rid); 1277 return (ENXIO); 1278 } 1279 /* Set the mbx handler function */ 1280 error = bus_setup_intr(dev, adapter->res, 1281 INTR_TYPE_NET | INTR_MPSAFE, NULL, 1282 ixv_msix_mbx, adapter, &adapter->tag); 1283 if (error) { 1284 adapter->res = NULL; 1285 device_printf(dev, "Failed to register LINK handler"); 1286 return (error); 1287 } 1288 #if __FreeBSD_version >= 800504 1289 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx"); 1290 #endif 1291 adapter->vector = vector; 1292 /* Tasklets for Mailbox */ 1293 TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter); 1294 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT, 1295 taskqueue_thread_enqueue, &adapter->tq); 1296 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq", 1297 device_get_nameunit(adapter->dev)); 1298 /* 1299 ** Due to a broken design QEMU will fail to properly 1300 ** enable the guest for MSIX unless the vectors in 1301 ** the table are all set up, so we must rewrite the 1302 ** ENABLE in the MSIX control register again at this 1303 ** point to cause it to successfully initialize us. 1304 */ 1305 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { 1306 int msix_ctrl; 1307 pci_find_cap(dev, PCIY_MSIX, &rid); 1308 rid += PCIR_MSIX_CTRL; 1309 msix_ctrl = pci_read_config(dev, rid, 2); 1310 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; 1311 pci_write_config(dev, rid, msix_ctrl, 2); 1312 } 1313 1314 return (0); 1315 } 1316 1317 /* 1318 * Setup MSIX resources, note that the VF 1319 * device MUST use MSIX, there is no fallback. 1320 */ 1321 static int 1322 ixv_setup_msix(struct adapter *adapter) 1323 { 1324 device_t dev = adapter->dev; 1325 int rid, want, msgs; 1326 1327 1328 /* Must have at least 2 MSIX vectors */ 1329 msgs = pci_msix_count(dev); 1330 if (msgs < 2) 1331 goto out; 1332 rid = PCIR_BAR(3); 1333 adapter->msix_mem = bus_alloc_resource_any(dev, 1334 SYS_RES_MEMORY, &rid, RF_ACTIVE); 1335 if (adapter->msix_mem == NULL) { 1336 device_printf(adapter->dev, 1337 "Unable to map MSIX table \n"); 1338 goto out; 1339 } 1340 1341 /* 1342 ** Want vectors for the queues, 1343 ** plus an additional for mailbox. 1344 */ 1345 want = adapter->num_queues + 1; 1346 if (want > msgs) { 1347 want = msgs; 1348 adapter->num_queues = msgs - 1; 1349 } else 1350 msgs = want; 1351 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) { 1352 device_printf(adapter->dev, 1353 "Using MSIX interrupts with %d vectors\n", want); 1354 return (want); 1355 } 1356 /* Release in case alloc was insufficient */ 1357 pci_release_msi(dev); 1358 out: 1359 if (adapter->msix_mem != NULL) { 1360 bus_release_resource(dev, SYS_RES_MEMORY, 1361 rid, adapter->msix_mem); 1362 adapter->msix_mem = NULL; 1363 } 1364 device_printf(adapter->dev,"MSIX config error\n"); 1365 return (ENXIO); 1366 } 1367 1368 1369 static int 1370 ixv_allocate_pci_resources(struct adapter *adapter) 1371 { 1372 int rid; 1373 device_t dev = adapter->dev; 1374 1375 rid = PCIR_BAR(0); 1376 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1377 &rid, RF_ACTIVE); 1378 1379 if (!(adapter->pci_mem)) { 1380 device_printf(dev,"Unable to allocate bus resource: memory\n"); 1381 return (ENXIO); 1382 } 1383 1384 adapter->osdep.mem_bus_space_tag = 1385 rman_get_bustag(adapter->pci_mem); 1386 adapter->osdep.mem_bus_space_handle = 1387 rman_get_bushandle(adapter->pci_mem); 1388 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; 1389 1390 /* Pick up the tuneable queues */ 1391 adapter->num_queues = ixv_num_queues; 1392 1393 adapter->hw.back = &adapter->osdep; 1394 1395 /* 1396 ** Now setup MSI/X, should 1397 ** return us the number of 1398 ** configured vectors. 1399 */ 1400 adapter->msix = ixv_setup_msix(adapter); 1401 if (adapter->msix == ENXIO) 1402 return (ENXIO); 1403 else 1404 return (0); 1405 } 1406 1407 static void 1408 ixv_free_pci_resources(struct adapter * adapter) 1409 { 1410 struct ix_queue *que = adapter->queues; 1411 device_t dev = adapter->dev; 1412 int rid, memrid; 1413 1414 memrid = PCIR_BAR(MSIX_82598_BAR); 1415 1416 /* 1417 ** There is a slight possibility of a failure mode 1418 ** in attach that will result in entering this function 1419 ** before interrupt resources have been initialized, and 1420 ** in that case we do not want to execute the loops below 1421 ** We can detect this reliably by the state of the adapter 1422 ** res pointer. 1423 */ 1424 if (adapter->res == NULL) 1425 goto mem; 1426 1427 /* 1428 ** Release all msix queue resources: 1429 */ 1430 for (int i = 0; i < adapter->num_queues; i++, que++) { 1431 rid = que->msix + 1; 1432 if (que->tag != NULL) { 1433 bus_teardown_intr(dev, que->res, que->tag); 1434 que->tag = NULL; 1435 } 1436 if (que->res != NULL) 1437 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); 1438 } 1439 1440 1441 /* Clean the Legacy or Link interrupt last */ 1442 if (adapter->vector) /* we are doing MSIX */ 1443 rid = adapter->vector + 1; 1444 else 1445 (adapter->msix != 0) ? (rid = 1):(rid = 0); 1446 1447 if (adapter->tag != NULL) { 1448 bus_teardown_intr(dev, adapter->res, adapter->tag); 1449 adapter->tag = NULL; 1450 } 1451 if (adapter->res != NULL) 1452 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); 1453 1454 mem: 1455 if (adapter->msix) 1456 pci_release_msi(dev); 1457 1458 if (adapter->msix_mem != NULL) 1459 bus_release_resource(dev, SYS_RES_MEMORY, 1460 memrid, adapter->msix_mem); 1461 1462 if (adapter->pci_mem != NULL) 1463 bus_release_resource(dev, SYS_RES_MEMORY, 1464 PCIR_BAR(0), adapter->pci_mem); 1465 1466 return; 1467 } 1468 1469 /********************************************************************* 1470 * 1471 * Setup networking device structure and register an interface. 1472 * 1473 **********************************************************************/ 1474 static void 1475 ixv_setup_interface(device_t dev, struct adapter *adapter) 1476 { 1477 struct ifnet *ifp; 1478 1479 INIT_DEBUGOUT("ixv_setup_interface: begin"); 1480 1481 ifp = adapter->ifp = if_alloc(IFT_ETHER); 1482 if (ifp == NULL) 1483 panic("%s: can not if_alloc()\n", device_get_nameunit(dev)); 1484 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1485 ifp->if_baudrate = 1000000000; 1486 ifp->if_init = ixv_init; 1487 ifp->if_softc = adapter; 1488 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1489 ifp->if_ioctl = ixv_ioctl; 1490 #if __FreeBSD_version >= 800000 1491 ifp->if_transmit = ixgbe_mq_start; 1492 ifp->if_qflush = ixgbe_qflush; 1493 #else 1494 ifp->if_start = ixgbe_start; 1495 #endif 1496 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2; 1497 1498 ether_ifattach(ifp, adapter->hw.mac.addr); 1499 1500 adapter->max_frame_size = 1501 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1502 1503 /* 1504 * Tell the upper layer(s) we support long frames. 1505 */ 1506 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1507 1508 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM; 1509 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 1510 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING 1511 | IFCAP_VLAN_HWTSO 1512 | IFCAP_VLAN_MTU; 1513 ifp->if_capabilities |= IFCAP_LRO; 1514 ifp->if_capenable = ifp->if_capabilities; 1515 1516 /* 1517 * Specify the media types supported by this adapter and register 1518 * callbacks to update media and link information 1519 */ 1520 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change, 1521 ixv_media_status); 1522 ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL); 1523 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1524 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 1525 1526 return; 1527 } 1528 1529 static void 1530 ixv_config_link(struct adapter *adapter) 1531 { 1532 struct ixgbe_hw *hw = &adapter->hw; 1533 u32 autoneg, err = 0; 1534 1535 if (hw->mac.ops.check_link) 1536 err = hw->mac.ops.check_link(hw, &autoneg, 1537 &adapter->link_up, FALSE); 1538 if (err) 1539 goto out; 1540 1541 if (hw->mac.ops.setup_link) 1542 err = hw->mac.ops.setup_link(hw, 1543 autoneg, adapter->link_up); 1544 out: 1545 return; 1546 } 1547 1548 1549 /********************************************************************* 1550 * 1551 * Enable transmit unit. 1552 * 1553 **********************************************************************/ 1554 static void 1555 ixv_initialize_transmit_units(struct adapter *adapter) 1556 { 1557 struct tx_ring *txr = adapter->tx_rings; 1558 struct ixgbe_hw *hw = &adapter->hw; 1559 1560 1561 for (int i = 0; i < adapter->num_queues; i++, txr++) { 1562 u64 tdba = txr->txdma.dma_paddr; 1563 u32 txctrl, txdctl; 1564 1565 /* Set WTHRESH to 8, burst writeback */ 1566 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); 1567 txdctl |= (8 << 16); 1568 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); 1569 1570 /* Set the HW Tx Head and Tail indices */ 1571 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0); 1572 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0); 1573 1574 /* Set Tx Tail register */ 1575 txr->tail = IXGBE_VFTDT(i); 1576 1577 /* Set the processing limit */ 1578 txr->process_limit = ixv_tx_process_limit; 1579 1580 /* Set Ring parameters */ 1581 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i), 1582 (tdba & 0x00000000ffffffffULL)); 1583 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32)); 1584 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i), 1585 adapter->num_tx_desc * 1586 sizeof(struct ixgbe_legacy_tx_desc)); 1587 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i)); 1588 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 1589 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl); 1590 1591 /* Now enable */ 1592 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); 1593 txdctl |= IXGBE_TXDCTL_ENABLE; 1594 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); 1595 } 1596 1597 return; 1598 } 1599 1600 1601 /********************************************************************* 1602 * 1603 * Setup receive registers and features. 1604 * 1605 **********************************************************************/ 1606 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1607 1608 static void 1609 ixv_initialize_receive_units(struct adapter *adapter) 1610 { 1611 struct rx_ring *rxr = adapter->rx_rings; 1612 struct ixgbe_hw *hw = &adapter->hw; 1613 struct ifnet *ifp = adapter->ifp; 1614 u32 bufsz, rxcsum, psrtype; 1615 int max_frame; 1616 1617 if (ifp->if_mtu > ETHERMTU) 1618 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1619 else 1620 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1621 1622 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 1623 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | 1624 IXGBE_PSRTYPE_L2HDR; 1625 1626 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1627 1628 /* Tell PF our expected packet-size */ 1629 max_frame = ifp->if_mtu + IXGBE_MTU_HDR; 1630 ixgbevf_rlpml_set_vf(hw, max_frame); 1631 1632 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 1633 u64 rdba = rxr->rxdma.dma_paddr; 1634 u32 reg, rxdctl; 1635 1636 /* Disable the queue */ 1637 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); 1638 rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME); 1639 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl); 1640 for (int j = 0; j < 10; j++) { 1641 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) & 1642 IXGBE_RXDCTL_ENABLE) 1643 msec_delay(1); 1644 else 1645 break; 1646 } 1647 wmb(); 1648 /* Setup the Base and Length of the Rx Descriptor Ring */ 1649 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i), 1650 (rdba & 0x00000000ffffffffULL)); 1651 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), 1652 (rdba >> 32)); 1653 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i), 1654 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 1655 1656 /* Reset the ring indices */ 1657 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); 1658 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0); 1659 1660 /* Set up the SRRCTL register */ 1661 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); 1662 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 1663 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 1664 reg |= bufsz; 1665 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1666 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg); 1667 1668 /* Set the Tail Pointer */ 1669 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 1670 adapter->num_rx_desc - 1); 1671 1672 /* Set the processing limit */ 1673 rxr->process_limit = ixv_rx_process_limit; 1674 1675 /* Capture Rx Tail index */ 1676 rxr->tail = IXGBE_VFRDT(rxr->me); 1677 1678 /* Do the queue enabling last */ 1679 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); 1680 rxdctl |= IXGBE_RXDCTL_ENABLE; 1681 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl); 1682 for (int k = 0; k < 10; k++) { 1683 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) & 1684 IXGBE_RXDCTL_ENABLE) 1685 break; 1686 else 1687 msec_delay(1); 1688 } 1689 wmb(); 1690 } 1691 1692 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 1693 1694 if (ifp->if_capenable & IFCAP_RXCSUM) 1695 rxcsum |= IXGBE_RXCSUM_PCSD; 1696 1697 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 1698 rxcsum |= IXGBE_RXCSUM_IPPCSE; 1699 1700 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 1701 1702 return; 1703 } 1704 1705 static void 1706 ixv_setup_vlan_support(struct adapter *adapter) 1707 { 1708 struct ixgbe_hw *hw = &adapter->hw; 1709 u32 ctrl, vid, vfta, retry; 1710 1711 1712 /* 1713 ** We get here thru init_locked, meaning 1714 ** a soft reset, this has already cleared 1715 ** the VFTA and other state, so if there 1716 ** have been no vlan's registered do nothing. 1717 */ 1718 if (adapter->num_vlans == 0) 1719 return; 1720 1721 /* Enable the queues */ 1722 for (int i = 0; i < adapter->num_queues; i++) { 1723 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); 1724 ctrl |= IXGBE_RXDCTL_VME; 1725 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl); 1726 } 1727 1728 /* 1729 ** A soft reset zero's out the VFTA, so 1730 ** we need to repopulate it now. 1731 */ 1732 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) { 1733 if (ixv_shadow_vfta[i] == 0) 1734 continue; 1735 vfta = ixv_shadow_vfta[i]; 1736 /* 1737 ** Reconstruct the vlan id's 1738 ** based on the bits set in each 1739 ** of the array ints. 1740 */ 1741 for ( int j = 0; j < 32; j++) { 1742 retry = 0; 1743 if ((vfta & (1 << j)) == 0) 1744 continue; 1745 vid = (i * 32) + j; 1746 /* Call the shared code mailbox routine */ 1747 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) { 1748 if (++retry > 5) 1749 break; 1750 } 1751 } 1752 } 1753 } 1754 1755 /* 1756 ** This routine is run via an vlan config EVENT, 1757 ** it enables us to use the HW Filter table since 1758 ** we can get the vlan id. This just creates the 1759 ** entry in the soft version of the VFTA, init will 1760 ** repopulate the real table. 1761 */ 1762 static void 1763 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) 1764 { 1765 struct adapter *adapter = ifp->if_softc; 1766 u16 index, bit; 1767 1768 if (ifp->if_softc != arg) /* Not our event */ 1769 return; 1770 1771 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 1772 return; 1773 1774 IXGBE_CORE_LOCK(adapter); 1775 index = (vtag >> 5) & 0x7F; 1776 bit = vtag & 0x1F; 1777 ixv_shadow_vfta[index] |= (1 << bit); 1778 ++adapter->num_vlans; 1779 /* Re-init to load the changes */ 1780 ixv_init_locked(adapter); 1781 IXGBE_CORE_UNLOCK(adapter); 1782 } 1783 1784 /* 1785 ** This routine is run via an vlan 1786 ** unconfig EVENT, remove our entry 1787 ** in the soft vfta. 1788 */ 1789 static void 1790 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 1791 { 1792 struct adapter *adapter = ifp->if_softc; 1793 u16 index, bit; 1794 1795 if (ifp->if_softc != arg) 1796 return; 1797 1798 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 1799 return; 1800 1801 IXGBE_CORE_LOCK(adapter); 1802 index = (vtag >> 5) & 0x7F; 1803 bit = vtag & 0x1F; 1804 ixv_shadow_vfta[index] &= ~(1 << bit); 1805 --adapter->num_vlans; 1806 /* Re-init to load the changes */ 1807 ixv_init_locked(adapter); 1808 IXGBE_CORE_UNLOCK(adapter); 1809 } 1810 1811 static void 1812 ixv_enable_intr(struct adapter *adapter) 1813 { 1814 struct ixgbe_hw *hw = &adapter->hw; 1815 struct ix_queue *que = adapter->queues; 1816 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 1817 1818 1819 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 1820 1821 mask = IXGBE_EIMS_ENABLE_MASK; 1822 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 1823 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); 1824 1825 for (int i = 0; i < adapter->num_queues; i++, que++) 1826 ixv_enable_queue(adapter, que->msix); 1827 1828 IXGBE_WRITE_FLUSH(hw); 1829 1830 return; 1831 } 1832 1833 static void 1834 ixv_disable_intr(struct adapter *adapter) 1835 { 1836 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0); 1837 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0); 1838 IXGBE_WRITE_FLUSH(&adapter->hw); 1839 return; 1840 } 1841 1842 /* 1843 ** Setup the correct IVAR register for a particular MSIX interrupt 1844 ** - entry is the register array entry 1845 ** - vector is the MSIX vector for this queue 1846 ** - type is RX/TX/MISC 1847 */ 1848 static void 1849 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 1850 { 1851 struct ixgbe_hw *hw = &adapter->hw; 1852 u32 ivar, index; 1853 1854 vector |= IXGBE_IVAR_ALLOC_VAL; 1855 1856 if (type == -1) { /* MISC IVAR */ 1857 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 1858 ivar &= ~0xFF; 1859 ivar |= vector; 1860 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 1861 } else { /* RX/TX IVARS */ 1862 index = (16 * (entry & 1)) + (8 * type); 1863 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); 1864 ivar &= ~(0xFF << index); 1865 ivar |= (vector << index); 1866 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); 1867 } 1868 } 1869 1870 static void 1871 ixv_configure_ivars(struct adapter *adapter) 1872 { 1873 struct ix_queue *que = adapter->queues; 1874 1875 for (int i = 0; i < adapter->num_queues; i++, que++) { 1876 /* First the RX queue entry */ 1877 ixv_set_ivar(adapter, i, que->msix, 0); 1878 /* ... and the TX */ 1879 ixv_set_ivar(adapter, i, que->msix, 1); 1880 /* Set an initial value in EITR */ 1881 IXGBE_WRITE_REG(&adapter->hw, 1882 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT); 1883 } 1884 1885 /* For the mailbox interrupt */ 1886 ixv_set_ivar(adapter, 1, adapter->vector, -1); 1887 } 1888 1889 1890 /* 1891 ** Tasklet handler for MSIX MBX interrupts 1892 ** - do outside interrupt since it might sleep 1893 */ 1894 static void 1895 ixv_handle_mbx(void *context, int pending) 1896 { 1897 struct adapter *adapter = context; 1898 1899 ixgbe_check_link(&adapter->hw, 1900 &adapter->link_speed, &adapter->link_up, 0); 1901 ixv_update_link_status(adapter); 1902 } 1903 1904 /* 1905 ** The VF stats registers never have a truely virgin 1906 ** starting point, so this routine tries to make an 1907 ** artificial one, marking ground zero on attach as 1908 ** it were. 1909 */ 1910 static void 1911 ixv_save_stats(struct adapter *adapter) 1912 { 1913 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) { 1914 adapter->stats.vf.saved_reset_vfgprc += 1915 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc; 1916 adapter->stats.vf.saved_reset_vfgptc += 1917 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc; 1918 adapter->stats.vf.saved_reset_vfgorc += 1919 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc; 1920 adapter->stats.vf.saved_reset_vfgotc += 1921 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc; 1922 adapter->stats.vf.saved_reset_vfmprc += 1923 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc; 1924 } 1925 } 1926 1927 static void 1928 ixv_init_stats(struct adapter *adapter) 1929 { 1930 struct ixgbe_hw *hw = &adapter->hw; 1931 1932 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1933 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1934 adapter->stats.vf.last_vfgorc |= 1935 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1936 1937 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1938 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1939 adapter->stats.vf.last_vfgotc |= 1940 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1941 1942 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1943 1944 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc; 1945 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc; 1946 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc; 1947 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc; 1948 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc; 1949 } 1950 1951 #define UPDATE_STAT_32(reg, last, count) \ 1952 { \ 1953 u32 current = IXGBE_READ_REG(hw, reg); \ 1954 if (current < last) \ 1955 count += 0x100000000LL; \ 1956 last = current; \ 1957 count &= 0xFFFFFFFF00000000LL; \ 1958 count |= current; \ 1959 } 1960 1961 #define UPDATE_STAT_36(lsb, msb, last, count) \ 1962 { \ 1963 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \ 1964 u64 cur_msb = IXGBE_READ_REG(hw, msb); \ 1965 u64 current = ((cur_msb << 32) | cur_lsb); \ 1966 if (current < last) \ 1967 count += 0x1000000000LL; \ 1968 last = current; \ 1969 count &= 0xFFFFFFF000000000LL; \ 1970 count |= current; \ 1971 } 1972 1973 /* 1974 ** ixv_update_stats - Update the board statistics counters. 1975 */ 1976 void 1977 ixv_update_stats(struct adapter *adapter) 1978 { 1979 struct ixgbe_hw *hw = &adapter->hw; 1980 1981 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc, 1982 adapter->stats.vf.vfgprc); 1983 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc, 1984 adapter->stats.vf.vfgptc); 1985 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 1986 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc); 1987 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 1988 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc); 1989 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc, 1990 adapter->stats.vf.vfmprc); 1991 } 1992 1993 /* 1994 * Add statistic sysctls for the VF. 1995 */ 1996 static void 1997 ixv_add_stats_sysctls(struct adapter *adapter) 1998 { 1999 device_t dev = adapter->dev; 2000 struct ix_queue *que = &adapter->queues[0]; 2001 struct tx_ring *txr = que->txr; 2002 struct rx_ring *rxr = que->rxr; 2003 2004 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2005 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 2006 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 2007 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2008 2009 struct sysctl_oid *stat_node, *queue_node; 2010 struct sysctl_oid_list *stat_list, *queue_list; 2011 2012 /* Driver Statistics */ 2013 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 2014 CTLFLAG_RD, &adapter->dropped_pkts, 2015 "Driver dropped packets"); 2016 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed", 2017 CTLFLAG_RD, &adapter->mbuf_defrag_failed, 2018 "m_defrag() failed"); 2019 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 2020 CTLFLAG_RD, &adapter->watchdog_events, 2021 "Watchdog timeouts"); 2022 2023 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", 2024 CTLFLAG_RD, NULL, 2025 "VF Statistics (read from HW registers)"); 2026 stat_list = SYSCTL_CHILDREN(stat_node); 2027 2028 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 2029 CTLFLAG_RD, &stats->vfgprc, 2030 "Good Packets Received"); 2031 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 2032 CTLFLAG_RD, &stats->vfgorc, 2033 "Good Octets Received"); 2034 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 2035 CTLFLAG_RD, &stats->vfmprc, 2036 "Multicast Packets Received"); 2037 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 2038 CTLFLAG_RD, &stats->vfgptc, 2039 "Good Packets Transmitted"); 2040 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 2041 CTLFLAG_RD, &stats->vfgotc, 2042 "Good Octets Transmitted"); 2043 2044 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que", 2045 CTLFLAG_RD, NULL, 2046 "Queue Statistics (collected by SW)"); 2047 queue_list = SYSCTL_CHILDREN(queue_node); 2048 2049 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 2050 CTLFLAG_RD, &(que->irqs), 2051 "IRQs on queue"); 2052 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs", 2053 CTLFLAG_RD, &(rxr->rx_irq), 2054 "RX irqs on queue"); 2055 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 2056 CTLFLAG_RD, &(rxr->rx_packets), 2057 "RX packets"); 2058 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 2059 CTLFLAG_RD, &(rxr->rx_bytes), 2060 "RX bytes"); 2061 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 2062 CTLFLAG_RD, &(rxr->rx_discarded), 2063 "Discarded RX packets"); 2064 2065 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 2066 CTLFLAG_RD, &(txr->total_packets), 2067 "TX Packets"); 2068 2069 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc", 2070 CTLFLAG_RD, &(txr->no_desc_avail), 2071 "# of times not enough descriptors were available during TX"); 2072 } 2073 2074 /********************************************************************** 2075 * 2076 * This routine is called only when em_display_debug_stats is enabled. 2077 * This routine provides a way to take a look at important statistics 2078 * maintained by the driver and hardware. 2079 * 2080 **********************************************************************/ 2081 static void 2082 ixv_print_debug_info(struct adapter *adapter) 2083 { 2084 device_t dev = adapter->dev; 2085 struct ixgbe_hw *hw = &adapter->hw; 2086 struct ix_queue *que = adapter->queues; 2087 struct rx_ring *rxr; 2088 struct tx_ring *txr; 2089 struct lro_ctrl *lro; 2090 2091 device_printf(dev,"Error Byte Count = %u \n", 2092 IXGBE_READ_REG(hw, IXGBE_ERRBC)); 2093 2094 for (int i = 0; i < adapter->num_queues; i++, que++) { 2095 txr = que->txr; 2096 rxr = que->rxr; 2097 lro = &rxr->lro; 2098 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n", 2099 que->msix, (long)que->irqs); 2100 device_printf(dev,"RX(%d) Packets Received: %lld\n", 2101 rxr->me, (long long)rxr->rx_packets); 2102 device_printf(dev,"RX(%d) Bytes Received: %lu\n", 2103 rxr->me, (long)rxr->rx_bytes); 2104 device_printf(dev,"RX(%d) LRO Queued= %d\n", 2105 rxr->me, lro->lro_queued); 2106 device_printf(dev,"RX(%d) LRO Flushed= %d\n", 2107 rxr->me, lro->lro_flushed); 2108 device_printf(dev,"TX(%d) Packets Sent: %lu\n", 2109 txr->me, (long)txr->total_packets); 2110 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n", 2111 txr->me, (long)txr->no_desc_avail); 2112 } 2113 2114 device_printf(dev,"MBX IRQ Handled: %lu\n", 2115 (long)adapter->link_irq); 2116 return; 2117 } 2118 2119 static int 2120 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) 2121 { 2122 int error, result; 2123 struct adapter *adapter; 2124 2125 result = -1; 2126 error = sysctl_handle_int(oidp, &result, 0, req); 2127 2128 if (error || !req->newptr) 2129 return (error); 2130 2131 if (result == 1) { 2132 adapter = (struct adapter *) arg1; 2133 ixv_print_debug_info(adapter); 2134 } 2135 return error; 2136 } 2137 2138