/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixgbe.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.4.0";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t);
static int	ixv_attach(device_t);
static int	ixv_detach(device_t);
static int	ixv_shutdown(device_t);
static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *);
static int	ixv_allocate_msix(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);

/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);

#ifdef DEV_NETMAP
/*
 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
 * if_ix.c.
 */
extern void ixgbe_netmap_attach(struct adapter *adapter);

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
/* XXX depend on 'ix' ? */
/*
** TUNABLE PARAMETERS:
*/

/* Number of Queues - do not exceed MSIX vectors - 1 */
static int ixv_num_queues = 1;
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it's off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table: the real filter table gets
** cleared during a soft reset, so we keep this
** copy to repopulate it.
*/
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
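/*
** Illustrative example (added for clarity, not part of the original
** sources): since these are declared with TUNABLE_INT, they are loader
** tunables and can be set from /boot/loader.conf before the module
** loads, e.g.:
**
**	hw.ixv.num_queues="2"
**	hw.ixv.enable_aim="1"
**	hw.ixv.txd="2048"
*/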
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  the adapter based on its PCI vendor/device ID.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];


	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixv_strings[ent->index],
			    ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int		error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;
#endif

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: this is
	** where the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	ixgbe_reset_hw(hw);

	/* Get the Mailbox API version */
	device_printf(dev, "MBX API %d negotiation: %d\n",
	    ixgbe_mbox_api_11,
	    ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev, "Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;	/* clear the multicast bit */
		addr[0] |= 0x02;	/* set the locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			struct tx_ring *txr = que->txr;
			taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Mailbox (link) queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}


/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *) data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	ixgbe_reset_hw(hw);
	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}


/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	queue = (u32)1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}


static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}

/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(adapter->ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
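	/*
	** Worked example (added for illustration): with an average of
	** ~1500 bytes per packet, newitr = 1500 + 24 = 1524 below; that
	** is under the 3000 cap and above the 300-1200 mid range, so it
	** is halved to 762, which is then stored in both halves of the
	** EITR setting for the next write.
	*/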
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}

static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_FDX;
		break;
	}

	IXGBE_CORE_UNLOCK(adapter);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);

	return;
}

/*
 * This is an iterator function needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array built by ixv_set_multi(), one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	int		hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**	- mark hung queues so we don't schedule on them
	**	- watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning while there
		** are uncleaned descriptors, it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev, "Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixv_init_locked(adapter);
}

/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
		}
	}

	return;
}


/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/*
	** Make sure BUSMASTER is set; on a VM under
	** KVM it may not be, and that will break things.
	*/
	pci_enable_busmaster(dev);

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* We need this to determine device-specific things */
	ixgbe_set_mac_type(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	return;
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	struct tx_ring	*txr = adapter->tx_rings;
	int		error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->active_queues |= ((u64)1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring, to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);
		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->vector = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	** Due to a broken design, QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE bit in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}

/*
 * Set up MSIX resources. Note that the VF
 * device MUST use MSIX; there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want, msgs;


	/* Must have at least 2 MSIX vectors */
	msgs = pci_msix_count(dev);
	if (msgs < 2)
		goto out;
	rid = PCIR_BAR(3);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table\n");
		goto out;
	}

	/*
	** Want vectors for the queues,
	** plus an additional for mailbox.
	*/
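	/*
	** Example (added for illustration): with hw.ixv.num_queues=2
	** this requests 3 vectors (2 queues + 1 mailbox). If the device
	** only exposes 2, num_queues is trimmed to 1 below so the
	** mailbox still gets its own vector.
	*/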
	want = adapter->num_queues + 1;
	if (want > msgs) {
		want = msgs;
		adapter->num_queues = msgs - 1;
	} else
		msgs = want;
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
	/* Release in case alloc was insufficient */
	pci_release_msi(dev);
out:
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	device_printf(adapter->dev, "MSIX config error\n");
	return (ENXIO);
}


static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int		rid;
	device_t	dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, which should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}

static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue	*que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below.
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		rid = (adapter->msix != 0) ? 1 : 0;

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}

static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	autoneg, err = 0;

	if (hw->mac.ops.check_link)
		err = hw->mac.ops.check_link(hw, &autoneg,
		    &adapter->link_up, FALSE);
	if (err)
		goto out;

	if (hw->mac.ops.setup_link)
		err = hw->mac.ops.setup_link(hw,
		    autoneg, adapter->link_up);
out:
	return;
}


/*********************************************************************
 *
 *  Enable transmit unit.
1578 * 1579 **********************************************************************/ 1580 static void 1581 ixv_initialize_transmit_units(struct adapter *adapter) 1582 { 1583 struct tx_ring *txr = adapter->tx_rings; 1584 struct ixgbe_hw *hw = &adapter->hw; 1585 1586 1587 for (int i = 0; i < adapter->num_queues; i++, txr++) { 1588 u64 tdba = txr->txdma.dma_paddr; 1589 u32 txctrl, txdctl; 1590 1591 /* Set WTHRESH to 8, burst writeback */ 1592 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); 1593 txdctl |= (8 << 16); 1594 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); 1595 1596 /* Set the HW Tx Head and Tail indices */ 1597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0); 1598 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0); 1599 1600 /* Set Tx Tail register */ 1601 txr->tail = IXGBE_VFTDT(i); 1602 1603 /* Set the processing limit */ 1604 txr->process_limit = ixv_tx_process_limit; 1605 1606 /* Set Ring parameters */ 1607 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i), 1608 (tdba & 0x00000000ffffffffULL)); 1609 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32)); 1610 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i), 1611 adapter->num_tx_desc * 1612 sizeof(struct ixgbe_legacy_tx_desc)); 1613 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i)); 1614 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 1615 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl); 1616 1617 /* Now enable */ 1618 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); 1619 txdctl |= IXGBE_TXDCTL_ENABLE; 1620 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); 1621 } 1622 1623 return; 1624 } 1625 1626 1627 /********************************************************************* 1628 * 1629 * Setup receive registers and features. 1630 * 1631 **********************************************************************/ 1632 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1633 1634 static void 1635 ixv_initialize_receive_units(struct adapter *adapter) 1636 { 1637 struct rx_ring *rxr = adapter->rx_rings; 1638 struct ixgbe_hw *hw = &adapter->hw; 1639 struct ifnet *ifp = adapter->ifp; 1640 u32 bufsz, rxcsum, psrtype; 1641 int max_frame; 1642 1643 if (ifp->if_mtu > ETHERMTU) 1644 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1645 else 1646 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1647 1648 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 1649 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | 1650 IXGBE_PSRTYPE_L2HDR; 1651 1652 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1653 1654 /* Tell PF our expected packet-size */ 1655 max_frame = ifp->if_mtu + IXGBE_MTU_HDR; 1656 ixgbevf_rlpml_set_vf(hw, max_frame); 1657 1658 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 1659 u64 rdba = rxr->rxdma.dma_paddr; 1660 u32 reg, rxdctl; 1661 1662 /* Disable the queue */ 1663 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); 1664 rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME); 1665 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl); 1666 for (int j = 0; j < 10; j++) { 1667 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) & 1668 IXGBE_RXDCTL_ENABLE) 1669 msec_delay(1); 1670 else 1671 break; 1672 } 1673 wmb(); 1674 /* Setup the Base and Length of the Rx Descriptor Ring */ 1675 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i), 1676 (rdba & 0x00000000ffffffffULL)); 1677 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), 1678 (rdba >> 32)); 1679 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i), 1680 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 1681 1682 /* Reset the ring indices */ 1683 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); 1684 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0); 1685 1686 /* Set up the SRRCTL register */ 
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Set the processing limit */
		rxr->process_limit = ixv_rx_process_limit;

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}

static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;


	/*
	** We get here through init_locked, meaning
	** a soft reset; this has already cleared
	** the VFTA and other state, so if there
	** have been no vlans registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan ids
		** based on the bits set in each
		** of the array's 32-bit words.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}

/*
** This routine is run via a vlan config EVENT;
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

/*
** This routine is run via a vlan
** unconfig EVENT; remove our entry
** from the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}

static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	return;
}

/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
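/*
** Example (added for illustration): for queue entry 3, TX (type 1),
** the byte index is (16 * (3 & 1)) + (8 * 1) = 24, so the vector
** (with IXGBE_IVAR_ALLOC_VAL set) lands in bits 31:24 of VTIVAR(1),
** since two entries share each IVAR register (entry >> 1).
*/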
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
}


/*
** Tasklet handler for MSIX MBX interrupts
** - done outside of interrupt context since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}

/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach, as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
		adapter->stats.vf.saved_reset_vfgprc +=
		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
		adapter->stats.vf.saved_reset_vfgptc +=
		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
		adapter->stats.vf.saved_reset_vfgorc +=
		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
		adapter->stats.vf.saved_reset_vfgotc +=
		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
		adapter->stats.vf.saved_reset_vfmprc +=
		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
	}
}

static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
}

#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
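
/*
** Worked example of the rollover handling above (illustrative):
** the VF packet counters are 32 bits wide and wrap, so if the
** last read was 0xFFFFFFF0 and the current read is 0x00000010,
** then current < last and 2^32 is added to the running 64-bit
** count before its low 32 bits are replaced with the new raw
** value. UPDATE_STAT_36 applies the same idea to the 36-bit
** octet counters, using 2^36 (0x1000000000) as the wrap quantum.
*/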
/*
** ixv_update_stats - Update the board statistics counters.
*/
static void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
}

/*
** Add statistic sysctls for the VF.
*/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts,
	    "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
	    CTLFLAG_RD, &adapter->mbuf_defrag_failed,
	    "m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events,
	    "Watchdog timeouts");

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL,
	    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc,
	    "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc,
	    "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc,
	    "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc,
	    "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc,
	    "Good Octets Transmitted");

	queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
	    CTLFLAG_RD, NULL,
	    "Queue Statistics (collected by SW)");
	queue_list = SYSCTL_CHILDREN(queue_node);

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
	    CTLFLAG_RD, &(que->irqs),
	    "IRQs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
	    CTLFLAG_RD, &(rxr->rx_irq),
	    "RX irqs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
	    CTLFLAG_RD, &(rxr->rx_packets),
	    "RX packets");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
	    CTLFLAG_RD, &(rxr->rx_bytes),
	    "RX bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
	    CTLFLAG_RD, &(rxr->rx_discarded),
	    "Discarded RX packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
	    CTLFLAG_RD, &(txr->total_packets),
	    "TX Packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
	    CTLFLAG_RD, &(txr->no_desc_avail),
	    "# of times not enough descriptors were available during TX");
}
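
/*
** Usage note (illustrative): the OIDs above hang off this
** device's sysctl tree, so assuming unit 0 they can be read
** from userland with, e.g.:
**
**	sysctl dev.ixv.0.mac.good_pkts_rcvd
**	sysctl dev.ixv.0.que.rx_packets
*/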
/**********************************************************************
 *
 * This routine is called only when the debug sysctl handler
 * (ixv_sysctl_debug below) is triggered.  It provides a way to
 * take a look at important statistics maintained by the driver
 * and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring *rxr;
	struct tx_ring *txr;
	struct lro_ctrl *lro;

	device_printf(dev, "Error Byte Count = %u\n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev, "RX(%d) LRO Queued= %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %d\n",
		    rxr->me, lro->lro_flushed);
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->link_irq);
	return;
}

static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	int error, result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		ixv_print_debug_info(adapter);
	}
	return (error);
}
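
/*
** Usage sketch (illustrative, assuming the handler above is
** attached under the name "debug" during attach, as the ixgbe
** drivers conventionally do): writing 1 to the OID dumps the
** per-queue state above to the console, e.g.:
**
**	sysctl dev.ixv.0.debug=1
*/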