/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixgbe.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.4.0";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
    /* required last entry */
    {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
    "Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int  ixv_probe(device_t);
static int  ixv_attach(device_t);
static int  ixv_detach(device_t);
static int  ixv_shutdown(device_t);
static int  ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void ixv_init(void *);
static void ixv_init_locked(struct adapter *);
static void ixv_stop(void *);
static void ixv_media_status(struct ifnet *, struct ifmediareq *);
static int  ixv_media_change(struct ifnet *);
static void ixv_identify_hardware(struct adapter *);
static int  ixv_allocate_pci_resources(struct adapter *);
static int  ixv_allocate_msix(struct adapter *);
static int  ixv_setup_msix(struct adapter *);
static void ixv_free_pci_resources(struct adapter *);
static void ixv_local_timer(void *);
static void ixv_setup_interface(device_t, struct adapter *);
static void ixv_config_link(struct adapter *);

static void ixv_initialize_transmit_units(struct adapter *);
static void ixv_initialize_receive_units(struct adapter *);

static void ixv_enable_intr(struct adapter *);
static void ixv_disable_intr(struct adapter *);
static void ixv_set_multi(struct adapter *);
static void ixv_update_link_status(struct adapter *);
static int  ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void ixv_set_ivar(struct adapter *, u8, u8, s8);
static void ixv_configure_ivars(struct adapter *);
static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void ixv_setup_vlan_support(struct adapter *);
static void ixv_register_vlan(void *, struct ifnet *, u16);
static void ixv_unregister_vlan(void *, struct ifnet *, u16);

static void ixv_save_stats(struct adapter *);
static void ixv_init_stats(struct adapter *);
static void ixv_update_stats(struct adapter *);
static void ixv_add_stats_sysctls(struct adapter *);
static void ixv_set_sysctl_value(struct adapter *, const char *,
                const char *, int *, int);

/* The MSI/X Interrupt handlers */
static void ixv_msix_que(void *);
static void ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void ixv_handle_que(void *, int);
static void ixv_handle_mbx(void *, int);

#ifdef DEV_NETMAP
/*
 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
 * if_ix.c.
 */
extern void ixgbe_netmap_attach(struct adapter *adapter);

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, ixv_probe),
    DEVMETHOD(device_attach, ixv_attach),
    DEVMETHOD(device_detach, ixv_detach),
    DEVMETHOD(device_shutdown, ixv_shutdown),
    DEVMETHOD_END
};

static driver_t ixv_driver = {
    "ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
/* XXX depend on 'ix' ? */

/*
** TUNABLE PARAMETERS:
*/

/* Number of Queues - do not exceed MSIX vectors - 1 */
static int ixv_num_queues = 1;
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload;
 * it can be a performance win in some workloads, but
 * in others it actually hurts, so it is off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
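/*
 * A brief usage sketch: the TUNABLE_INT() hooks above read their values
 * from the kernel environment at module load time, so they can be set
 * from /boot/loader.conf. The values below are illustrative only, not
 * recommendations:
 *
 *     hw.ixv.num_queues="2"
 *     hw.ixv.enable_aim="1"
 *     hw.ixv.rx_process_limit="512"
 */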
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
    ixgbe_vendor_info_t *ent;

    u16  pci_vendor_id = 0;
    u16  pci_device_id = 0;
    u16  pci_subvendor_id = 0;
    u16  pci_subdevice_id = 0;
    char adapter_name[256];


    pci_vendor_id = pci_get_vendor(dev);
    if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
        return (ENXIO);

    pci_device_id = pci_get_device(dev);
    pci_subvendor_id = pci_get_subvendor(dev);
    pci_subdevice_id = pci_get_subdevice(dev);

    ent = ixv_vendor_info_array;
    while (ent->vendor_id != 0) {
        if ((pci_vendor_id == ent->vendor_id) &&
            (pci_device_id == ent->device_id) &&
            ((pci_subvendor_id == ent->subvendor_id) ||
             (ent->subvendor_id == 0)) &&
            ((pci_subdevice_id == ent->subdevice_id) ||
             (ent->subdevice_id == 0))) {
            sprintf(adapter_name, "%s, Version - %s",
                ixv_strings[ent->index],
                ixv_driver_version);
            device_set_desc_copy(dev, adapter_name);
            return (BUS_PROBE_DEFAULT);
        }
        ent++;
    }
    return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
    struct adapter  *adapter;
    struct ixgbe_hw *hw;
    int             error = 0;

    INIT_DEBUGOUT("ixv_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    adapter = device_get_softc(dev);
    adapter->dev = adapter->osdep.dev = dev;
    hw = &adapter->hw;

#ifdef DEV_NETMAP
    adapter->init_locked = ixv_init_locked;
    adapter->stop_locked = ixv_stop;
#endif

    /* Core Lock Init */
    IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

    /* SYSCTL APIs */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

    SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_aim", CTLFLAG_RW,
        &ixv_enable_aim, 1, "Interrupt Moderation");

    /* Set up the timer callout */
    callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

    /* Determine hardware revision */
    ixv_identify_hardware(adapter);

    /* Do base PCI setup - map BAR0 */
    if (ixv_allocate_pci_resources(adapter)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        error = ENXIO;
        goto err_out;
    }

    /* Sysctls for limiting the amount of work done in the taskqueues */
    ixv_set_sysctl_value(adapter, "rx_processing_limit",
        "max number of rx packets to process",
        &adapter->rx_process_limit, ixv_rx_process_limit);

    ixv_set_sysctl_value(adapter, "tx_processing_limit",
        "max number of tx packets to process",
        &adapter->tx_process_limit, ixv_tx_process_limit);

    /* Do descriptor calc and sanity checks */
    if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
        ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
        device_printf(dev, "TXD config issue, using default!\n");
        adapter->num_tx_desc = DEFAULT_TXD;
    } else
        adapter->num_tx_desc = ixv_txd;

    if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
        ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
        device_printf(dev, "RXD config issue, using default!\n");
        adapter->num_rx_desc = DEFAULT_RXD;
    } else
        adapter->num_rx_desc = ixv_rxd;

    /* Allocate our TX/RX Queues */
    if (ixgbe_allocate_queues(adapter)) {
        error = ENOMEM;
        goto err_out;
    }
    /*
    ** Initialize the shared code: it is
    ** at this point that the mac type is set.
    */
    error = ixgbe_init_shared_code(hw);
    if (error) {
        device_printf(dev, "Shared Code Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    /* Setup the mailbox */
    ixgbe_init_mbx_params_vf(hw);

    ixgbe_reset_hw(hw);

    /* Get the Mailbox API version */
    device_printf(dev, "MBX API %d negotiation: %d\n",
        ixgbe_mbox_api_11,
        ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));

    error = ixgbe_init_hw(hw);
    if (error) {
        device_printf(dev, "Hardware Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    error = ixv_allocate_msix(adapter);
    if (error)
        goto err_late;

    /* If no mac address was assigned, make a random one */
    if (!ixv_check_ether_addr(hw->mac.addr)) {
        u8 addr[ETHER_ADDR_LEN];
        arc4rand(&addr, sizeof(addr), 0);
        addr[0] &= 0xFE;    /* clear the multicast (I/G) bit */
        addr[0] |= 0x02;    /* set the locally-administered (U/L) bit */
        bcopy(addr, hw->mac.addr, sizeof(addr));
    }

    /* Setup OS specific network interface */
    ixv_setup_interface(dev, adapter);

    /* Do the stats setup */
    ixv_save_stats(adapter);
    ixv_init_stats(adapter);
    ixv_add_stats_sysctls(adapter);

    /* Register for VLAN events */
    adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
        ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
        ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

#ifdef DEV_NETMAP
    ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
    INIT_DEBUGOUT("ixv_attach: end");
    return (0);

err_late:
    ixgbe_free_transmit_structures(adapter);
    ixgbe_free_receive_structures(adapter);
err_out:
    ixv_free_pci_resources(adapter);
    return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
    struct adapter  *adapter = device_get_softc(dev);
    struct ix_queue *que = adapter->queues;

    INIT_DEBUGOUT("ixv_detach: begin");

    /* Make sure VLANs are not using the driver */
    if (adapter->ifp->if_vlantrunk != NULL) {
        device_printf(dev, "Vlan in use, detach first\n");
        return (EBUSY);
    }

    IXGBE_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXGBE_CORE_UNLOCK(adapter);

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        if (que->tq) {
            struct tx_ring *txr = que->txr;
            taskqueue_drain(que->tq, &txr->txq_task);
            taskqueue_drain(que->tq, &que->que_task);
            taskqueue_free(que->tq);
        }
    }

    /* Drain the Mailbox(link) queue */
    if (adapter->tq) {
        taskqueue_drain(adapter->tq, &adapter->link_task);
        taskqueue_free(adapter->tq);
    }

    /* Unregister VLAN events */
    if (adapter->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    if (adapter->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

    ether_ifdetach(adapter->ifp);
    callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
    netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
    ixv_free_pci_resources(adapter);
    bus_generic_detach(dev);
    if_free(adapter->ifp);

    ixgbe_free_transmit_structures(adapter);
    ixgbe_free_receive_structures(adapter);

    IXGBE_CORE_LOCK_DESTROY(adapter);
    return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);
    IXGBE_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXGBE_CORE_UNLOCK(adapter);
    return (0);
}


/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifreq   *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
    struct ifaddr  *ifa = (struct ifaddr *)data;
    bool           avoid_reset = FALSE;
#endif
    int            error = 0;

    switch (command) {

    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET)
            avoid_reset = TRUE;
#endif
#ifdef INET6
        if (ifa->ifa_addr->sa_family == AF_INET6)
            avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
        /*
        ** Calling init results in link renegotiation,
        ** so we avoid doing it when possible.
        */
        if (avoid_reset) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                ixv_init(adapter);
            if (!(ifp->if_flags & IFF_NOARP))
                arp_ifinit(ifp, ifa);
        } else
            error = ether_ioctl(ifp, command, data);
        break;
#endif
    case SIOCSIFMTU:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
        if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
            error = EINVAL;
        } else {
            IXGBE_CORE_LOCK(adapter);
            ifp->if_mtu = ifr->ifr_mtu;
            adapter->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            ixv_init_locked(adapter);
            IXGBE_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFFLAGS:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
        IXGBE_CORE_LOCK(adapter);
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                ixv_init_locked(adapter);
        } else
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixv_stop(adapter);
        adapter->if_flags = ifp->if_flags;
        IXGBE_CORE_UNLOCK(adapter);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXGBE_CORE_LOCK(adapter);
            ixv_disable_intr(adapter);
            ixv_set_multi(adapter);
            ixv_enable_intr(adapter);
            IXGBE_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
        error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
        break;
    case SIOCSIFCAP:
    {
        int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
        if (mask & IFCAP_HWCSUM)
            ifp->if_capenable ^= IFCAP_HWCSUM;
        if (mask & IFCAP_TSO4)
            ifp->if_capenable ^= IFCAP_TSO4;
        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXGBE_CORE_LOCK(adapter);
            ixv_init_locked(adapter);
            IXGBE_CORE_UNLOCK(adapter);
        }
        VLAN_CAPABILITIES(ifp);
        break;
    }

    default:
        IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
    struct ifnet    *ifp = adapter->ifp;
    device_t        dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;
    u32             mhadd, gpie;

    INIT_DEBUGOUT("ixv_init_locked: begin");
    mtx_assert(&adapter->core_mtx, MA_OWNED);
    hw->adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* Get the latest mac address; the user may have set an LAA */
    bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
        IXGBE_ETH_LENGTH_OF_ADDRESS);
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    hw->addr_ctrl.rar_used_count = 1;

    /* Prepare transmit descriptors and buffers */
    if (ixgbe_setup_transmit_structures(adapter)) {
        device_printf(dev, "Could not setup transmit structures\n");
        ixv_stop(adapter);
        return;
    }

    ixgbe_reset_hw(hw);
    ixv_initialize_transmit_units(adapter);

    /* Setup Multicast table */
    ixv_set_multi(adapter);

    /*
    ** Determine the correct mbuf pool
    ** for doing jumbo/headersplit
    */
    if (ifp->if_mtu > ETHERMTU)
        adapter->rx_mbuf_sz = MJUMPAGESIZE;
    else
        adapter->rx_mbuf_sz = MCLBYTES;

    /* Prepare receive descriptors and buffers */
    if (ixgbe_setup_receive_structures(adapter)) {
        device_printf(dev, "Could not setup receive structures\n");
        ixv_stop(adapter);
        return;
    }

    /* Configure RX settings */
    ixv_initialize_receive_units(adapter);

    /* Enable Enhanced MSIX mode */
    gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /* Set the various hardware offload abilities */
    ifp->if_hwassist = 0;
    if (ifp->if_capenable & IFCAP_TSO4)
        ifp->if_hwassist |= CSUM_TSO;
    if (ifp->if_capenable & IFCAP_TXCSUM) {
        ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
        ifp->if_hwassist |= CSUM_SCTP;
#endif
    }

    /* Set MTU size */
    if (ifp->if_mtu > ETHERMTU) {
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    }

    /* Set up VLAN offload and filter */
    ixv_setup_vlan_support(adapter);

    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

    /* Set up MSI/X routing */
    ixv_configure_ivars(adapter);

    /* Set up auto-mask */
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

    /* Set moderation on the Link interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

    /* Stats init */
    ixv_init_stats(adapter);

    /* Config/Enable Link */
    ixv_config_link(adapter);

    /* And now turn on interrupts */
    ixv_enable_intr(adapter);

    /* Now inform the stack we're ready */
    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    return;
}

static void
ixv_init(void *arg)
{
    struct adapter *adapter = arg;

    IXGBE_CORE_LOCK(adapter);
    ixv_init_locked(adapter);
    IXGBE_CORE_UNLOCK(adapter);
    return;
}


/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32             queue = 1 << vector;
    u32             mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u64             queue = (u64)(1 << vector);
    u32             mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
    u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}


static void
ixv_handle_que(void *context, int pending)
{
    struct ix_queue *que = context;
    struct adapter  *adapter = que->adapter;
    struct tx_ring  *txr = que->txr;
    struct ifnet    *ifp = adapter->ifp;
    bool            more;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        more = ixgbe_rxeof(que);
        IXGBE_TX_LOCK(txr);
        ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
        if (!drbr_empty(ifp, txr->br))
            ixgbe_mq_start_locked(ifp, txr);
#else
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            ixgbe_start_locked(txr, ifp);
#endif
        IXGBE_TX_UNLOCK(txr);
        if (more) {
            taskqueue_enqueue(que->tq, &que->que_task);
            return;
        }
    }

    /* Reenable this interrupt */
    ixv_enable_queue(adapter, que->msix);
    return;
}

/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixv_msix_que(void *arg)
{
    struct ix_queue *que = arg;
    struct adapter  *adapter = que->adapter;
    struct ifnet    *ifp = adapter->ifp;
    struct tx_ring  *txr = que->txr;
    struct rx_ring  *rxr = que->rxr;
    bool            more;
    u32             newitr = 0;

    ixv_disable_queue(adapter, que->msix);
    ++que->irqs;

    more = ixgbe_rxeof(que);

    IXGBE_TX_LOCK(txr);
    ixgbe_txeof(txr);
    /*
    ** Make certain that if the stack
    ** has anything queued the task gets
    ** scheduled to handle it.
    */
#ifdef IXGBE_LEGACY_TX
    if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
        ixgbe_start_locked(txr, ifp);
#else
    if (!drbr_empty(adapter->ifp, txr->br))
        ixgbe_mq_start_locked(ifp, txr);
#endif
    IXGBE_TX_UNLOCK(txr);

    /* Do AIM now? */

    if (ixv_enable_aim == FALSE)
        goto no_calc;
    /*
    ** Do Adaptive Interrupt Moderation:
    **  - Write out last calculated setting
    **  - Calculate based on average size over
    **    the last interval.
    */
    if (que->eitr_setting)
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VTEITR(que->msix),
            que->eitr_setting);

    que->eitr_setting = 0;

    /* Idle, do nothing */
    if ((txr->bytes == 0) && (rxr->bytes == 0))
        goto no_calc;

    if ((txr->bytes) && (txr->packets))
        newitr = txr->bytes/txr->packets;
    if ((rxr->bytes) && (rxr->packets))
        newitr = max(newitr,
            (rxr->bytes / rxr->packets));
    newitr += 24; /* account for hardware frame, crc */

    /* set an upper boundary */
    newitr = min(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200))
        newitr = (newitr / 3);
    else
        newitr = (newitr / 2);

    newitr |= newitr << 16;

    /* save for next interrupt */
    que->eitr_setting = newitr;

    /* Reset state */
    txr->bytes = 0;
    txr->packets = 0;
    rxr->bytes = 0;
    rxr->packets = 0;

no_calc:
    if (more)
        taskqueue_enqueue(que->tq, &que->que_task);
    else /* Reenable this interrupt */
        ixv_enable_queue(adapter, que->msix);
    return;
}
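/*
 * Worked example of the AIM calculation above (the traffic numbers are
 * illustrative, not measured): if the last interval moved 64000 rx bytes
 * in 50 packets, the average frame is 1280 bytes; newitr = 1280 + 24 =
 * 1304, clamped to min(1304, 3000) = 1304, then halved (it falls outside
 * the 300-1200 mid range) to 652, and finally mirrored into the high 16
 * bits. The result is saved in eitr_setting and written to VTEITR at the
 * start of the next interrupt.
 */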
static void
ixv_msix_mbx(void *arg)
{
    struct adapter  *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    u32             reg;

    ++adapter->link_irq;

    /* First get the cause */
    reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

    /* Link status change */
    if (reg & IXGBE_EICR_LSC)
        taskqueue_enqueue(adapter->tq, &adapter->link_task);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct adapter *adapter = ifp->if_softc;

    INIT_DEBUGOUT("ixv_media_status: begin");
    IXGBE_CORE_LOCK(adapter);
    ixv_update_link_status(adapter);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!adapter->link_active) {
        IXGBE_CORE_UNLOCK(adapter);
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (adapter->link_speed) {
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= IFM_FDX;
        break;
    }

    IXGBE_CORE_UNLOCK(adapter);

    return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifmedia *ifm = &adapter->media;

    INIT_DEBUGOUT("ixv_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        device_printf(adapter->dev, "Only auto media type\n");
        return (EINVAL);
    }

    return (0);
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is
 *  updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
    u8                 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
    u8                 *update_ptr;
    struct ifmultiaddr *ifma;
    int                mcnt = 0;
    struct ifnet       *ifp = adapter->ifp;

    IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_maddr_rlock(ifp);
#endif
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
            &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
            IXGBE_ETH_LENGTH_OF_ADDRESS);
        mcnt++;
    }
#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_maddr_runlock(ifp);
#endif

    update_ptr = mta;

    ixgbe_update_mc_addr_list(&adapter->hw,
        update_ptr, mcnt, ixv_mc_array_itr, TRUE);

    return;
}

/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    u8 *addr = *update_ptr;
    u8 *newptr;
    *vmdq = 0;

    newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
    *update_ptr = newptr;
    return addr;
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
    struct adapter  *adapter = arg;
    device_t        dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    u64             queues = 0;
    int             hung = 0;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    ixv_update_link_status(adapter);

    /* Stats Update */
    ixv_update_stats(adapter);

    /*
    ** Check the TX queues status
    **      - mark hung queues so we don't schedule on them
    **      - watchdog only if all queues show hung
    */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        /* Keep track of queues with work for soft irq */
        if (que->txr->busy)
            queues |= ((u64)1 << que->me);
        /*
        ** Each time txeof runs without cleaning while there
        ** are uncleaned descriptors, it increments busy. If
        ** we get to the MAX we declare it hung.
        */
        if (que->busy == IXGBE_QUEUE_HUNG) {
            ++hung;
            /* Mark the queue as inactive */
            adapter->active_queues &= ~((u64)1 << que->me);
            continue;
        } else {
            /* Check if we've come back from hung */
            if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                adapter->active_queues |= ((u64)1 << que->me);
        }
        if (que->busy >= IXGBE_MAX_TX_BUSY) {
            device_printf(dev, "Warning queue %d "
                "appears to be hung!\n", i);
            que->txr->busy = IXGBE_QUEUE_HUNG;
            ++hung;
        }

    }

    /* Only truly watchdog if all queues show hung */
    if (hung == adapter->num_queues)
        goto watchdog;
    else if (queues != 0) { /* Force an IRQ on queues with work */
        ixv_rearm_queues(adapter, queues);
    }

    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    return;

watchdog:
    device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
    adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    adapter->watchdog_events++;
    ixv_init_locked(adapter);
}

/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    device_t     dev = adapter->dev;

    if (adapter->link_up) {
        if (adapter->link_active == FALSE) {
            if (bootverbose)
                device_printf(dev, "Link is up %d Gbps %s \n",
                    ((adapter->link_speed == 128) ? 10 : 1),
                    "Full Duplex");
            adapter->link_active = TRUE;
            if_link_state_change(ifp, LINK_STATE_UP);
        }
    } else { /* Link down */
        if (adapter->link_active == TRUE) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            if_link_state_change(ifp, LINK_STATE_DOWN);
            adapter->link_active = FALSE;
        }
    }

    return;
}


/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
    struct ifnet    *ifp;
    struct adapter  *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    ifp = adapter->ifp;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    INIT_DEBUGOUT("ixv_stop: begin\n");
    ixv_disable_intr(adapter);

    /* Tell the stack that the interface is no longer active */
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    ixgbe_reset_hw(hw);
    adapter->hw.adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    return;
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
    device_t        dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;

    /*
    ** Make sure BUSMASTER is set; on a VM under
    ** KVM it may not be, and that will break things.
    */
    pci_enable_busmaster(dev);

    /* Save off the information about this board */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
    hw->subsystem_vendor_id =
        pci_read_config(dev, PCIR_SUBVEND_0, 2);
    hw->subsystem_device_id =
        pci_read_config(dev, PCIR_SUBDEV_0, 2);

    /* We need this to determine device-specific things */
    ixgbe_set_mac_type(hw);

    /* Set the right number of segments */
    adapter->num_segs = IXGBE_82599_SCATTER;

    return;
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
    device_t        dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    struct tx_ring  *txr = adapter->tx_rings;
    int             error, rid, vector = 0;

    for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
        rid = vector + 1;
        que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (que->res == NULL) {
            device_printf(dev, "Unable to allocate"
                " bus resource: que interrupt [%d]\n", vector);
            return (ENXIO);
        }
        /* Set the handler function */
        error = bus_setup_intr(dev, que->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixv_msix_que, que, &que->tag);
        if (error) {
            que->res = NULL;
            device_printf(dev, "Failed to register QUE handler");
            return (error);
        }
#if __FreeBSD_version >= 800504
        bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
        que->msix = vector;
        adapter->active_queues |= (u64)(1 << que->msix);
        /*
        ** Bind the msix vector, and thus the
        ** ring to the corresponding cpu.
        */
        if (adapter->num_queues > 1)
            bus_bind_intr(dev, que->res, i);
        TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
        TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
        que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
        taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
            device_get_nameunit(adapter->dev));
    }

    /* and Mailbox */
    rid = vector + 1;
    adapter->res = bus_alloc_resource_any(dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
    if (!adapter->res) {
        device_printf(dev, "Unable to allocate"
            " bus resource: MBX interrupt [%d]\n", rid);
        return (ENXIO);
    }
    /* Set the mbx handler function */
    error = bus_setup_intr(dev, adapter->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL,
        ixv_msix_mbx, adapter, &adapter->tag);
    if (error) {
        adapter->res = NULL;
        device_printf(dev, "Failed to register LINK handler");
        return (error);
    }
#if __FreeBSD_version >= 800504
    bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
    adapter->vector = vector;
    /* Tasklets for Mailbox */
    TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
    adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
        taskqueue_thread_enqueue, &adapter->tq);
    taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
        device_get_nameunit(adapter->dev));
    /*
    ** Due to a broken design QEMU will fail to properly
    ** enable the guest for MSIX unless the vectors in
    ** the table are all set up, so we must rewrite the
    ** ENABLE bit in the MSIX control register again at this
    ** point to cause it to successfully initialize us.
    */
    if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
        int msix_ctrl;
        pci_find_cap(dev, PCIY_MSIX, &rid);
        rid += PCIR_MSIX_CTRL;
        msix_ctrl = pci_read_config(dev, rid, 2);
        msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
        pci_write_config(dev, rid, msix_ctrl, 2);
    }

    return (0);
}

/*
 * Setup MSIX resources; note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    int      rid, want, msgs;


    /* Must have at least 2 MSIX vectors */
    msgs = pci_msix_count(dev);
    if (msgs < 2)
        goto out;
    rid = PCIR_BAR(3);
    adapter->msix_mem = bus_alloc_resource_any(dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (adapter->msix_mem == NULL) {
        device_printf(adapter->dev,
            "Unable to map MSIX table \n");
        goto out;
    }

    /*
    ** Want vectors for the queues,
    ** plus an additional one for the mailbox.
    */
    want = adapter->num_queues + 1;
    if (want > msgs) {
        want = msgs;
        adapter->num_queues = msgs - 1;
    } else
        msgs = want;
    if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
        device_printf(adapter->dev,
            "Using MSIX interrupts with %d vectors\n", want);
        return (want);
    }
    /* Release in case alloc was insufficient */
    pci_release_msi(dev);
out:
    if (adapter->msix_mem != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY,
            rid, adapter->msix_mem);
        adapter->msix_mem = NULL;
    }
    device_printf(adapter->dev, "MSIX config error\n");
    return (ENXIO);
}


static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
    int      rid;
    device_t dev = adapter->dev;

    rid = PCIR_BAR(0);
    adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (!(adapter->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    adapter->osdep.mem_bus_space_tag =
        rman_get_bustag(adapter->pci_mem);
    adapter->osdep.mem_bus_space_handle =
        rman_get_bushandle(adapter->pci_mem);
    adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

    /* Pick up the tunable queues */
    adapter->num_queues = ixv_num_queues;

    adapter->hw.back = &adapter->osdep;

    /*
    ** Now setup MSI/X; it should
    ** return us the number of
    ** configured vectors.
    */
    adapter->msix = ixv_setup_msix(adapter);
    if (adapter->msix == ENXIO)
        return (ENXIO);
    else
        return (0);
}

static void
ixv_free_pci_resources(struct adapter *adapter)
{
    struct ix_queue *que = adapter->queues;
    device_t        dev = adapter->dev;
    int             rid, memrid;

    memrid = PCIR_BAR(MSIX_82598_BAR);

    /*
    ** There is a slight possibility of a failure mode
    ** in attach that will result in entering this function
    ** before interrupt resources have been initialized, and
    ** in that case we do not want to execute the loops below.
    ** We can detect this reliably by the state of the adapter
    ** res pointer.
    */
    if (adapter->res == NULL)
        goto mem;

    /*
    ** Release all msix queue resources:
    */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        rid = que->msix + 1;
        if (que->tag != NULL) {
            bus_teardown_intr(dev, que->res, que->tag);
            que->tag = NULL;
        }
        if (que->res != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
    }


    /* Clean the Legacy or Link interrupt last */
    if (adapter->vector) /* we are doing MSIX */
        rid = adapter->vector + 1;
    else
        (adapter->msix != 0) ? (rid = 1) : (rid = 0);

    if (adapter->tag != NULL) {
        bus_teardown_intr(dev, adapter->res, adapter->tag);
        adapter->tag = NULL;
    }
    if (adapter->res != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
    if (adapter->msix)
        pci_release_msi(dev);

    if (adapter->msix_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            memrid, adapter->msix_mem);

    if (adapter->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(0), adapter->pci_mem);

    return;
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
    struct ifnet *ifp;

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    ifp = adapter->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL)
        panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_baudrate = 1000000000;
    ifp->if_init = ixv_init;
    ifp->if_softc = adapter;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
    ifp->if_transmit = ixgbe_mq_start;
    ifp->if_qflush = ixgbe_qflush;
#else
    ifp->if_start = ixgbe_start;
#endif
    ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

    ether_ifattach(ifp, adapter->hw.mac.addr);

    adapter->max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_hdrlen = sizeof(struct ether_vlan_header);

    ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
                         |  IFCAP_VLAN_HWTSO
                         |  IFCAP_VLAN_MTU;
    ifp->if_capabilities |= IFCAP_LRO;
    ifp->if_capenable = ifp->if_capabilities;

    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
        ixv_media_status);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

    return;
}

static void
ixv_config_link(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32             autoneg, err = 0;

    if (hw->mac.ops.check_link)
        err = hw->mac.ops.check_link(hw, &autoneg,
            &adapter->link_up, FALSE);
    if (err)
        goto out;

    if (hw->mac.ops.setup_link)
        err = hw->mac.ops.setup_link(hw,
            autoneg, adapter->link_up);
out:
    return;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
    struct tx_ring  *txr = adapter->tx_rings;
    struct ixgbe_hw *hw = &adapter->hw;


    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        u64 tdba = txr->txdma.dma_paddr;
        u32 txctrl, txdctl;

        /* Set WTHRESH to 8, burst writeback */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        txdctl |= (8 << 16);
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

        /* Set the HW Tx Head and Tail indices */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

        /* Set Tx Tail register */
        txr->tail = IXGBE_VFTDT(i);

        /* Set Ring parameters */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
            adapter->num_tx_desc *
            sizeof(struct ixgbe_legacy_tx_desc));
        txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

        /* Now enable */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
    }

    return;
}
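/*
 * Worked example of the ring-base programming above (the bus address is
 * made up for illustration): a descriptor ring at bus address
 * 0x12345F000 is split as TDBAL = 0x2345F000 (low 32 bits) and
 * TDBAH = 0x1 (high 32 bits); TDLEN is the ring size in bytes, e.g.
 * 1024 descriptors * 16 bytes per descriptor = 16384. The RX ring
 * below is programmed the same way via RDBAL/RDBAH/RDLEN.
 */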
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
    struct rx_ring  *rxr = adapter->rx_rings;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ifnet    *ifp = adapter->ifp;
    u32             bufsz, rxcsum, psrtype;
    int             max_frame;

    if (ifp->if_mtu > ETHERMTU)
        bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    else
        bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
        IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
        IXGBE_PSRTYPE_L2HDR;

    IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

    /* Tell PF our expected packet-size */
    max_frame = ifp->if_mtu + IXGBE_MTU_HDR;
    ixgbevf_rlpml_set_vf(hw, max_frame);

    for (int i = 0; i < adapter->num_queues; i++, rxr++) {
        u64 rdba = rxr->rxdma.dma_paddr;
        u32 reg, rxdctl;

        /* Disable the queue */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
        for (int j = 0; j < 10; j++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                IXGBE_RXDCTL_ENABLE)
                msec_delay(1);
            else
                break;
        }
        wmb();
        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
            (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
            adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

        /* Reset the ring indices */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

        /* Set up the SRRCTL register */
        reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
        reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        reg |= bufsz;
        reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

        /* Capture Rx Tail index */
        rxr->tail = IXGBE_VFRDT(rxr->me);

        /* Do the queue enabling last */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        rxdctl |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
        for (int k = 0; k < 10; k++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                IXGBE_RXDCTL_ENABLE)
                break;
            else
                msec_delay(1);
        }
        wmb();

        /* Set the Tail Pointer */
#ifdef DEV_NETMAP
        /*
         * In netmap mode, we must preserve the buffers made
         * available to userspace before the if_init()
         * (this is true by default on the TX side, because
         * init makes all buffers available to userspace).
         *
         * netmap_reset() and the device specific routines
         * (e.g. ixgbe_setup_receive_rings()) map these
         * buffers at the end of the NIC ring, so here we
         * must set the RDT (tail) register to make sure
         * they are not overwritten.
         *
         * In this driver the NIC ring starts at RDH = 0,
         * RDT points to the last slot available for reception (?),
         * so RDT = num_rx_desc - 1 means the whole ring is available.
         */
        if (ifp->if_capenable & IFCAP_NETMAP) {
            struct netmap_adapter *na = NA(adapter->ifp);
            struct netmap_kring *kring = &na->rx_rings[i];
            int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

            IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
        } else
#endif /* DEV_NETMAP */
            IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                adapter->num_rx_desc - 1);
    }

    rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

    if (ifp->if_capenable & IFCAP_RXCSUM)
        rxcsum |= IXGBE_RXCSUM_PCSD;

    if (!(rxcsum & IXGBE_RXCSUM_PCSD))
        rxcsum |= IXGBE_RXCSUM_IPPCSE;

    IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

    return;
}

static void
ixv_setup_vlan_support(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32             ctrl, vid, vfta, retry;


    /*
    ** We get here through init_locked, meaning
    ** a soft reset; this has already cleared
    ** the VFTA and other state, so if there
    ** have been no vlans registered do nothing.
    */
    if (adapter->num_vlans == 0)
        return;

    /* Enable the queues */
    for (int i = 0; i < adapter->num_queues; i++) {
        ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        ctrl |= IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
    }

    /*
    ** A soft reset zeroes out the VFTA, so
    ** we need to repopulate it now.
    */
    for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
        if (ixv_shadow_vfta[i] == 0)
            continue;
        vfta = ixv_shadow_vfta[i];
        /*
        ** Reconstruct the vlan id's
        ** based on the bits set in each
        ** of the array ints.
        */
        for (int j = 0; j < 32; j++) {
            retry = 0;
            if ((vfta & (1 << j)) == 0)
                continue;
            vid = (i * 32) + j;
            /* Call the shared code mailbox routine */
            while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
                if (++retry > 5)
                    break;
            }
        }
    }
}
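/*
 * Worked example of the shadow VFTA bit layout used above and in the
 * vlan event handlers below (the vlan id is illustrative): vlan 2049
 * maps to index = (2049 >> 5) & 0x7F = 64 and bit = 2049 & 0x1F = 1,
 * so it is tracked as bit 1 of ixv_shadow_vfta[64]; the repopulation
 * loop above reconstructs it as vid = 64 * 32 + 1 = 2049 and
 * re-registers it with ixgbe_set_vfta() after every soft reset.
 */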
/*
** This routine is run via a vlan config EVENT;
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA; init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct adapter *adapter = ifp->if_softc;
    u16            index, bit;

    if (ifp->if_softc != arg) /* Not our event */
        return;

    if ((vtag == 0) || (vtag > 4095)) /* Invalid */
        return;

    IXGBE_CORE_LOCK(adapter);
    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] |= (1 << bit);
    ++adapter->num_vlans;
    /* Re-init to load the changes */
    ixv_init_locked(adapter);
    IXGBE_CORE_UNLOCK(adapter);
}

/*
** This routine is run via a vlan
** unconfig EVENT; remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct adapter *adapter = ifp->if_softc;
    u16            index, bit;

    if (ifp->if_softc != arg)
        return;

    if ((vtag == 0) || (vtag > 4095)) /* Invalid */
        return;

    IXGBE_CORE_LOCK(adapter);
    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] &= ~(1 << bit);
    --adapter->num_vlans;
    /* Re-init to load the changes */
    ixv_init_locked(adapter);
    IXGBE_CORE_UNLOCK(adapter);
}

static void
ixv_enable_intr(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct ix_queue *que = adapter->queues;
    u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

    mask = IXGBE_EIMS_ENABLE_MASK;
    mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

    for (int i = 0; i < adapter->num_queues; i++, que++)
        ixv_enable_queue(adapter, que->msix);

    IXGBE_WRITE_FLUSH(hw);

    return;
}

static void
ixv_disable_intr(struct adapter *adapter)
{
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
    IXGBE_WRITE_FLUSH(&adapter->hw);
    return;
}

/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32             ivar, index;

    vector |= IXGBE_IVAR_ALLOC_VAL;

    if (type == -1) { /* MISC IVAR */
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
        ivar &= ~0xFF;
        ivar |= vector;
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
    } else {          /* RX/TX IVARS */
        index = (16 * (entry & 1)) + (8 * type);
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
        ivar &= ~(0xFF << index);
        ivar |= (vector << index);
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
    }
}
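/*
 * Worked example of the IVAR indexing above (the values are
 * illustrative): entry 3, type 1 (TX) selects register
 * VTIVAR(3 >> 1) = VTIVAR(1) and bit offset
 * index = 16 * (3 & 1) + 8 * 1 = 24, so the vector (with
 * IXGBE_IVAR_ALLOC_VAL set) lands in bits 31:24 of VTIVAR(1).
 */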
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
}


/*
** Tasklet handler for MSI-X MBX interrupts
**  - runs outside the interrupt context since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}

/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
		adapter->stats.vf.saved_reset_vfgprc +=
		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
		adapter->stats.vf.saved_reset_vfgptc +=
		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
		adapter->stats.vf.saved_reset_vfgorc +=
		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
		adapter->stats.vf.saved_reset_vfgotc +=
		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
		adapter->stats.vf.saved_reset_vfmprc +=
		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
	}
}

static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
}

#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
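
/*
** The macros above widen the free-running 32-bit (and 36-bit) hardware
** counters into 64-bit software counters: whenever the raw register
** value is smaller than the previously sampled value the counter must
** have wrapped, so one full counter period is added to the preserved
** high bits before the live low bits are spliced back in.  A
** compiled-out sketch of the 32-bit case:
*/
#if 0
#include <stdint.h>

static uint64_t
widen_stat_32(uint32_t current, uint32_t *last, uint64_t count)
{
	if (current < *last)		/* raw counter wrapped */
		count += 0x100000000ULL;
	*last = current;
	count &= 0xFFFFFFFF00000000ULL;	/* keep accumulated wraps */
	count |= current;		/* splice in the live low bits */
	return (count);
}
#endif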
/*
** ixv_update_stats - Update the board statistics counters.
*/
static void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
}

/*
 * Add statistic sysctls for the VF.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts,
	    "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
	    CTLFLAG_RD, &adapter->mbuf_defrag_failed,
	    "m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events,
	    "Watchdog timeouts");

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL,
	    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc,
	    "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc,
	    "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc,
	    "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc,
	    "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc,
	    "Good Octets Transmitted");

	queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
	    CTLFLAG_RD, NULL,
	    "Queue Statistics (collected by SW)");
	queue_list = SYSCTL_CHILDREN(queue_node);

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
	    CTLFLAG_RD, &(que->irqs),
	    "IRQs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
	    CTLFLAG_RD, &(rxr->rx_irq),
	    "RX irqs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
	    CTLFLAG_RD, &(rxr->rx_packets),
	    "RX packets");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
	    CTLFLAG_RD, &(rxr->rx_bytes),
	    "RX bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
	    CTLFLAG_RD, &(rxr->rx_discarded),
	    "Discarded RX packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
	    CTLFLAG_RD, &(txr->total_packets),
	    "TX Packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
	    CTLFLAG_RD, &(txr->no_desc_avail),
	    "# of times not enough descriptors were available during TX");
}
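
/*
** The OIDs created above hang off the device sysctl tree, i.e.
** dev.ixv.<unit>.mac.* and dev.ixv.<unit>.que.*.  A compiled-out
** sketch of a userland consumer (unit 0 assumed for illustration):
*/
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	uint64_t gprc;
	size_t len = sizeof(gprc);

	/* Same value published by ixv_add_stats_sysctls() above */
	if (sysctlbyname("dev.ixv.0.mac.good_pkts_rcvd",
	    &gprc, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("good packets received: %llu\n",
	    (unsigned long long)gprc);
	return (0);
}
#endif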
"tx_no_desc", 2126 CTLFLAG_RD, &(txr->no_desc_avail), 2127 "# of times not enough descriptors were available during TX"); 2128 } 2129 2130 static void 2131 ixv_set_sysctl_value(struct adapter *adapter, const char *name, 2132 const char *description, int *limit, int value) 2133 { 2134 *limit = value; 2135 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 2136 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 2137 OID_AUTO, name, CTLFLAG_RW, limit, value, description); 2138 } 2139 2140 /********************************************************************** 2141 * 2142 * This routine is called only when em_display_debug_stats is enabled. 2143 * This routine provides a way to take a look at important statistics 2144 * maintained by the driver and hardware. 2145 * 2146 **********************************************************************/ 2147 static void 2148 ixv_print_debug_info(struct adapter *adapter) 2149 { 2150 device_t dev = adapter->dev; 2151 struct ixgbe_hw *hw = &adapter->hw; 2152 struct ix_queue *que = adapter->queues; 2153 struct rx_ring *rxr; 2154 struct tx_ring *txr; 2155 struct lro_ctrl *lro; 2156 2157 device_printf(dev,"Error Byte Count = %u \n", 2158 IXGBE_READ_REG(hw, IXGBE_ERRBC)); 2159 2160 for (int i = 0; i < adapter->num_queues; i++, que++) { 2161 txr = que->txr; 2162 rxr = que->rxr; 2163 lro = &rxr->lro; 2164 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n", 2165 que->msix, (long)que->irqs); 2166 device_printf(dev,"RX(%d) Packets Received: %lld\n", 2167 rxr->me, (long long)rxr->rx_packets); 2168 device_printf(dev,"RX(%d) Bytes Received: %lu\n", 2169 rxr->me, (long)rxr->rx_bytes); 2170 device_printf(dev,"RX(%d) LRO Queued= %d\n", 2171 rxr->me, lro->lro_queued); 2172 device_printf(dev,"RX(%d) LRO Flushed= %d\n", 2173 rxr->me, lro->lro_flushed); 2174 device_printf(dev,"TX(%d) Packets Sent: %lu\n", 2175 txr->me, (long)txr->total_packets); 2176 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n", 2177 txr->me, (long)txr->no_desc_avail); 2178 } 2179 2180 device_printf(dev,"MBX IRQ Handled: %lu\n", 2181 (long)adapter->link_irq); 2182 return; 2183 } 2184 2185 static int 2186 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) 2187 { 2188 int error, result; 2189 struct adapter *adapter; 2190 2191 result = -1; 2192 error = sysctl_handle_int(oidp, &result, 0, req); 2193 2194 if (error || !req->newptr) 2195 return (error); 2196 2197 if (result == 1) { 2198 adapter = (struct adapter *) arg1; 2199 ixv_print_debug_info(adapter); 2200 } 2201 return error; 2202 } 2203 2204