/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixgbe.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.4.6-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t);
static int	ixv_attach(device_t);
static int	ixv_detach(device_t);
static int	ixv_shutdown(device_t);
static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *);
static int	ixv_allocate_msix(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);
static void	ixv_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);

/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);

#ifdef DEV_NETMAP
/*
 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
 * if_ix.c.
 */
extern void ixgbe_netmap_attach(struct adapter *adapter);

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
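
/*
 * Loading sketch (illustrative only; the module path and name can vary
 * by FreeBSD release): with this driver built as if_ixv.ko it can be
 * loaded at runtime or from the loader:
 *
 *   # kldload if_ixv
 *
 * or persistently in /boot/loader.conf:
 *
 *   if_ixv_load="YES"
 */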

/*
** TUNABLE PARAMETERS:
*/

/* Number of Queues - do not exceed MSIX vectors - 1 */
static int ixv_num_queues = 1;
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it is off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table; this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
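
/*
 * Example (illustrative values only): the TUNABLE_INT hooks above are
 * read from the kernel environment at module load, so they are set in
 * /boot/loader.conf, e.g.:
 *
 *   hw.ixv.num_queues=2
 *   hw.ixv.enable_aim=1
 *   hw.ixv.txd=2048
 *   hw.ixv.rxd=2048
 *
 * Keep num_queues at or below (MSIX vectors - 1); one vector is always
 * reserved for the mailbox interrupt.
 */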

/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  the adapter based on its PCI vendor/device ID.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];


	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixv_strings[ent->index],
			    ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
	struct adapter	*adapter;
	struct ixgbe_hw	*hw;
	int		error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;
#endif

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;
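
	/*
	 * Worked example of the alignment check above (values assumed
	 * from ixgbe.h, where advanced descriptors are 16 bytes and
	 * DBA_ALIGN is 128): the ring must end on a 128-byte boundary,
	 * so
	 *
	 *   (ixv_txd * 16) % 128 == 0  =>  ixv_txd must be a multiple of 8.
	 *
	 * A request like hw.ixv.txd=1020 (1020 * 16 = 16320, not a
	 * multiple of 128) would be rejected and DEFAULT_TXD used instead.
	 */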

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		device_printf(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: it is
	** at this point that the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_shared_code() failed!\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset mbox api to 1.0 */
	error = ixgbe_reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "ixgbe_reset_hw() failed with error %d\n", error);
	if (error) {
		error = EIO;
		goto err_late;
	}

	/* Negotiate mailbox API version */
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (error) {
		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_hw() failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;	/* clear the multicast (I/G) bit */
		addr[0] |= 0x02;	/* set the locally administered (U/L) bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}
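
	/*
	 * Example of the bit twiddling above: the low two bits of the
	 * first address octet are the I/G (bit 0) and U/L (bit 1) flags.
	 * Clearing bit 0 keeps the address unicast; setting bit 1 marks
	 * it locally administered. A random first octet of 0x41 thus
	 * becomes (0x41 & 0xFE) | 0x02 = 0x42.
	 */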

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);

}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			struct tx_ring *txr = que->txr;
			taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Mailbox(link) queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}


/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *) data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + IXGBE_MTU_HDR;
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
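
/*
 * Usage sketch (illustrative): the cases above are reached through the
 * ordinary ifconfig(8) paths, e.g.
 *
 *   # ifconfig ixv0 mtu 9000        -> SIOCSIFMTU
 *   # ifconfig ixv0 up              -> SIOCSIFFLAGS
 *   # ifconfig ixv0 -tso -lro       -> SIOCSIFCAP
 *
 * The MTU is accepted only if it does not exceed
 * IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR.
 */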

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int		error = 0;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, user can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	ixgbe_reset_hw(hw);
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (error)
		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}


/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	queue = 1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}

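/*
 * Worked example of the masking above: each MSI-X vector owns one bit
 * in the VTEIMS/VTEIMC/VTEICS registers. For vector 1, queue = 1 << 1
 * = 0x2; ANDing with IXGBE_EIMS_RTX_QUEUE keeps only valid queue bits.
 * Writing 0x2 to VTEIMS unmasks that vector, to VTEIMC masks it, and
 * to VTEICS fires a software-triggered interrupt on it (used by the
 * watchdog to kick queues that still have work pending).
 */
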
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}

/*********************************************************************
 *
 *  MSI-X Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(adapter->ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
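
/*
 * Worked example of the AIM arithmetic above (illustrative numbers):
 * if a queue moved 64 packets totalling 96000 bytes since the last
 * interrupt, the average frame is 96000/64 = 1500 bytes; adding 24 for
 * framing/CRC gives 1524. That is under the 3000 cap and above the
 * 300-1200 mid range, so it is halved to 762. The result is mirrored
 * into the upper 16 bits and written to VTEITR on the next interrupt,
 * so bulky large-frame traffic gets a longer moderation interval (a
 * lower interrupt rate) while small-frame traffic keeps latency down.
 */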

static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_FDX;
		break;
	}

	IXGBE_CORE_UNLOCK(adapter);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct	ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);

	return;
}

/*
 * This is an iterator function needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	int		hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**	- mark hung queues so we don't schedule on them
	**	- watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning while there
		** are uncleaned descriptors, it increments busy; if
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev, "Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixv_init_locked(adapter);
}

/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    /* 128 == IXGBE_LINK_SPEED_10GB_FULL */
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
		}
	}

	return;
}


/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw	*hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ixgbe_hw	*hw = &adapter->hw;

	/*
	** Make sure BUSMASTER is set; on a VM under
	** KVM it may not be, and that will break things.
	*/
	pci_enable_busmaster(dev);

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* We need this to determine device-specific things */
	ixgbe_set_mac_type(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	return;
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct		ix_queue *que = adapter->queues;
	struct		tx_ring *txr = adapter->tx_rings;
	int		error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->active_queues |= ((u64)1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);
		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->vector = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	** Due to a broken design, QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}

/*
 * Setup MSIX resources; note that the VF
 * device MUST use MSIX; there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want, msgs;


	/* Must have at least 2 MSIX vectors */
	msgs = pci_msix_count(dev);
	if (msgs < 2)
		goto out;
	rid = PCIR_BAR(3);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table\n");
		goto out;
	}

	/*
	** Want vectors for the queues,
	** plus an additional for mailbox.
	*/
	want = adapter->num_queues + 1;
	if (want > msgs) {
		want = msgs;
		adapter->num_queues = msgs - 1;
	} else
		msgs = want;
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
	/* Release in case alloc was insufficient */
	pci_release_msi(dev);
out:
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	device_printf(adapter->dev, "MSIX config error\n");
	return (ENXIO);
}
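
/*
 * Worked example of the vector budget above: one vector per queue plus
 * one for the mailbox, so hw.ixv.num_queues=2 asks for want = 3. If
 * pci_msix_count() only reports 2 messages, want is clamped to 2 and
 * num_queues is cut back to msgs - 1 = 1; the queue then gets vector 0
 * and the mailbox vector 1, matching the rid = vector + 1 allocations
 * done in ixv_allocate_msix() above.
 */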


static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int		rid;
	device_t	dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;
	adapter->hw.back = adapter;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}

static void
ixv_free_pci_resources(struct adapter *adapter)
{
	struct		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below.
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		rid = (adapter->msix != 0) ? 1 : 0;

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}

static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	autoneg;

	if (hw->mac.ops.check_link)
		hw->mac.ops.check_link(hw, &autoneg,
		    &adapter->link_up, FALSE);
}


/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
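
/*
 * Worked example of the ring-base programming above (illustrative
 * address): the 64-bit DMA base is split across two 32-bit registers,
 * so for tdba = 0x000000012345A000 the writes are
 *
 *   VFTDBAL = 0x2345A000   (tdba & 0xffffffff)
 *   VFTDBAH = 0x00000001   (tdba >> 32)
 *
 * and VFTDLEN is the ring size in bytes: with 16-byte descriptors and
 * num_tx_desc = 1024, that is 16384.
 */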


/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, rxcsum, psrtype;

	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
	    IXGBE_PSRTYPE_L2HDR;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}

static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;
	struct		rx_ring *rxr;

	/*
	** We get here through init_locked, meaning
	** a soft reset; this has already cleared
	** the VFTA and other state, so if there
	** have been no VLANs registered, do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr = &adapter->rx_rings[i];
		rxr->vtag_strip = TRUE;
	}

	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the VLAN ids
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
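
/*
 * Worked example of the VFTA indexing used above and in the VLAN event
 * handlers below: the filter table is an array of 32-bit words, one
 * bit per VLAN ID. For VLAN 100, index = 100 >> 5 = 3 and bit =
 * 100 & 0x1F = 4, so registration sets ixv_shadow_vfta[3] |= (1 << 4),
 * and the repopulation loop recovers vid = (3 * 32) + 4 = 100 after a
 * reset.
 */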

/*
** This routine is run via a VLAN config EVENT;
** it enables us to use the HW Filter table since
** we can get the VLAN id. This just creates the
** entry in the soft version of the VFTA; init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

/*
** This routine is run via a VLAN
** unconfig EVENT; remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}

static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	return;
}

/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
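
/*
 * Worked example of the IVAR layout above: each VTIVAR register packs
 * four 8-bit entries, two queues per register with an RX and a TX byte
 * each. For entry 3, type 0 (RX): index = 16 * (3 & 1) + 8 * 0 = 16,
 * so the vector (with IXGBE_IVAR_ALLOC_VAL set) lands in bits 23:16 of
 * VTIVAR(3 >> 1) = VTIVAR(1); the TX entry for the same queue would go
 * at bits 31:24.
 */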

static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
}


/*
** Tasklet handler for MSIX MBX interrupts
**  - done outside of interrupt context since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}

/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
		adapter->stats.vf.saved_reset_vfgprc +=
		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
		adapter->stats.vf.saved_reset_vfgptc +=
		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
		adapter->stats.vf.saved_reset_vfgorc +=
		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
		adapter->stats.vf.saved_reset_vfgotc +=
		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
		adapter->stats.vf.saved_reset_vfmprc +=
		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
	}
}

static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
}

#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
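
/*
** How the macros above extend the hardware counters: the VF
** statistics registers are free-running 32-bit (or 36-bit,
** split across LSB/MSB) counters that wrap, while the driver
** keeps 64-bit software totals. If the fresh reading is smaller
** than the previous one, a wrap is assumed and one full counter
** period (2^32 or 2^36) is added to the high bits of the total
** before the low bits are replaced with the new reading.
**
** Illustrative 32-bit case: with last = 0xFFFFFFF0, a new read
** of 0x00000010 bumps the upper bits by 2^32 and splices in the
** low word, advancing the total by 0x20 instead of going
** backwards. This stays correct as long as the counters are
** polled at least once per wrap period.
*/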

/*
** ixv_update_stats - Update the board statistics counters.
*/
static void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
}

/*
 * Add statistic sysctls for the VF.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts,
	    "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
	    CTLFLAG_RD, &adapter->mbuf_defrag_failed,
	    "m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events,
	    "Watchdog timeouts");

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL,
	    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc,
	    "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc,
	    "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc,
	    "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc,
	    "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc,
	    "Good Octets Transmitted");

	queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
	    CTLFLAG_RD, NULL,
	    "Queue Statistics (collected by SW)");
	queue_list = SYSCTL_CHILDREN(queue_node);

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
	    CTLFLAG_RD, &(que->irqs),
	    "IRQs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
	    CTLFLAG_RD, &(rxr->rx_irq),
	    "RX irqs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
	    CTLFLAG_RD, &(rxr->rx_packets),
	    "RX packets");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
	    CTLFLAG_RD, &(rxr->rx_bytes),
	    "RX bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
	    CTLFLAG_RD, &(rxr->rx_discarded),
	    "Discarded RX packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
	    CTLFLAG_RD, &(txr->total_packets),
	    "TX Packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
	    CTLFLAG_RD, &(txr->no_desc_avail),
	    "# of times not enough descriptors were available during TX");
}
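
/*
** The nodes above hang off this device's sysctl tree, so the
** counters can be read from userland with sysctl(8); e.g., for
** unit 0 (the unit number varies per instance):
**
**	sysctl dev.ixv.0.mac.good_pkts_rcvd
**	sysctl dev.ixv.0.que.rx_packets
*/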
"tx_no_desc", 2126 CTLFLAG_RD, &(txr->no_desc_avail), 2127 "# of times not enough descriptors were available during TX"); 2128 } 2129 2130 static void 2131 ixv_set_sysctl_value(struct adapter *adapter, const char *name, 2132 const char *description, int *limit, int value) 2133 { 2134 *limit = value; 2135 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 2136 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 2137 OID_AUTO, name, CTLFLAG_RW, limit, value, description); 2138 } 2139 2140 /********************************************************************** 2141 * 2142 * This routine is called only when em_display_debug_stats is enabled. 2143 * This routine provides a way to take a look at important statistics 2144 * maintained by the driver and hardware. 2145 * 2146 **********************************************************************/ 2147 static void 2148 ixv_print_debug_info(struct adapter *adapter) 2149 { 2150 device_t dev = adapter->dev; 2151 struct ixgbe_hw *hw = &adapter->hw; 2152 struct ix_queue *que = adapter->queues; 2153 struct rx_ring *rxr; 2154 struct tx_ring *txr; 2155 struct lro_ctrl *lro; 2156 2157 device_printf(dev,"Error Byte Count = %u \n", 2158 IXGBE_READ_REG(hw, IXGBE_ERRBC)); 2159 2160 for (int i = 0; i < adapter->num_queues; i++, que++) { 2161 txr = que->txr; 2162 rxr = que->rxr; 2163 lro = &rxr->lro; 2164 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n", 2165 que->msix, (long)que->irqs); 2166 device_printf(dev,"RX(%d) Packets Received: %lld\n", 2167 rxr->me, (long long)rxr->rx_packets); 2168 device_printf(dev,"RX(%d) Bytes Received: %lu\n", 2169 rxr->me, (long)rxr->rx_bytes); 2170 device_printf(dev,"RX(%d) LRO Queued= %lld\n", 2171 rxr->me, (long long)lro->lro_queued); 2172 device_printf(dev,"RX(%d) LRO Flushed= %lld\n", 2173 rxr->me, (long long)lro->lro_flushed); 2174 device_printf(dev,"TX(%d) Packets Sent: %lu\n", 2175 txr->me, (long)txr->total_packets); 2176 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n", 2177 txr->me, (long)txr->no_desc_avail); 2178 } 2179 2180 device_printf(dev,"MBX IRQ Handled: %lu\n", 2181 (long)adapter->link_irq); 2182 return; 2183 } 2184 2185 static int 2186 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) 2187 { 2188 int error, result; 2189 struct adapter *adapter; 2190 2191 result = -1; 2192 error = sysctl_handle_int(oidp, &result, 0, req); 2193 2194 if (error || !req->newptr) 2195 return (error); 2196 2197 if (result == 1) { 2198 adapter = (struct adapter *) arg1; 2199 ixv_print_debug_info(adapter); 2200 } 2201 return error; 2202 } 2203 2204