/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixgbe.h"

/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "1.5.13-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static int  ixv_probe(device_t);
static int  ixv_attach(device_t);
static int  ixv_detach(device_t);
static int  ixv_shutdown(device_t);
static int  ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void ixv_init(void *);
static void ixv_init_locked(struct adapter *);
static void ixv_stop(void *);
static uint64_t ixv_get_counter(struct ifnet *, ift_counter);
static void ixv_init_device_features(struct adapter *);
static void ixv_media_status(struct ifnet *, struct ifmediareq *);
static int  ixv_media_change(struct ifnet *);
static int  ixv_allocate_pci_resources(struct adapter *);
static int  ixv_allocate_msix(struct adapter *);
static int  ixv_configure_interrupts(struct adapter *);
static void ixv_free_pci_resources(struct adapter *);
static void ixv_local_timer(void *);
static void ixv_setup_interface(device_t, struct adapter *);
static int  ixv_negotiate_api(struct adapter *);

static void ixv_initialize_transmit_units(struct adapter *);
static void ixv_initialize_receive_units(struct adapter *);
static void ixv_initialize_rss_mapping(struct adapter *);
static void ixv_check_link(struct adapter *);

static void ixv_enable_intr(struct adapter *);
static void ixv_disable_intr(struct adapter *);
static void ixv_set_multi(struct adapter *);
static void ixv_update_link_status(struct adapter *);
static int  ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void ixv_set_ivar(struct adapter *, u8, u8, s8);
static void ixv_configure_ivars(struct adapter *);
static u8   *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void ixv_setup_vlan_support(struct adapter *);
static void ixv_register_vlan(void *, struct ifnet *, u16);
static void ixv_unregister_vlan(void *, struct ifnet *, u16);

static void ixv_save_stats(struct adapter *);
static void ixv_init_stats(struct adapter *);
static void ixv_update_stats(struct adapter *);
static void ixv_add_stats_sysctls(struct adapter *);
static void ixv_set_sysctl_value(struct adapter *, const char *,
                                 const char *, int *, int);

/* The MSI-X Interrupt handlers */
static void ixv_msix_que(void *);
static void ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void ixv_handle_que(void *, int);
static void ixv_handle_link(void *, int);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
MODULE_DEPEND(ixv, netmap, 1, 1, 1);

/*
 * TUNABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 1;
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation.
 * The interrupt rate is varied over time based on
 * the traffic seen on each interrupt vector.
 */
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it is off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
 * Number of TX descriptors per ring.
 * This is set higher than RX, as that seems
 * to be the better performing choice.
 */
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
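
/*
 * Illustrative usage note (not from the original sources): the knobs
 * above are loader tunables, so they are normally set in loader.conf(5)
 * before the module is loaded, e.g.:
 *
 *     hw.ixv.num_queues="2"
 *     hw.ixv.enable_aim="1"
 *     hw.ixv.rxd="2048"
 *
 * The values shown are hypothetical; num_queues must not exceed the
 * available MSI-X vectors minus one (one vector is reserved for the
 * mailbox interrupt).
 */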

/*
 * Shadow VFTA table. This is needed because
 * the real VLAN filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];

static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);

/************************************************************************
 * ixv_probe - Device identification routine
 *
 *   Determines whether the driver should be loaded on
 *   this adapter based on its PCI vendor/device ID.
 *
 *   return BUS_PROBE_DEFAULT on success, positive on failure
 ************************************************************************/
static int
ixv_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;
	u16 pci_vendor_id = 0;
	u16 pci_device_id = 0;
	u16 pci_subvendor_id = 0;
	u16 pci_subdevice_id = 0;
	char adapter_name[256];


	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixv_strings[ent->index], ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}

	return (ENXIO);
} /* ixv_probe */

/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set; on a VM under
	 * KVM it may not be, and that will break things.
	 */
	pci_enable_busmaster(dev);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
	    "Debug Info");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "enable_aim", CTLFLAG_RW, &ixv_enable_aim, 1,
	    "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		device_printf(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}
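
	/*
	 * Editorial note on the fixup below: clearing bit 0 of the first
	 * octet keeps the generated address unicast, and setting bit 1
	 * marks it as locally administered, so the random MAC cannot
	 * collide with a vendor-assigned (globally unique) address.
	 */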
	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
	}

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		device_printf(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	error = ixv_allocate_msix(adapter);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	INIT_DEBUGOUT("ixv_attach: end");

	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (error);
} /* ixv_attach */
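
/*
 * Editorial note on the descriptor sanity checks in ixv_attach() above:
 * both rings are validated so that their byte size is a multiple of
 * DBA_ALIGN. Advanced descriptors are 16 bytes, so assuming the usual
 * DBA_ALIGN of 128 the descriptor counts must be multiples of 8 (e.g.
 * ixv_txd = 1024 passes; 1030 falls back to DEFAULT_TXD).
 */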

/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	ether_ifdetach(adapter->ifp);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			struct tx_ring *txr = que->txr;
			taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Mailbox(link) queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	callout_drain(&adapter->timer);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */

/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in the network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 ************************************************************************/
void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* Reprogram the RAR[0] in case the user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest MAC address; the user can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");
		return;
	}

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
} /* ixv_init_locked */

/*
 * MSI-X Interrupt Handlers and Tasklets
 */

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 queue = 1 << vector;
	u32 mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 queue = (u64)1 << vector;	/* shift in 64 bits to avoid overflow */
	u32 mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
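
/*
 * Worked example (illustrative numbers) for the AIM calculation in
 * ixv_msix_que() below: if a ring moved 82400 bytes in 100 packets
 * since the last interrupt, the average frame is 824 bytes; adding 24
 * for hardware framing/CRC gives 848, which is under the 3000 cap and
 * inside the 300-1200 "mid range", so the new interval is 848 / 3 = 282,
 * duplicated into both 16-bit halves of the EITR value.
 */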

/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
void
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	 * Make certain that if the stack
	 * has anything queued the task gets
	 * scheduled to handle it.
	 */
	if (!ixv_ring_empty(adapter->ifp, txr->br))
		ixv_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static void
ixv_msix_mbx(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	++adapter->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return;
} /* ixv_msix_mbx */

/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixv_media_status */

/************************************************************************
 * ixv_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_media_change */


/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int mbx_api[] = { ixgbe_mbox_api_11,
	                  ixgbe_mbox_api_10,
	                  ixgbe_mbox_api_unknown };
	int i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
			return (0);
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */

/************************************************************************
 * ixv_set_multi - Multicast Update
 *
 *   Called whenever the multicast address list is updated.
 ************************************************************************/
static void
ixv_set_multi(struct adapter *adapter)
{
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8 *update_ptr;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Guard against overrunning the mta array */
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, TRUE);

	return;
} /* ixv_set_multi */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixv_set_multi() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;

	return addr;
} /* ixv_mc_array_itr */
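
/*
 * Editorial note: ixv_set_multi() above flattens the multicast list into
 * the mta byte array, and ixv_mc_array_itr() then hands it back to the
 * shared code one 6-byte (IXGBE_ETH_LENGTH_OF_ADDRESS) entry per call,
 * advancing the caller's cursor each time.
 */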

/************************************************************************
 * ixv_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	int hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Check the TX queues status
	 *   - mark hung queues so we don't schedule on them
	 *   - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning while there
		 * are uncleaned descriptors, it increments busy. If
		 * we reach the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
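
/*
 * Editorial note for ixv_update_link_status() below: the magic 128 it
 * compares link_speed against is IXGBE_LINK_SPEED_10GB_FULL (0x80), so
 * 10 Gbps is reported for that value and 1 Gbps for anything else.
 */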

/************************************************************************
 * ixv_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
		}
	}

	return;
} /* ixv_update_link_status */


/************************************************************************
 * ixv_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* Reprogram the RAR[0] in case the user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */


/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Pick up the tunable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	device_t dev = adapter->dev;
	int rid, memrid;

	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	 * There is a slight possibility of a failure mode
	 * in attach that will result in entering this function
	 * before interrupt resources have been initialized, and
	 * in that case we do not want to execute the loops below.
	 * We can detect this reliably by the state of the adapter
	 * res pointer.
	 */
	if (adapter->res == NULL)
		goto mem;

	/*
	 * Release all MSI-X queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Mailbox interrupt last */
	rid = adapter->vector + 1;

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, memrid,
		    adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    adapter->pci_mem);

	return;
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
	if_setgetcounterfn(ifp, ixv_get_counter);
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
		ifp->if_start = ixgbe_legacy_start;
		ixv_start_locked = ixgbe_legacy_start_locked;
		ixv_ring_empty = ixgbe_legacy_ring_empty;
	} else {
		ifp->if_transmit = ixgbe_mq_start;
		ifp->if_qflush = ixgbe_qflush;
		ixv_start_locked = ixgbe_mq_start_locked;
		ixv_ring_empty = drbr_empty;
	}
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	                     |  IFCAP_HWCSUM_IPV6
	                     |  IFCAP_TSO
	                     |  IFCAP_LRO
	                     |  IFCAP_VLAN_HWTAGGING
	                     |  IFCAP_VLAN_HWTSO
	                     |  IFCAP_VLAN_HWCSUM
	                     |  IFCAP_JUMBO_MTU
	                     |  IFCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
} /* ixv_setup_interface */
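
/*
 * Editorial note for ixv_initialize_transmit_units() below: the
 * (8 << 16) written into TXDCTL programs the WTHRESH field (bits 16
 * and up) to 8, so the hardware batches descriptor write-backs eight
 * at a time instead of updating after every packet.
 */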

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */


/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}
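	/*
	 * Illustrative example of the packing above: with two queues the
	 * 64 indirection entries alternate 0,1,0,1,...; bytes accumulate
	 * from the low end, so after four entries reta holds 0x01000100
	 * and is flushed to VFRETA(0), and the same pattern fills the
	 * remaining fifteen registers.
	 */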

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */


/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device-specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixv_initialize_receive_units */
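
/*
 * Illustrative example of the netmap tail programming above: with 1024
 * RX descriptors and 16 slots still owned by userspace
 * (nm_kr_rxspace(kring) == 16), RDT is written as 1024 - 1 - 16 = 1007,
 * leaving those 16 buffers untouched by the NIC.
 */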

/************************************************************************
 * ixv_setup_vlan_support
 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here through init_locked, meaning a soft reset;
	 * that has already cleared the VFTA and other state, so
	 * if no VLANs have been registered, do nothing.
	 */
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
		/*
		 * Let the Rx path know that it needs to store the VLAN
		 * tag as part of extra mbuf info.
		 */
		adapter->rx_rings[i].vtag_strip = TRUE;
	}

	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		 * Reconstruct the VLAN IDs from the
		 * bits set in each word of the array.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
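
/*
 * Illustrative example of the shadow-VFTA indexing used below: VLAN ID
 * 100 maps to array word 100 >> 5 = 3 and bit 100 & 0x1F = 4, so
 * registering it sets bit 4 of ixv_shadow_vfta[3]; the next init then
 * replays that bit into the hardware VFTA.
 */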

/************************************************************************
 * ixv_register_vlan
 *
 *   Run via a VLAN config EVENT; it enables us to use the
 *   HW filter table since we can get the VLAN ID. This just
 *   creates the entry in the soft version of the VFTA; init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */

/************************************************************************
 * ixv_unregister_vlan
 *
 *   Run via a VLAN unconfig EVENT; removes our entry
 *   from the soft VFTA.
 ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */

/************************************************************************
 * ixv_enable_intr
 ************************************************************************/
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixv_enable_intr */

/************************************************************************
 * ixv_disable_intr
 ************************************************************************/
static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);

	return;
} /* ixv_disable_intr */
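
/*
 * Editorial note for ixv_set_ivar() below: each 32-bit VTIVAR register
 * carries four 8-bit entries covering two queues, RX at bit offset 0
 * and TX at bit offset 8 for the even queue, then 16 and 24 for the
 * odd one; e.g. queue 1's TX entry (entry 1, type 1) lands at bits
 * 24-31 of VTIVAR(0).
 */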

/************************************************************************
 * ixv_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *     - entry is the register array entry
 *     - vector is the MSI-X vector for this queue
 *     - type is RX/TX/MISC
 ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {          /* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */

/************************************************************************
 * ixv_configure_ivars
 ************************************************************************/
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
		    IXGBE_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixv_configure_ivars */


/************************************************************************
 * ixv_get_counter
 ************************************************************************/
static uint64_t
ixv_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct adapter *adapter;

	adapter = if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixv_get_counter */
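
/*
 * Editorial note on the statistics routines below: the VF counters in
 * hardware are never reset, so the driver keeps three values per
 * counter - base_ (the reading at the last init), saved_reset_ (totals
 * carried across resets, accumulated in ixv_save_stats()), and last_
 * (the previous raw reading, used for wrap detection).
 */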
/************************************************************************
 * ixv_configure_ivars
 ************************************************************************/
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
		    IXGBE_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixv_configure_ivars */

/************************************************************************
 * ixv_get_counter
 ************************************************************************/
static uint64_t
ixv_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct adapter *adapter;

	adapter = if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixv_get_counter */

/************************************************************************
 * ixv_save_stats
 *
 *   The VF stats registers never have a truly virgin
 *   starting point, so this routine tries to make an
 *   artificial one, marking ground zero on attach as
 *   it were.
 ************************************************************************/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
		adapter->stats.vf.saved_reset_vfgprc +=
		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
		adapter->stats.vf.saved_reset_vfgptc +=
		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
		adapter->stats.vf.saved_reset_vfgorc +=
		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
		adapter->stats.vf.saved_reset_vfgotc +=
		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
		adapter->stats.vf.saved_reset_vfmprc +=
		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
	}
} /* ixv_save_stats */

/************************************************************************
 * ixv_init_stats
 ************************************************************************/
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
} /* ixv_init_stats */

#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
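
/*
 * Illustration (not driver code): the VF packet counters are 32-bit and
 * the octet counters 36-bit, and both wrap.  UPDATE_STAT_32 keeps a
 * 64-bit running count by detecting the wrap.  For example, with
 * last = 0xFFFFFFF0 and a new hardware reading of 0x00000010:
 *
 *	current < last, so count += 0x100000000
 *	count = (count & 0xFFFFFFFF00000000) | 0x00000010
 *
 * i.e. the upper half advances by one full 2^32 period and the lower
 * half is replaced by the fresh register value.  UPDATE_STAT_36 does
 * the same with a 2^36 period for the VFGORC/VFGOTC octet counters.
 */
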
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 ************************************************************************/
static void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw         *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
	IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
	IXGBE_SET_IBYTES(adapter, stats->vfgorc);
	IXGBE_SET_OBYTES(adapter, stats->vfgotc);
	IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
} /* ixv_update_stats */
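
/*
 * Note: ixv_update_stats() must run often enough that no counter can
 * wrap twice between calls; it is expected to be driven from the local
 * timer path.  At 10Gb/s line rate the tightest bound is the 36-bit
 * octet counters: 2^36 bytes / 1.25e9 bytes/s is roughly 55 seconds,
 * so a once-per-second timer has ample margin.  The IXGBE_SET_* values
 * filled in above are what ixv_get_counter() later hands to the stack.
 */
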
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t                dev = adapter->dev;
	struct tx_ring          *txr = adapter->tx_rings;
	struct rx_ring          *rxr = adapter->rx_rings;
	struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct sysctl_oid       *stat_node, *queue_node;
	struct sysctl_oid_list  *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
	    CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(adapter->queues[i].irqs), "IRQs on queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD, &(txr->no_tx_dma_setup),
		    "Driver Tx DMA failure in Tx");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
		    CTLFLAG_RD, &(txr->no_desc_avail),
		    "Not-enough-descriptors count: TX");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
		    CTLFLAG_RD, &(txr->br->br_drops),
		    "Packets dropped in buf_ring");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
	}

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */
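
/*
 * Illustration (not driver code): for unit 0 the tree built above can
 * be inspected from userland, e.g.:
 *
 *	sysctl dev.ixv.0.queue0.tx_packets
 *	sysctl dev.ixv.0.queue0.rx_bytes
 *	sysctl dev.ixv.0.mac.good_pkts_rcvd
 *
 * The TX and RX loops both add a node named "queue%d"; the sysctl code
 * is expected to hand back the existing node when the same node name is
 * added twice, so the TX and RX leaves share one queueN subtree.
 */
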
/************************************************************************
 * ixv_set_sysctl_value
 ************************************************************************/
static void
ixv_set_sysctl_value(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
} /* ixv_set_sysctl_value */

/************************************************************************
 * ixv_print_debug_info
 *
 *   Called only when the debug sysctl is written with a value of 1.
 *   Provides a way to take a look at important statistics
 *   maintained by the driver and hardware.
 ************************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring  *rxr;
	struct tx_ring  *txr;
	struct lro_ctrl *lro;

	device_printf(dev, "Error Byte Count = %u\n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev, "QUE(%d) IRQs Handled: %ld\n",
		    que->msix, (long)que->irqs);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev, "RX(%d) Bytes Received: %ld\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev, "RX(%d) LRO Queued = %lld\n",
		    rxr->me, (long long)lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed = %lld\n",
		    rxr->me, (long long)lro->lro_flushed);
		device_printf(dev, "TX(%d) Packets Sent: %ld\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev, "TX(%d) NO Desc Avail: %ld\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	device_printf(dev, "MBX IRQ Handled: %ld\n", (long)adapter->link_irq);
} /* ixv_print_debug_info */

/************************************************************************
 * ixv_sysctl_debug
 ************************************************************************/
static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	int            error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		ixv_print_debug_info(adapter);
	}

	return (error);
} /* ixv_sysctl_debug */
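
/*
 * Illustration (not driver code): ixv_sysctl_debug is registered as a
 * CTLTYPE_INT handler elsewhere during attach; the OID name used below
 * ("debug") is an assumption for the example.  The dump is triggered by
 * writing 1:
 *
 *	sysctl dev.ixv.0.debug=1
 *
 * Any other value is accepted by the handler but ignored.
 */
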
/************************************************************************
 * ixv_init_device_features
 ************************************************************************/
static void
ixv_init_device_features(struct adapter *adapter)
{
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
	                  | IXGBE_FEATURE_VF
	                  | IXGBE_FEATURE_RSS
	                  | IXGBE_FEATURE_LEGACY_TX;

	/* A tad short on feature flags for VFs, atm. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599_vf:
		break;
	case ixgbe_mac_X540_vf:
		break;
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Is a virtual function (VF) */
	if (adapter->feat_cap & IXGBE_FEATURE_VF)
		adapter->feat_en |= IXGBE_FEATURE_VF;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* Receive-Side Scaling (RSS) */
	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
		adapter->feat_en |= IXGBE_FEATURE_RSS;
	/* Needs advanced context descriptor regardless of offloads req'd */
	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;

	/* Enabled via sysctl... */
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixv_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
} /* ixv_init_device_features */
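
/*
 * Illustration (not driver code): feat_cap is what the hardware could
 * do, feat_en what this load of the driver actually enables.  For an
 * X550 VF with legacy TX requested via the corresponding hw.ixv
 * tunable, the routine above yields:
 *
 *	feat_cap = NETMAP | VF | RSS | LEGACY_TX | NEEDS_CTXD
 *	feat_en  = NETMAP | VF | RSS | LEGACY_TX | NEEDS_CTXD
 *
 * while an 82599 or X540 VF lacks NEEDS_CTXD in both masks.
 */
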
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 ************************************************************************/
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */

/************************************************************************
 * ixv_ioctl - Ioctl entry point
 *
 *   Called when the user wants to configure the interface.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq   *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr  *ifa = (struct ifaddr *)data;
	bool           avoid_reset = FALSE;
#endif
	int            error = 0;

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		 * Calling init results in link renegotiation,
		 * so we avoid doing it when possible.
		 */
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
} /* ixv_ioctl */

/************************************************************************
 * ixv_init
 ************************************************************************/
static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixv_init */

/************************************************************************
 * ixv_handle_que
 ************************************************************************/
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool            more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
		if (!ixv_ring_empty(ifp, txr->br))
			ixv_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
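
/*
 * Note on the pattern above: the MSI-X handler masks the queue's
 * interrupt before scheduling this task, so when ixgbe_rxeof() reports
 * more work the task re-queues itself instead of unmasking.  The
 * interrupt is only re-enabled via ixv_enable_queue() once the ring has
 * been drained, which keeps a busy queue in polling mode and avoids an
 * interrupt storm under load.
 */
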
/************************************************************************
 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring  *txr = adapter->tx_rings;
	int             error, msix_ctrl, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,
			    "Unable to allocate bus resource: que interrupt [%d]\n",
			    vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->active_queues |= (u64)1 << que->msix;
		/*
		 * Bind the MSI-X vector, and thus the
		 * ring to the corresponding CPU.
		 */
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);
		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev,
		    "Unable to allocate bus resource: MBX interrupt [%d]\n",
		    rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->vector = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->link_task, 0, ixv_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE bit in the MSI-X control register again at
	 * this point to cause it to successfully initialize us.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
} /* ixv_allocate_msix */
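
/*
 * Illustration (not driver code): with num_queues = 2 the vector/rid
 * layout set up above is
 *
 *	queue 0  ->  MSI-X vector 0, SYS_RES_IRQ rid 1
 *	queue 1  ->  MSI-X vector 1, SYS_RES_IRQ rid 2
 *	mailbox  ->  MSI-X vector 2, SYS_RES_IRQ rid 3
 *
 * which is why ixv_configure_interrupts() below asks the PCI layer for
 * num_queues + 1 messages.
 */
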
2387 */ 2388 want = adapter->num_queues + 1; 2389 if (want > msgs) { 2390 want = msgs; 2391 adapter->num_queues = msgs - 1; 2392 } else 2393 msgs = want; 2394 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) { 2395 device_printf(adapter->dev, 2396 "Using MSI-X interrupts with %d vectors\n", want); 2397 /* reflect correct sysctl value */ 2398 ixv_num_queues = adapter->num_queues; 2399 2400 return (0); 2401 } 2402 /* Release in case alloc was insufficient */ 2403 pci_release_msi(dev); 2404 out: 2405 if (adapter->msix_mem != NULL) { 2406 bus_release_resource(dev, SYS_RES_MEMORY, rid, 2407 adapter->msix_mem); 2408 adapter->msix_mem = NULL; 2409 } 2410 device_printf(adapter->dev, "MSI-X config error\n"); 2411 2412 return (ENXIO); 2413 } /* ixv_configure_interrupts */ 2414 2415 2416 /************************************************************************ 2417 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts 2418 * 2419 * Done outside of interrupt context since the driver might sleep 2420 ************************************************************************/ 2421 static void 2422 ixv_handle_link(void *context, int pending) 2423 { 2424 struct adapter *adapter = context; 2425 2426 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed, 2427 &adapter->link_up, FALSE); 2428 ixv_update_link_status(adapter); 2429 } /* ixv_handle_link */ 2430 2431 /************************************************************************ 2432 * ixv_check_link - Used in the local timer to poll for link changes 2433 ************************************************************************/ 2434 static void 2435 ixv_check_link(struct adapter *adapter) 2436 { 2437 adapter->hw.mac.get_link_status = TRUE; 2438 2439 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed, 2440 &adapter->link_up, FALSE); 2441 ixv_update_link_status(adapter); 2442 } /* ixv_check_link */ 2443 2444