/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "3.1.0";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);
static int      ixgbe_suspend(device_t);
static int      ixgbe_resume(device_t);
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixgbe_init(void *);
static void     ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
#endif
static void     ixgbe_add_media_types(struct adapter *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static void     ixgbe_get_slot_info(struct ixgbe_hw *);
static int      ixgbe_allocate_msix(struct adapter *);
static int      ixgbe_allocate_legacy(struct adapter *);
static int      ixgbe_setup_msix(struct adapter *);
static void     ixgbe_free_pci_resources(struct adapter *);
static void     ixgbe_local_timer(void *);
static int      ixgbe_setup_interface(device_t, struct adapter *);
static void     ixgbe_config_gpie(struct adapter *);
static void     ixgbe_config_dmac(struct adapter *);
static void     ixgbe_config_delay_values(struct adapter *);
static void     ixgbe_config_link(struct adapter *);
static void     ixgbe_check_eee_support(struct adapter *);
static void     ixgbe_check_wol_support(struct adapter *);
static int      ixgbe_setup_low_power_mode(struct adapter *);
static void     ixgbe_rearm_queues(struct adapter *, u64);

static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_enable_rx_drop(struct adapter *);
static void     ixgbe_disable_rx_drop(struct adapter *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void     ixgbe_configure_ivars(struct adapter *);
static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void     ixgbe_setup_vlan_hw_support(struct adapter *);
static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void     ixgbe_add_device_sysctls(struct adapter *);
static void     ixgbe_add_hw_stats(struct adapter *);

/* Sysctl handlers */
static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);

/* Support for pluggable optic modules */
static bool     ixgbe_sfp_probe(struct adapter *);
static void     ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void     ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void     ixgbe_msix_que(void *);
static void     ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void     ixgbe_handle_que(void *, int);
static void     ixgbe_handle_link(void *, int);
static void     ixgbe_handle_msf(void *, int);
static void     ixgbe_handle_mod(void *, int);
static void     ixgbe_handle_phy(void *, int);

#ifdef IXGBE_FDIR
static void     ixgbe_reinit_fdir(void *, int);
#endif

#ifdef PCI_IOV
static void     ixgbe_ping_all_vfs(struct adapter *);
static void     ixgbe_handle_mbx(void *, int);
static int      ixgbe_init_iov(device_t, u16, const nvlist_t *);
static void     ixgbe_uninit_iov(device_t);
static int      ixgbe_add_vf(device_t, u16, const nvlist_t *);
static void     ixgbe_initialize_iov(struct adapter *);
static void     ixgbe_recalculate_max_frame(struct adapter *);
static void     ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
#endif /* PCI_IOV */


/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
    "IXGBE driver parameters");

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, "
    "-1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, "
    "-1 means unlimited");

/*
** Smart speed setting, default to on.
** This only works as a compile option
** right now as it's applied during attach;
** set this to 'ixgbe_smart_speed_off' to
** disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");
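
/*
 * All of the knobs above are boot-time loader tunables as well as
 * sysctls. A purely illustrative example (values are not
 * recommendations):
 *
 *	# /boot/loader.conf
 *	hw.ix.num_queues=4
 *	hw.ix.rx_process_limit=512
 *
 * Knobs marked CTLFLAG_RWTUN (e.g. enable_aim) can additionally be
 * changed at runtime with sysctl(8); CTLFLAG_RDTUN ones are read-only
 * after boot.
 */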

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
** Defining this on will allow the use
** of unsupported SFP+ modules, note that
** doing so you are on your own :)
*/
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/* Keep a running tab on them for sanity check */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool, this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

#ifdef DEV_NETMAP
/*
 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
 * be a reference on how to implement netmap support in a driver.
 * Additional comments are in ixgbe_netmap.h .
 *
 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
 * that extend the standard driver.
 */
#include <dev/netmap/ixgbe_netmap.h>
#endif /* DEV_NETMAP */

static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  the adapter based on the PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	INIT_DEBUGOUT("ixgbe_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixgbe_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixgbe_strings[ent->index],
			    ixgbe_driver_version);
			device_set_desc_copy(dev, adapter_name);
			++ixgbe_total_ports;
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
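
/*
 * NB: returning BUS_PROBE_DEFAULT rather than plain 0 above leaves room
 * for a more specific driver to claim the device; newbus awards the
 * device to the probe whose (non-positive) return value is closest to
 * zero. A subvendor/subdevice of 0 in the table acts as a wildcard.
 */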

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16             csum;
	u32             ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;
#endif

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(hw);


	/* Set an initial default flow control value */
	adapter->fc = ixgbe_fc_full;

#ifdef PCI_IOV
	if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
		nvlist_t *pf_schema, *vf_schema;

		hw->mbx.ops.init_params(hw);
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (error != 0) {
			device_printf(dev,
			    "Error %d setting up SR-IOV\n", error);
		}
	}
#endif /* PCI_IOV */

	/* Check for certain supported features */
	ixgbe_check_wol_support(adapter);
	ixgbe_check_eee_support(adapter);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANs are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	if (pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}
#endif /* PCI_IOV */

	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
#ifdef PCI_IOV
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
#endif
		taskqueue_drain(adapter->tq, &adapter->phy_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}
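
/*
 * NB: shutdown, suspend and detach all funnel through
 * ixgbe_setup_low_power_mode(), which (on adapters with WOL support)
 * arms the wake-up filter control (WUFC) before the device is dropped
 * into a low-power D-state.
 */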

/**
 * Methods for going from:
 * D0 -> D3: ixgbe_suspend
 * D3 -> D0: ixgbe_resume
 */
static int
ixgbe_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	IXGBE_CORE_LOCK(adapter);

	error = ixgbe_setup_low_power_mode(adapter);

	/* Save state and power down */
	pci_save_state(dev);
	pci_set_powerstate(dev, PCI_POWERSTATE_D3);

	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

static int
ixgbe_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 wus;

	INIT_DEBUGOUT("ixgbe_resume: begin");

	IXGBE_CORE_LOCK(adapter);

	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_restore_state(dev);

	/* Read & clear WUS register */
	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
	if (wus)
		device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
	/* And clear WUFC until next low-power transition */
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

	/*
	 * Required after D3->D0 transition;
	 * will re-advertise all previous advertised speeds
	 */
	if (ifp->if_flags & IFF_UP)
		ixgbe_init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	INIT_DEBUGOUT("ixgbe_resume: end");
	return (0);
}
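
/*
 * A note on the wake registers used above: WUS latches the cause of the
 * last wake event and is cleared by writing ones back to it, while WUFC
 * selects which events may wake the host; WUFC is cleared on resume and
 * only re-armed by the next transition into a low-power state.
 */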

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
#endif
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + IXGBE_MTU_HDR;
			ixgbe_init_locked(adapter);
#ifdef PCI_IOV
			ixgbe_recalculate_max_frame(adapter);
#endif
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* 0xA0/0xA2 are the SFF-8472 module EEPROM and
		   diagnostic monitoring addresses */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to get
 *  to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	adapter->pool = ixgbe_max_vfs(mode);
	/* Queue indices may change with IOV mode */
	for (int i = 0; i < adapter->num_queues; i++) {
		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
	}
#endif
	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
#ifdef PCI_IOV
	ixgbe_initialize_iov(adapter);
#endif
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo frames
	*/
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSIX interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 32 (0x20)
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Configure Energy Efficient Ethernet for supported devices */
	ixgbe_setup_eee(hw, adapter->eee_enabled);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

#ifdef PCI_IOV
	/* Enable the use of the MBX by the VF's */
	{
		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		reg |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
	}
#endif

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}

static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}
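
/*
 * GPIE wires the MAC's software-definable pins (SDPs) to interrupt
 * causes; which pin signals "module inserted" versus "thermal event"
 * is board- and MAC-specific, hence the device-id special cases below.
 */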

static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Fan Failure Interrupt */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		gpie |= IXGBE_SDP1_GPIEN;

	/*
	 * Module detection (SDP2)
	 * Media ready (SDP1)
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN;
		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
			gpie |= IXGBE_SDP1_GPIEN;
	}

	/*
	 * Thermal Failure Detection (X540)
	 * Link Detection (X557)
	 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	return;
}

/*
 * Requires adapter->max_frame_size to be set.
 */
static void
ixgbe_config_delay_values(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.requested_mode = adapter->fc;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
}

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
}
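
/*
 * Deferred (taskqueue) half of a queue interrupt: the ISR masks the
 * vector via ixgbe_disable_queue() and hands remaining work to this
 * task, which re-enables the vector once the rings have been serviced.
 * Note above how the per-queue interrupt mask is split across two
 * 32-bit EIMS/EIMC registers on MACs with more than 32 vectors.
 */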

static void
ixgbe_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more;
	u32		reg_eicr;


	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;


	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixgbe_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
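	/*
	 * Roughly: newitr below becomes the average bytes/packet seen in
	 * the last interval (plus 24 for preamble/CRC overhead), clamped
	 * and scaled, then written to EITR on the next pass. Larger
	 * average frames yield a larger interval, i.e. fewer interrupts;
	 * CNT_WDIS (non-82598) keeps the EITR write from resetting the
	 * internal ITR countdown.
	 */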
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_EITR(que->msix), que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;
	else
		newitr |= IXGBE_EITR_CNT_WDIS;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_queue(adapter, que->msix);
	return;
}
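
/*
 * In MSI-X mode each RX/TX queue pair gets its own vector, serviced by
 * ixgbe_msix_que() above; everything else (link state, SFP module
 * events, thermal and fan alarms, the PF/VF mailbox) shares the single
 * "link" vector handled below.
 */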
" 1609 "PHY IS SHUT DOWN!!\n"); 1610 device_printf(adapter->dev, "System shutdown required!\n"); 1611 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 1612 } 1613 #ifdef PCI_IOV 1614 if (reg_eicr & IXGBE_EICR_MAILBOX) 1615 taskqueue_enqueue(adapter->tq, &adapter->mbx_task); 1616 #endif 1617 } 1618 1619 /* Pluggable optics-related interrupt */ 1620 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) 1621 mod_mask = IXGBE_EICR_GPI_SDP0_X540; 1622 else 1623 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 1624 1625 if (ixgbe_is_sfp(hw)) { 1626 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 1627 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 1628 taskqueue_enqueue(adapter->tq, &adapter->msf_task); 1629 } else if (reg_eicr & mod_mask) { 1630 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask); 1631 taskqueue_enqueue(adapter->tq, &adapter->mod_task); 1632 } 1633 } 1634 1635 /* Check for fan failure */ 1636 if ((hw->device_id == IXGBE_DEV_ID_82598AT) && 1637 (reg_eicr & IXGBE_EICR_GPI_SDP1)) { 1638 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1639 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! " 1640 "REPLACE IMMEDIATELY!!\n"); 1641 } 1642 1643 /* External PHY interrupt */ 1644 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 1645 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) { 1646 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 1647 taskqueue_enqueue(adapter->tq, &adapter->phy_task); 1648 } 1649 1650 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1651 return; 1652 } 1653 1654 /********************************************************************* 1655 * 1656 * Media Ioctl callback 1657 * 1658 * This routine is called whenever the user queries the status of 1659 * the interface using ifconfig. 1660 * 1661 **********************************************************************/ 1662 static void 1663 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) 1664 { 1665 struct adapter *adapter = ifp->if_softc; 1666 struct ixgbe_hw *hw = &adapter->hw; 1667 int layer; 1668 1669 INIT_DEBUGOUT("ixgbe_media_status: begin"); 1670 IXGBE_CORE_LOCK(adapter); 1671 ixgbe_update_link_status(adapter); 1672 1673 ifmr->ifm_status = IFM_AVALID; 1674 ifmr->ifm_active = IFM_ETHER; 1675 1676 if (!adapter->link_active) { 1677 IXGBE_CORE_UNLOCK(adapter); 1678 return; 1679 } 1680 1681 ifmr->ifm_status |= IFM_ACTIVE; 1682 layer = adapter->phy_layer; 1683 1684 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 1685 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 1686 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) 1687 switch (adapter->link_speed) { 1688 case IXGBE_LINK_SPEED_10GB_FULL: 1689 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 1690 break; 1691 case IXGBE_LINK_SPEED_1GB_FULL: 1692 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 1693 break; 1694 case IXGBE_LINK_SPEED_100_FULL: 1695 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 1696 break; 1697 } 1698 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1699 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 1700 switch (adapter->link_speed) { 1701 case IXGBE_LINK_SPEED_10GB_FULL: 1702 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 1703 break; 1704 } 1705 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 1706 switch (adapter->link_speed) { 1707 case IXGBE_LINK_SPEED_10GB_FULL: 1708 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 1709 break; 1710 case IXGBE_LINK_SPEED_1GB_FULL: 1711 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 1712 break; 1713 } 1714 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 1715 switch (adapter->link_speed) { 1716 case IXGBE_LINK_SPEED_10GB_FULL: 1717 

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	** XXX: These need to use the proper media types once
	** they're added.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
}
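
/*
 * User-facing examples of what the handler below services (the media
 * word must be one registered by ixgbe_add_media_types()):
 *
 *	ifconfig ix0 media autoselect
 *	ifconfig ix0 media 10Gbase-T mediaopt full-duplex
 */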
1816 */ 1817 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1818 case IFM_AUTO: 1819 case IFM_10G_T: 1820 speed |= IXGBE_LINK_SPEED_100_FULL; 1821 case IFM_10G_LRM: 1822 case IFM_10G_SR: /* KR, too */ 1823 case IFM_10G_LR: 1824 case IFM_10G_CX4: /* KX4 */ 1825 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1826 case IFM_10G_TWINAX: 1827 speed |= IXGBE_LINK_SPEED_10GB_FULL; 1828 break; 1829 case IFM_1000_T: 1830 speed |= IXGBE_LINK_SPEED_100_FULL; 1831 case IFM_1000_LX: 1832 case IFM_1000_SX: 1833 case IFM_1000_CX: /* KX */ 1834 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1835 break; 1836 case IFM_100_TX: 1837 speed |= IXGBE_LINK_SPEED_100_FULL; 1838 break; 1839 default: 1840 goto invalid; 1841 } 1842 1843 hw->mac.autotry_restart = TRUE; 1844 hw->mac.ops.setup_link(hw, speed, TRUE); 1845 adapter->advertise = 1846 ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) | 1847 ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) | 1848 ((speed & IXGBE_LINK_SPEED_100_FULL) << 0); 1849 1850 return (0); 1851 1852 invalid: 1853 device_printf(adapter->dev, "Invalid media type!\n"); 1854 return (EINVAL); 1855 } 1856 1857 static void 1858 ixgbe_set_promisc(struct adapter *adapter) 1859 { 1860 u_int32_t reg_rctl; 1861 struct ifnet *ifp = adapter->ifp; 1862 int mcnt = 0; 1863 1864 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1865 reg_rctl &= (~IXGBE_FCTRL_UPE); 1866 if (ifp->if_flags & IFF_ALLMULTI) 1867 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 1868 else { 1869 struct ifmultiaddr *ifma; 1870 #if __FreeBSD_version < 800000 1871 IF_ADDR_LOCK(ifp); 1872 #else 1873 if_maddr_rlock(ifp); 1874 #endif 1875 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1876 if (ifma->ifma_addr->sa_family != AF_LINK) 1877 continue; 1878 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1879 break; 1880 mcnt++; 1881 } 1882 #if __FreeBSD_version < 800000 1883 IF_ADDR_UNLOCK(ifp); 1884 #else 1885 if_maddr_runlock(ifp); 1886 #endif 1887 } 1888 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 1889 reg_rctl &= (~IXGBE_FCTRL_MPE); 1890 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); 1891 1892 if (ifp->if_flags & IFF_PROMISC) { 1893 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1894 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); 1895 } else if (ifp->if_flags & IFF_ALLMULTI) { 1896 reg_rctl |= IXGBE_FCTRL_MPE; 1897 reg_rctl &= ~IXGBE_FCTRL_UPE; 1898 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); 1899 } 1900 return; 1901 } 1902 1903 1904 /********************************************************************* 1905 * Multicast Update 1906 * 1907 * This routine is called whenever multicast address list is updated. 
1908 * 1909 **********************************************************************/ 1910 #define IXGBE_RAR_ENTRIES 16 1911 1912 static void 1913 ixgbe_set_multi(struct adapter *adapter) 1914 { 1915 u32 fctrl; 1916 u8 *update_ptr; 1917 struct ifmultiaddr *ifma; 1918 struct ixgbe_mc_addr *mta; 1919 int mcnt = 0; 1920 struct ifnet *ifp = adapter->ifp; 1921 1922 IOCTL_DEBUGOUT("ixgbe_set_multi: begin"); 1923 1924 mta = adapter->mta; 1925 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 1926 1927 #if __FreeBSD_version < 800000 1928 IF_ADDR_LOCK(ifp); 1929 #else 1930 if_maddr_rlock(ifp); 1931 #endif 1932 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1933 if (ifma->ifma_addr->sa_family != AF_LINK) 1934 continue; 1935 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1936 break; 1937 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 1938 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 1939 mta[mcnt].vmdq = adapter->pool; 1940 mcnt++; 1941 } 1942 #if __FreeBSD_version < 800000 1943 IF_ADDR_UNLOCK(ifp); 1944 #else 1945 if_maddr_runlock(ifp); 1946 #endif 1947 1948 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1949 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1950 if (ifp->if_flags & IFF_PROMISC) 1951 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1952 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 1953 ifp->if_flags & IFF_ALLMULTI) { 1954 fctrl |= IXGBE_FCTRL_MPE; 1955 fctrl &= ~IXGBE_FCTRL_UPE; 1956 } else 1957 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1958 1959 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 1960 1961 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { 1962 update_ptr = (u8 *)mta; 1963 ixgbe_update_mc_addr_list(&adapter->hw, 1964 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE); 1965 } 1966 1967 return; 1968 } 1969 1970 /* 1971 * This is an iterator function now needed by the multicast 1972 * shared code. It simply feeds the shared code routine the 1973 * addresses in the array of ixgbe_set_multi() one by one. 1974 */ 1975 static u8 * 1976 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 1977 { 1978 struct ixgbe_mc_addr *mta; 1979 1980 mta = (struct ixgbe_mc_addr *)*update_ptr; 1981 *vmdq = mta->vmdq; 1982 1983 *update_ptr = (u8*)(mta + 1);; 1984 return (mta->addr); 1985 } 1986 1987 1988 /********************************************************************* 1989 * Timer routine 1990 * 1991 * This routine checks for link status,updates statistics, 1992 * and runs the watchdog check. 1993 * 1994 **********************************************************************/ 1995 1996 static void 1997 ixgbe_local_timer(void *arg) 1998 { 1999 struct adapter *adapter = arg; 2000 device_t dev = adapter->dev; 2001 struct ix_queue *que = adapter->queues; 2002 u64 queues = 0; 2003 int hung = 0; 2004 2005 mtx_assert(&adapter->core_mtx, MA_OWNED); 2006 2007 /* Check for pluggable optics */ 2008 if (adapter->sfp_probe) 2009 if (!ixgbe_sfp_probe(adapter)) 2010 goto out; /* Nothing to do */ 2011 2012 ixgbe_update_link_status(adapter); 2013 ixgbe_update_stats_counters(adapter); 2014 2015 /* 2016 ** Check the TX queues status 2017 ** - mark hung queues so we don't schedule on them 2018 ** - watchdog only if all queues show hung 2019 */ 2020 for (int i = 0; i < adapter->num_queues; i++, que++) { 2021 /* Keep track of queues with work for soft irq */ 2022 if (que->txr->busy) 2023 queues |= ((u64)1 << que->me); 2024 /* 2025 ** Each time txeof runs without cleaning, but there 2026 ** are uncleaned descriptors it increments busy. If 2027 ** we get to the MAX we declare it hung. 
2028 */ 2029 if (que->busy == IXGBE_QUEUE_HUNG) { 2030 ++hung; 2031 /* Mark the queue as inactive */ 2032 adapter->active_queues &= ~((u64)1 << que->me); 2033 continue; 2034 } else { 2035 /* Check if we've come back from hung */ 2036 if ((adapter->active_queues & ((u64)1 << que->me)) == 0) 2037 adapter->active_queues |= ((u64)1 << que->me); 2038 } 2039 if (que->busy >= IXGBE_MAX_TX_BUSY) { 2040 device_printf(dev,"Warning queue %d " 2041 "appears to be hung!\n", i); 2042 que->txr->busy = IXGBE_QUEUE_HUNG; 2043 ++hung; 2044 } 2045 2046 } 2047 2048 /* Only truly watchdog if all queues show hung */ 2049 if (hung == adapter->num_queues) 2050 goto watchdog; 2051 else if (queues != 0) { /* Force an IRQ on queues with work */ 2052 ixgbe_rearm_queues(adapter, queues); 2053 } 2054 2055 out: 2056 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); 2057 return; 2058 2059 watchdog: 2060 device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); 2061 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2062 adapter->watchdog_events++; 2063 ixgbe_init_locked(adapter); 2064 } 2065 2066 2067 /* 2068 ** Note: this routine updates the OS on the link state 2069 ** the real check of the hardware only happens with 2070 ** a link interrupt. 2071 */ 2072 static void 2073 ixgbe_update_link_status(struct adapter *adapter) 2074 { 2075 struct ifnet *ifp = adapter->ifp; 2076 device_t dev = adapter->dev; 2077 2078 if (adapter->link_up){ 2079 if (adapter->link_active == FALSE) { 2080 if (bootverbose) 2081 device_printf(dev,"Link is up %d Gbps %s \n", 2082 ((adapter->link_speed == 128)? 10:1), 2083 "Full Duplex"); 2084 adapter->link_active = TRUE; 2085 /* Update any Flow Control changes */ 2086 ixgbe_fc_enable(&adapter->hw); 2087 /* Update DMA coalescing config */ 2088 ixgbe_config_dmac(adapter); 2089 if_link_state_change(ifp, LINK_STATE_UP); 2090 #ifdef PCI_IOV 2091 ixgbe_ping_all_vfs(adapter); 2092 #endif 2093 } 2094 } else { /* Link down */ 2095 if (adapter->link_active == TRUE) { 2096 if (bootverbose) 2097 device_printf(dev,"Link is Down\n"); 2098 if_link_state_change(ifp, LINK_STATE_DOWN); 2099 adapter->link_active = FALSE; 2100 #ifdef PCI_IOV 2101 ixgbe_ping_all_vfs(adapter); 2102 #endif 2103 } 2104 } 2105 2106 return; 2107 } 2108 2109 2110 /********************************************************************* 2111 * 2112 * This routine disables all traffic on the adapter by issuing a 2113 * global reset on the MAC and deallocates TX/RX buffers. 2114 * 2115 **********************************************************************/ 2116 2117 static void 2118 ixgbe_stop(void *arg) 2119 { 2120 struct ifnet *ifp; 2121 struct adapter *adapter = arg; 2122 struct ixgbe_hw *hw = &adapter->hw; 2123 ifp = adapter->ifp; 2124 2125 mtx_assert(&adapter->core_mtx, MA_OWNED); 2126 2127 INIT_DEBUGOUT("ixgbe_stop: begin\n"); 2128 ixgbe_disable_intr(adapter); 2129 callout_stop(&adapter->timer); 2130 2131 /* Let the stack know...*/ 2132 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2133 2134 ixgbe_reset_hw(hw); 2135 hw->adapter_stopped = FALSE; 2136 ixgbe_stop_adapter(hw); 2137 if (hw->mac.type == ixgbe_mac_82599EB) 2138 ixgbe_stop_mac_link_on_d3_82599(hw); 2139 /* Turn off the laser - noop with no optics */ 2140 ixgbe_disable_tx_laser(hw); 2141 2142 /* Update the stack */ 2143 adapter->link_up = FALSE; 2144 ixgbe_update_link_status(adapter); 2145 2146 /* reprogram the RAR[0] in case user changed it. 
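*/
#if 0
	/*
	 * Illustrative sketch, not driver code: in the shared code,
	 * ixgbe_set_rar() amounts to programming the address pair plus
	 * the Address Valid bit; mac_low32/mac_high16 are placeholders
	 * for the packed station address bytes.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(0), mac_low32);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(0), mac_high16 | IXGBE_RAH_AV);
#endif
/* ... which is what the call below does for entry 0.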
*/ 2147 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 2148 2149 return; 2150 } 2151 2152 2153 /********************************************************************* 2154 * 2155 * Determine hardware revision. 2156 * 2157 **********************************************************************/ 2158 static void 2159 ixgbe_identify_hardware(struct adapter *adapter) 2160 { 2161 device_t dev = adapter->dev; 2162 struct ixgbe_hw *hw = &adapter->hw; 2163 2164 /* Save off the information about this board */ 2165 hw->vendor_id = pci_get_vendor(dev); 2166 hw->device_id = pci_get_device(dev); 2167 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); 2168 hw->subsystem_vendor_id = 2169 pci_read_config(dev, PCIR_SUBVEND_0, 2); 2170 hw->subsystem_device_id = 2171 pci_read_config(dev, PCIR_SUBDEV_0, 2); 2172 2173 /* 2174 ** Make sure BUSMASTER is set 2175 */ 2176 pci_enable_busmaster(dev); 2177 2178 /* We need this here to set the num_segs below */ 2179 ixgbe_set_mac_type(hw); 2180 2181 /* Pick up the 82599 settings */ 2182 if (hw->mac.type != ixgbe_mac_82598EB) { 2183 hw->phy.smart_speed = ixgbe_smart_speed; 2184 adapter->num_segs = IXGBE_82599_SCATTER; 2185 } else 2186 adapter->num_segs = IXGBE_82598_SCATTER; 2187 2188 return; 2189 } 2190 2191 /********************************************************************* 2192 * 2193 * Determine optic type 2194 * 2195 **********************************************************************/ 2196 static void 2197 ixgbe_setup_optics(struct adapter *adapter) 2198 { 2199 struct ixgbe_hw *hw = &adapter->hw; 2200 int layer; 2201 2202 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 2203 2204 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { 2205 adapter->optics = IFM_10G_T; 2206 return; 2207 } 2208 2209 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { 2210 adapter->optics = IFM_1000_T; 2211 return; 2212 } 2213 2214 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { 2215 adapter->optics = IFM_1000_SX; 2216 return; 2217 } 2218 2219 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR | 2220 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) { 2221 adapter->optics = IFM_10G_LR; 2222 return; 2223 } 2224 2225 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 2226 adapter->optics = IFM_10G_SR; 2227 return; 2228 } 2229 2230 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) { 2231 adapter->optics = IFM_10G_TWINAX; 2232 return; 2233 } 2234 2235 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | 2236 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) { 2237 adapter->optics = IFM_10G_CX4; 2238 return; 2239 } 2240 2241 /* If we get here just set the default */ 2242 adapter->optics = IFM_ETHER | IFM_AUTO; 2243 return; 2244 } 2245 2246 /********************************************************************* 2247 * 2248 * Setup the Legacy or MSI Interrupt handler 2249 * 2250 **********************************************************************/ 2251 static int 2252 ixgbe_allocate_legacy(struct adapter *adapter) 2253 { 2254 device_t dev = adapter->dev; 2255 struct ix_queue *que = adapter->queues; 2256 #ifndef IXGBE_LEGACY_TX 2257 struct tx_ring *txr = adapter->tx_rings; 2258 #endif 2259 int error, rid = 0; 2260 2261 /* MSI RID at 1 */ 2262 if (adapter->msix == 1) 2263 rid = 1; 2264 2265 /* We allocate a single interrupt resource */ 2266 adapter->res = bus_alloc_resource_any(dev, 2267 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 2268 if (adapter->res == NULL) { 2269 device_printf(dev, "Unable to allocate bus resource: " 2270 "interrupt\n"); 2271 return (ENXIO); 2272 } 2273 2274 /* 2275 * Try 
allocating a fast interrupt and the associated deferred 2276 * processing contexts. 2277 */ 2278 #ifndef IXGBE_LEGACY_TX 2279 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); 2280 #endif 2281 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); 2282 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, 2283 taskqueue_thread_enqueue, &que->tq); 2284 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq", 2285 device_get_nameunit(adapter->dev)); 2286 2287 /* Tasklets for Link, SFP and Multispeed Fiber */ 2288 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); 2289 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); 2290 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); 2291 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter); 2292 #ifdef IXGBE_FDIR 2293 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); 2294 #endif 2295 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT, 2296 taskqueue_thread_enqueue, &adapter->tq); 2297 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq", 2298 device_get_nameunit(adapter->dev)); 2299 2300 if ((error = bus_setup_intr(dev, adapter->res, 2301 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, 2302 que, &adapter->tag)) != 0) { 2303 device_printf(dev, "Failed to register fast interrupt " 2304 "handler: %d\n", error); 2305 taskqueue_free(que->tq); 2306 taskqueue_free(adapter->tq); 2307 que->tq = NULL; 2308 adapter->tq = NULL; 2309 return (error); 2310 } 2311 /* For simplicity in the handlers */ 2312 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK; 2313 2314 return (0); 2315 } 2316 2317 2318 /********************************************************************* 2319 * 2320 * Setup MSIX Interrupt resources and handlers 2321 * 2322 **********************************************************************/ 2323 static int 2324 ixgbe_allocate_msix(struct adapter *adapter) 2325 { 2326 device_t dev = adapter->dev; 2327 struct ix_queue *que = adapter->queues; 2328 struct tx_ring *txr = adapter->tx_rings; 2329 int error, rid, vector = 0; 2330 int cpu_id = 0; 2331 #ifdef RSS 2332 cpuset_t cpu_mask; 2333 #endif 2334 2335 #ifdef RSS 2336 /* 2337 * If we're doing RSS, the number of queues needs to 2338 * match the number of RSS buckets that are configured. 2339 * 2340 * + If there's more queues than RSS buckets, we'll end 2341 * up with queues that get no traffic. 2342 * 2343 * + If there's more RSS buckets than queues, we'll end 2344 * up having multiple RSS buckets map to the same queue, 2345 * so there'll be some contention. 
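 *
 * Illustrative sketch (not driver code): the fold onto queues is a
 * plain modulus, so e.g. 8 buckets over 6 queues lands buckets 6
 * and 7 on queues 0 and 1:
 *
 *	queue_id = rss_get_indirection_to_bucket(i) % adapter->num_queues;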
2346 */ 2347 if (adapter->num_queues != rss_getnumbuckets()) { 2348 device_printf(dev, 2349 "%s: number of queues (%d) != number of RSS buckets (%d)" 2350 "; performance will be impacted.\n", 2351 __func__, 2352 adapter->num_queues, 2353 rss_getnumbuckets()); 2354 } 2355 #endif 2356 2357 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { 2358 rid = vector + 1; 2359 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2360 RF_SHAREABLE | RF_ACTIVE); 2361 if (que->res == NULL) { 2362 device_printf(dev,"Unable to allocate" 2363 " bus resource: que interrupt [%d]\n", vector); 2364 return (ENXIO); 2365 } 2366 /* Set the handler function */ 2367 error = bus_setup_intr(dev, que->res, 2368 INTR_TYPE_NET | INTR_MPSAFE, NULL, 2369 ixgbe_msix_que, que, &que->tag); 2370 if (error) { 2371 que->res = NULL; 2372 device_printf(dev, "Failed to register QUE handler"); 2373 return (error); 2374 } 2375 #if __FreeBSD_version >= 800504 2376 bus_describe_intr(dev, que->res, que->tag, "que %d", i); 2377 #endif 2378 que->msix = vector; 2379 adapter->active_queues |= (u64)(1 << que->msix); 2380 #ifdef RSS 2381 /* 2382 * The queue ID is used as the RSS layer bucket ID. 2383 * We look up the queue ID -> RSS CPU ID and select 2384 * that. 2385 */ 2386 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2387 #else 2388 /* 2389 * Bind the msix vector, and thus the 2390 * rings to the corresponding cpu. 2391 * 2392 * This just happens to match the default RSS round-robin 2393 * bucket -> queue -> CPU allocation. 2394 */ 2395 if (adapter->num_queues > 1) 2396 cpu_id = i; 2397 #endif 2398 if (adapter->num_queues > 1) 2399 bus_bind_intr(dev, que->res, cpu_id); 2400 #ifdef IXGBE_DEBUG 2401 #ifdef RSS 2402 device_printf(dev, 2403 "Bound RSS bucket %d to CPU %d\n", 2404 i, cpu_id); 2405 #else 2406 device_printf(dev, 2407 "Bound queue %d to cpu %d\n", 2408 i, cpu_id); 2409 #endif 2410 #endif /* IXGBE_DEBUG */ 2411 2412 2413 #ifndef IXGBE_LEGACY_TX 2414 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); 2415 #endif 2416 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); 2417 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, 2418 taskqueue_thread_enqueue, &que->tq); 2419 #ifdef RSS 2420 CPU_SETOF(cpu_id, &cpu_mask); 2421 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, 2422 &cpu_mask, 2423 "%s (bucket %d)", 2424 device_get_nameunit(adapter->dev), 2425 cpu_id); 2426 #else 2427 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", 2428 device_get_nameunit(adapter->dev)); 2429 #endif 2430 } 2431 2432 /* and Link */ 2433 rid = vector + 1; 2434 adapter->res = bus_alloc_resource_any(dev, 2435 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 2436 if (!adapter->res) { 2437 device_printf(dev,"Unable to allocate" 2438 " bus resource: Link interrupt [%d]\n", rid); 2439 return (ENXIO); 2440 } 2441 /* Set the link handler function */ 2442 error = bus_setup_intr(dev, adapter->res, 2443 INTR_TYPE_NET | INTR_MPSAFE, NULL, 2444 ixgbe_msix_link, adapter, &adapter->tag); 2445 if (error) { 2446 adapter->res = NULL; 2447 device_printf(dev, "Failed to register LINK handler"); 2448 return (error); 2449 } 2450 #if __FreeBSD_version >= 800504 2451 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); 2452 #endif 2453 adapter->vector = vector; 2454 /* Tasklets for Link, SFP and Multispeed Fiber */ 2455 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); 2456 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); 2457 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); 2458 #ifdef PCI_IOV 
2459 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter); 2460 #endif 2461 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter); 2462 #ifdef IXGBE_FDIR 2463 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); 2464 #endif 2465 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT, 2466 taskqueue_thread_enqueue, &adapter->tq); 2467 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq", 2468 device_get_nameunit(adapter->dev)); 2469 2470 return (0); 2471 } 2472 2473 /* 2474 * Setup Either MSI/X or MSI 2475 */ 2476 static int 2477 ixgbe_setup_msix(struct adapter *adapter) 2478 { 2479 device_t dev = adapter->dev; 2480 int rid, want, queues, msgs; 2481 2482 /* Override by tuneable */ 2483 if (ixgbe_enable_msix == 0) 2484 goto msi; 2485 2486 /* First try MSI/X */ 2487 msgs = pci_msix_count(dev); 2488 if (msgs == 0) 2489 goto msi; 2490 rid = PCIR_BAR(MSIX_82598_BAR); 2491 adapter->msix_mem = bus_alloc_resource_any(dev, 2492 SYS_RES_MEMORY, &rid, RF_ACTIVE); 2493 if (adapter->msix_mem == NULL) { 2494 rid += 4; /* 82599 maps in higher BAR */ 2495 adapter->msix_mem = bus_alloc_resource_any(dev, 2496 SYS_RES_MEMORY, &rid, RF_ACTIVE); 2497 } 2498 if (adapter->msix_mem == NULL) { 2499 /* May not be enabled */ 2500 device_printf(adapter->dev, 2501 "Unable to map MSIX table \n"); 2502 goto msi; 2503 } 2504 2505 /* Figure out a reasonable auto config value */ 2506 queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus; 2507 2508 #ifdef RSS 2509 /* If we're doing RSS, clamp at the number of RSS buckets */ 2510 if (queues > rss_getnumbuckets()) 2511 queues = rss_getnumbuckets(); 2512 #endif 2513 2514 if (ixgbe_num_queues != 0) 2515 queues = ixgbe_num_queues; 2516 2517 /* reflect correct sysctl value */ 2518 ixgbe_num_queues = queues; 2519 2520 /* 2521 ** Want one vector (RX/TX pair) per queue 2522 ** plus an additional for Link. 
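**
** Worked example: 8 queues want 8 + 1 = 9 vectors; if the device
** only advertises 8 via pci_msix_count(), the msgs >= want test
** below fails and the driver falls back to MSI.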
2523 */ 2524 want = queues + 1; 2525 if (msgs >= want) 2526 msgs = want; 2527 else { 2528 device_printf(adapter->dev, 2529 "MSIX Configuration Problem, " 2530 "%d vectors but %d queues wanted!\n", 2531 msgs, want); 2532 goto msi; 2533 } 2534 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) { 2535 device_printf(adapter->dev, 2536 "Using MSIX interrupts with %d vectors\n", msgs); 2537 adapter->num_queues = queues; 2538 return (msgs); 2539 } 2540 /* 2541 ** If MSIX alloc failed or provided us with 2542 ** less than needed, free and fall through to MSI 2543 */ 2544 pci_release_msi(dev); 2545 2546 msi: 2547 if (adapter->msix_mem != NULL) { 2548 bus_release_resource(dev, SYS_RES_MEMORY, 2549 rid, adapter->msix_mem); 2550 adapter->msix_mem = NULL; 2551 } 2552 msgs = 1; 2553 if (pci_alloc_msi(dev, &msgs) == 0) { 2554 device_printf(adapter->dev,"Using an MSI interrupt\n"); 2555 return (msgs); 2556 } 2557 device_printf(adapter->dev,"Using a Legacy interrupt\n"); 2558 return (0); 2559 } 2560 2561 2562 static int 2563 ixgbe_allocate_pci_resources(struct adapter *adapter) 2564 { 2565 int rid; 2566 device_t dev = adapter->dev; 2567 2568 rid = PCIR_BAR(0); 2569 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2570 &rid, RF_ACTIVE); 2571 2572 if (!(adapter->pci_mem)) { 2573 device_printf(dev,"Unable to allocate bus resource: memory\n"); 2574 return (ENXIO); 2575 } 2576 2577 adapter->osdep.mem_bus_space_tag = 2578 rman_get_bustag(adapter->pci_mem); 2579 adapter->osdep.mem_bus_space_handle = 2580 rman_get_bushandle(adapter->pci_mem); 2581 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; 2582 2583 /* Legacy defaults */ 2584 adapter->num_queues = 1; 2585 adapter->hw.back = &adapter->osdep; 2586 2587 /* 2588 ** Now setup MSI or MSI/X, should 2589 ** return us the number of supported 2590 ** vectors. (Will be 1 for MSI) 2591 */ 2592 adapter->msix = ixgbe_setup_msix(adapter); 2593 return (0); 2594 } 2595 2596 static void 2597 ixgbe_free_pci_resources(struct adapter * adapter) 2598 { 2599 struct ix_queue *que = adapter->queues; 2600 device_t dev = adapter->dev; 2601 int rid, memrid; 2602 2603 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 2604 memrid = PCIR_BAR(MSIX_82598_BAR); 2605 else 2606 memrid = PCIR_BAR(MSIX_82599_BAR); 2607 2608 /* 2609 ** There is a slight possibility of a failure mode 2610 ** in attach that will result in entering this function 2611 ** before interrupt resources have been initialized, and 2612 ** in that case we do not want to execute the loops below 2613 ** We can detect this reliably by the state of the adapter 2614 ** res pointer. 2615 */ 2616 if (adapter->res == NULL) 2617 goto mem; 2618 2619 /* 2620 ** Release all msix queue resources: 2621 */ 2622 for (int i = 0; i < adapter->num_queues; i++, que++) { 2623 rid = que->msix + 1; 2624 if (que->tag != NULL) { 2625 bus_teardown_intr(dev, que->res, que->tag); 2626 que->tag = NULL; 2627 } 2628 if (que->res != NULL) 2629 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); 2630 } 2631 2632 2633 /* Clean the Legacy or Link interrupt last */ 2634 if (adapter->vector) /* we are doing MSIX */ 2635 rid = adapter->vector + 1; 2636 else 2637 (adapter->msix != 0) ? 
(rid = 1):(rid = 0); 2638 2639 if (adapter->tag != NULL) { 2640 bus_teardown_intr(dev, adapter->res, adapter->tag); 2641 adapter->tag = NULL; 2642 } 2643 if (adapter->res != NULL) 2644 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); 2645 2646 mem: 2647 if (adapter->msix) 2648 pci_release_msi(dev); 2649 2650 if (adapter->msix_mem != NULL) 2651 bus_release_resource(dev, SYS_RES_MEMORY, 2652 memrid, adapter->msix_mem); 2653 2654 if (adapter->pci_mem != NULL) 2655 bus_release_resource(dev, SYS_RES_MEMORY, 2656 PCIR_BAR(0), adapter->pci_mem); 2657 2658 return; 2659 } 2660 2661 /********************************************************************* 2662 * 2663 * Setup networking device structure and register an interface. 2664 * 2665 **********************************************************************/ 2666 static int 2667 ixgbe_setup_interface(device_t dev, struct adapter *adapter) 2668 { 2669 struct ifnet *ifp; 2670 2671 INIT_DEBUGOUT("ixgbe_setup_interface: begin"); 2672 2673 ifp = adapter->ifp = if_alloc(IFT_ETHER); 2674 if (ifp == NULL) { 2675 device_printf(dev, "can not allocate ifnet structure\n"); 2676 return (-1); 2677 } 2678 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2679 ifp->if_baudrate = IF_Gbps(10); 2680 ifp->if_init = ixgbe_init; 2681 ifp->if_softc = adapter; 2682 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2683 ifp->if_ioctl = ixgbe_ioctl; 2684 #if __FreeBSD_version >= 1100036 2685 if_setgetcounterfn(ifp, ixgbe_get_counter); 2686 #endif 2687 #if __FreeBSD_version >= 1100045 2688 /* TSO parameters */ 2689 ifp->if_hw_tsomax = 65518; 2690 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; 2691 ifp->if_hw_tsomaxsegsize = 2048; 2692 #endif 2693 #ifndef IXGBE_LEGACY_TX 2694 ifp->if_transmit = ixgbe_mq_start; 2695 ifp->if_qflush = ixgbe_qflush; 2696 #else 2697 ifp->if_start = ixgbe_start; 2698 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); 2699 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2; 2700 IFQ_SET_READY(&ifp->if_snd); 2701 #endif 2702 2703 ether_ifattach(ifp, adapter->hw.mac.addr); 2704 2705 adapter->max_frame_size = 2706 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2707 2708 /* 2709 * Tell the upper layer(s) we support long frames. 2710 */ 2711 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 2712 2713 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM; 2714 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 2715 ifp->if_capabilities |= IFCAP_LRO; 2716 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING 2717 | IFCAP_VLAN_HWTSO 2718 | IFCAP_VLAN_MTU 2719 | IFCAP_HWSTATS; 2720 ifp->if_capenable = ifp->if_capabilities; 2721 2722 /* 2723 ** Don't turn this on by default, if vlans are 2724 ** created on another pseudo device (eg. lagg) 2725 ** then vlan events are not passed thru, breaking 2726 ** operation, but with HW FILTER off it works. If 2727 ** using vlans directly on the ixgbe driver you can 2728 ** enable this and get full hardware tag filtering. 
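**
** Usage sketch (assuming the stock ifconfig capability knobs; the
** flag spelling below is the conventional one):
**
**	ifconfig ix0 vlanhwfilter	# vlans sit directly on ix0
**	ifconfig ix0 -vlanhwfilter	# vlans ride a pseudo dev (lagg)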
2729 */ 2730 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2731 2732 /* 2733 * Specify the media types supported by this adapter and register 2734 * callbacks to update media and link information 2735 */ 2736 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, 2737 ixgbe_media_status); 2738 2739 ixgbe_add_media_types(adapter); 2740 2741 /* Autoselect media by default */ 2742 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2743 2744 return (0); 2745 } 2746 2747 static void 2748 ixgbe_add_media_types(struct adapter *adapter) 2749 { 2750 struct ixgbe_hw *hw = &adapter->hw; 2751 device_t dev = adapter->dev; 2752 int layer; 2753 2754 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 2755 2756 /* Media types with matching FreeBSD media defines */ 2757 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) 2758 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL); 2759 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) 2760 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); 2761 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) 2762 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2763 2764 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2765 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2766 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); 2767 2768 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2769 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL); 2770 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) 2771 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 2772 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2773 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 2774 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2775 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 2776 2777 /* 2778 ** Other (no matching FreeBSD media type): 2779 ** To workaround this, we'll assign these completely 2780 ** inappropriate media types. 2781 */ 2782 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 2783 device_printf(dev, "Media supported: 10GbaseKR\n"); 2784 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); 2785 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 2786 } 2787 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 2788 device_printf(dev, "Media supported: 10GbaseKX4\n"); 2789 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); 2790 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 2791 } 2792 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 2793 device_printf(dev, "Media supported: 1000baseKX\n"); 2794 device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); 2795 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL); 2796 } 2797 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) { 2798 /* Someday, someone will care about you... 
*/ 2799 device_printf(dev, "Media supported: 1000baseBX\n"); 2800 } 2801 2802 if (hw->device_id == IXGBE_DEV_ID_82598AT) { 2803 ifmedia_add(&adapter->media, 2804 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2805 ifmedia_add(&adapter->media, 2806 IFM_ETHER | IFM_1000_T, 0, NULL); 2807 } 2808 2809 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2810 } 2811 2812 static void 2813 ixgbe_config_link(struct adapter *adapter) 2814 { 2815 struct ixgbe_hw *hw = &adapter->hw; 2816 u32 autoneg, err = 0; 2817 bool sfp, negotiate; 2818 2819 sfp = ixgbe_is_sfp(hw); 2820 2821 if (sfp) { 2822 if (hw->phy.multispeed_fiber) { 2823 hw->mac.ops.setup_sfp(hw); 2824 ixgbe_enable_tx_laser(hw); 2825 taskqueue_enqueue(adapter->tq, &adapter->msf_task); 2826 } else 2827 taskqueue_enqueue(adapter->tq, &adapter->mod_task); 2828 } else { 2829 if (hw->mac.ops.check_link) 2830 err = ixgbe_check_link(hw, &adapter->link_speed, 2831 &adapter->link_up, FALSE); 2832 if (err) 2833 goto out; 2834 autoneg = hw->phy.autoneg_advertised; 2835 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 2836 err = hw->mac.ops.get_link_capabilities(hw, 2837 &autoneg, &negotiate); 2838 if (err) 2839 goto out; 2840 if (hw->mac.ops.setup_link) 2841 err = hw->mac.ops.setup_link(hw, 2842 autoneg, adapter->link_up); 2843 } 2844 out: 2845 return; 2846 } 2847 2848 2849 /********************************************************************* 2850 * 2851 * Enable transmit units. 2852 * 2853 **********************************************************************/ 2854 static void 2855 ixgbe_initialize_transmit_units(struct adapter *adapter) 2856 { 2857 struct tx_ring *txr = adapter->tx_rings; 2858 struct ixgbe_hw *hw = &adapter->hw; 2859 2860 /* Setup the Base and Length of the Tx Descriptor Ring */ 2861 2862 for (int i = 0; i < adapter->num_queues; i++, txr++) { 2863 u64 tdba = txr->txdma.dma_paddr; 2864 u32 txctrl = 0; 2865 int j = txr->me; 2866 2867 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 2868 (tdba & 0x00000000ffffffffULL)); 2869 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 2870 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 2871 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); 2872 2873 /* Setup the HW Tx Head and Tail descriptor pointers */ 2874 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 2875 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 2876 2877 /* Cache the tail address */ 2878 txr->tail = IXGBE_TDT(j); 2879 2880 /* Set the processing limit */ 2881 txr->process_limit = ixgbe_tx_process_limit; 2882 2883 /* Disable Head Writeback */ 2884 switch (hw->mac.type) { 2885 case ixgbe_mac_82598EB: 2886 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 2887 break; 2888 case ixgbe_mac_82599EB: 2889 case ixgbe_mac_X540: 2890 default: 2891 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 2892 break; 2893 } 2894 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 2895 switch (hw->mac.type) { 2896 case ixgbe_mac_82598EB: 2897 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 2898 break; 2899 case ixgbe_mac_82599EB: 2900 case ixgbe_mac_X540: 2901 default: 2902 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 2903 break; 2904 } 2905 2906 } 2907 2908 if (hw->mac.type != ixgbe_mac_82598EB) { 2909 u32 dmatxctl, rttdcs; 2910 #ifdef PCI_IOV 2911 enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter); 2912 #endif 2913 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2914 dmatxctl |= IXGBE_DMATXCTL_TE; 2915 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 2916 /* Disable arbiter to set MTQC */ 2917 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2918 rttdcs |= 
IXGBE_RTTDCS_ARBDIS; 2919 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2920 #ifdef PCI_IOV 2921 IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode)); 2922 #else 2923 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2924 #endif 2925 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 2926 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2927 } 2928 2929 return; 2930 } 2931 2932 static void 2933 ixgbe_initialise_rss_mapping(struct adapter *adapter) 2934 { 2935 struct ixgbe_hw *hw = &adapter->hw; 2936 u32 reta = 0, mrqc, rss_key[10]; 2937 int queue_id, table_size, index_mult; 2938 #ifdef RSS 2939 u32 rss_hash_config; 2940 #endif 2941 #ifdef PCI_IOV 2942 enum ixgbe_iov_mode mode; 2943 #endif 2944 2945 #ifdef RSS 2946 /* Fetch the configured RSS key */ 2947 rss_getkey((uint8_t *) &rss_key); 2948 #else 2949 /* set up random bits */ 2950 arc4rand(&rss_key, sizeof(rss_key), 0); 2951 #endif 2952 2953 /* Set multiplier for RETA setup and table size based on MAC */ 2954 index_mult = 0x1; 2955 table_size = 128; 2956 switch (adapter->hw.mac.type) { 2957 case ixgbe_mac_82598EB: 2958 index_mult = 0x11; 2959 break; 2960 case ixgbe_mac_X550: 2961 case ixgbe_mac_X550EM_x: 2962 table_size = 512; 2963 break; 2964 default: 2965 break; 2966 } 2967 2968 /* Set up the redirection table */ 2969 for (int i = 0, j = 0; i < table_size; i++, j++) { 2970 if (j == adapter->num_queues) j = 0; 2971 #ifdef RSS 2972 /* 2973 * Fetch the RSS bucket id for the given indirection entry. 2974 * Cap it at the number of configured buckets (which is 2975 * num_queues.) 2976 */ 2977 queue_id = rss_get_indirection_to_bucket(i); 2978 queue_id = queue_id % adapter->num_queues; 2979 #else 2980 queue_id = (j * index_mult); 2981 #endif 2982 /* 2983 * The low 8 bits are for hash value (n+0); 2984 * The next 8 bits are for hash value (n+1), etc. 
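 *
 * Illustrative sketch (not driver code): four 8-bit entries pack
 * into each 32-bit RETA register, so consecutive entries mapping
 * to queues 0,1,2,3 end up as
 *
 *	reta = 0 | (1 << 8) | (2 << 16) | (3 << 24);	== 0x03020100
 *
 * which is what the shift-in-from-the-top loop below builds.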
2985 */ 2986 reta = reta >> 8; 2987 reta = reta | ( ((uint32_t) queue_id) << 24); 2988 if ((i & 3) == 3) { 2989 if (i < 128) 2990 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 2991 else 2992 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta); 2993 reta = 0; 2994 } 2995 } 2996 2997 /* Now fill our hash function seeds */ 2998 for (int i = 0; i < 10; i++) 2999 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 3000 3001 /* Perform hash on these packet types */ 3002 #ifdef RSS 3003 mrqc = IXGBE_MRQC_RSSEN; 3004 rss_hash_config = rss_gethashconfig(); 3005 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 3006 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 3007 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 3008 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 3009 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 3010 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 3011 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 3012 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 3013 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 3014 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 3015 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 3016 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 3017 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 3018 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 3019 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX) 3020 device_printf(adapter->dev, 3021 "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, " 3022 "but not supported\n", __func__); 3023 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 3024 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 3025 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 3026 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 3027 #else 3028 /* 3029 * Disable UDP - IP fragments aren't currently being handled 3030 * and so we end up with a mix of 2-tuple and 4-tuple 3031 * traffic. 3032 */ 3033 mrqc = IXGBE_MRQC_RSSEN 3034 | IXGBE_MRQC_RSS_FIELD_IPV4 3035 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 3036 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 3037 | IXGBE_MRQC_RSS_FIELD_IPV6_EX 3038 | IXGBE_MRQC_RSS_FIELD_IPV6 3039 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 3040 ; 3041 #endif /* RSS */ 3042 #ifdef PCI_IOV 3043 mode = ixgbe_get_iov_mode(adapter); 3044 mrqc |= ixgbe_get_mrqc(mode); 3045 #endif 3046 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 3047 } 3048 3049 3050 /********************************************************************* 3051 * 3052 * Setup receive registers and features. 3053 * 3054 **********************************************************************/ 3055 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 3056 3057 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 3058 3059 static void 3060 ixgbe_initialize_receive_units(struct adapter *adapter) 3061 { 3062 struct rx_ring *rxr = adapter->rx_rings; 3063 struct ixgbe_hw *hw = &adapter->hw; 3064 struct ifnet *ifp = adapter->ifp; 3065 u32 bufsz, fctrl, srrctl, rxcsum; 3066 u32 hlreg; 3067 3068 3069 /* 3070 * Make sure receives are disabled while 3071 * setting up the descriptor ring 3072 */ 3073 ixgbe_disable_rx(hw); 3074 3075 /* Enable broadcasts */ 3076 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3077 fctrl |= IXGBE_FCTRL_BAM; 3078 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3079 fctrl |= IXGBE_FCTRL_DPF; 3080 fctrl |= IXGBE_FCTRL_PMCF; 3081 } 3082 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3083 3084 /* Set for Jumbo Frames? */ 3085 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 3086 if (ifp->if_mtu > ETHERMTU) 3087 hlreg |= IXGBE_HLREG0_JUMBOEN; 3088 else 3089 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 3090 #ifdef DEV_NETMAP 3091 /* crcstrip is conditional in netmap (in RDRXCTL too ?) 
*/ 3092 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip) 3093 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP; 3094 else 3095 hlreg |= IXGBE_HLREG0_RXCRCSTRP; 3096 #endif /* DEV_NETMAP */ 3097 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); 3098 3099 bufsz = (adapter->rx_mbuf_sz + 3100 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 3101 3102 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 3103 u64 rdba = rxr->rxdma.dma_paddr; 3104 int j = rxr->me; 3105 3106 /* Setup the Base and Length of the Rx Descriptor Ring */ 3107 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), 3108 (rdba & 0x00000000ffffffffULL)); 3109 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 3110 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), 3111 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 3112 3113 /* Set up the SRRCTL register */ 3114 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); 3115 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 3116 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 3117 srrctl |= bufsz; 3118 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 3119 3120 /* 3121 * Set DROP_EN iff we have no flow control and >1 queue. 3122 * Note that srrctl was cleared shortly before during reset, 3123 * so we do not need to clear the bit, but do it just in case 3124 * this code is moved elsewhere. 3125 */ 3126 if (adapter->num_queues > 1 && 3127 adapter->hw.fc.requested_mode == ixgbe_fc_none) { 3128 srrctl |= IXGBE_SRRCTL_DROP_EN; 3129 } else { 3130 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 3131 } 3132 3133 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 3134 3135 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 3136 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 3137 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 3138 3139 /* Set the processing limit */ 3140 rxr->process_limit = ixgbe_rx_process_limit; 3141 3142 /* Set the driver rx tail address */ 3143 rxr->tail = IXGBE_RDT(rxr->me); 3144 } 3145 3146 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 3147 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 3148 IXGBE_PSRTYPE_UDPHDR | 3149 IXGBE_PSRTYPE_IPV4HDR | 3150 IXGBE_PSRTYPE_IPV6HDR; 3151 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 3152 } 3153 3154 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3155 3156 ixgbe_initialise_rss_mapping(adapter); 3157 3158 if (adapter->num_queues > 1) { 3159 /* RSS and RX IPP Checksum are mutually exclusive */ 3160 rxcsum |= IXGBE_RXCSUM_PCSD; 3161 } 3162 3163 if (ifp->if_capenable & IFCAP_RXCSUM) 3164 rxcsum |= IXGBE_RXCSUM_PCSD; 3165 3166 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 3167 rxcsum |= IXGBE_RXCSUM_IPPCSE; 3168 3169 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3170 3171 return; 3172 } 3173 3174 3175 /* 3176 ** This routine is run via an vlan config EVENT, 3177 ** it enables us to use the HW Filter table since 3178 ** we can get the vlan id. This just creates the 3179 ** entry in the soft version of the VFTA, init will 3180 ** repopulate the real table. 3181 */ 3182 static void 3183 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) 3184 { 3185 struct adapter *adapter = ifp->if_softc; 3186 u16 index, bit; 3187 3188 if (ifp->if_softc != arg) /* Not our event */ 3189 return; 3190 3191 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 3192 return; 3193 3194 IXGBE_CORE_LOCK(adapter); 3195 index = (vtag >> 5) & 0x7F; 3196 bit = vtag & 0x1F; 3197 adapter->shadow_vfta[index] |= (1 << bit); 3198 ++adapter->num_vlans; 3199 ixgbe_setup_vlan_hw_support(adapter); 3200 IXGBE_CORE_UNLOCK(adapter); 3201 } 3202 3203 /* 3204 ** This routine is run via an vlan 3205 ** unconfig EVENT, remove our entry 3206 ** in the soft vfta. 
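**
** Worked example (not driver code): the shadow VFTA is a bitmap of
** 32-bit words, one bit per VLAN ID; for vtag 100 the math below
** gives
**
**	index = (100 >> 5) & 0x7F = 3
**	bit   =  100 & 0x1F      = 4	(word 3, bit 4)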
3207 */ 3208 static void 3209 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 3210 { 3211 struct adapter *adapter = ifp->if_softc; 3212 u16 index, bit; 3213 3214 if (ifp->if_softc != arg) 3215 return; 3216 3217 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 3218 return; 3219 3220 IXGBE_CORE_LOCK(adapter); 3221 index = (vtag >> 5) & 0x7F; 3222 bit = vtag & 0x1F; 3223 adapter->shadow_vfta[index] &= ~(1 << bit); 3224 --adapter->num_vlans; 3225 /* Re-init to load the changes */ 3226 ixgbe_setup_vlan_hw_support(adapter); 3227 IXGBE_CORE_UNLOCK(adapter); 3228 } 3229 3230 static void 3231 ixgbe_setup_vlan_hw_support(struct adapter *adapter) 3232 { 3233 struct ifnet *ifp = adapter->ifp; 3234 struct ixgbe_hw *hw = &adapter->hw; 3235 struct rx_ring *rxr; 3236 u32 ctrl; 3237 3238 3239 /* 3240 ** We get here thru init_locked, meaning 3241 ** a soft reset, this has already cleared 3242 ** the VFTA and other state, so if there 3243 ** have been no vlan's registered do nothing. 3244 */ 3245 if (adapter->num_vlans == 0) 3246 return; 3247 3248 /* Setup the queues for vlans */ 3249 for (int i = 0; i < adapter->num_queues; i++) { 3250 rxr = &adapter->rx_rings[i]; 3251 /* On 82599 the VLAN enable is per/queue in RXDCTL */ 3252 if (hw->mac.type != ixgbe_mac_82598EB) { 3253 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3254 ctrl |= IXGBE_RXDCTL_VME; 3255 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 3256 } 3257 rxr->vtag_strip = TRUE; 3258 } 3259 3260 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 3261 return; 3262 /* 3263 ** A soft reset zero's out the VFTA, so 3264 ** we need to repopulate it now. 3265 */ 3266 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) 3267 if (adapter->shadow_vfta[i] != 0) 3268 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 3269 adapter->shadow_vfta[i]); 3270 3271 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3272 /* Enable the Filter Table if enabled */ 3273 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 3274 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 3275 ctrl |= IXGBE_VLNCTRL_VFE; 3276 } 3277 if (hw->mac.type == ixgbe_mac_82598EB) 3278 ctrl |= IXGBE_VLNCTRL_VME; 3279 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 3280 } 3281 3282 static void 3283 ixgbe_enable_intr(struct adapter *adapter) 3284 { 3285 struct ixgbe_hw *hw = &adapter->hw; 3286 struct ix_queue *que = adapter->queues; 3287 u32 mask, fwsm; 3288 3289 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 3290 /* Enable Fan Failure detection */ 3291 if (hw->device_id == IXGBE_DEV_ID_82598AT) 3292 mask |= IXGBE_EIMS_GPI_SDP1; 3293 3294 switch (adapter->hw.mac.type) { 3295 case ixgbe_mac_82599EB: 3296 mask |= IXGBE_EIMS_ECC; 3297 /* Temperature sensor on some adapters */ 3298 mask |= IXGBE_EIMS_GPI_SDP0; 3299 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 3300 mask |= IXGBE_EIMS_GPI_SDP1; 3301 mask |= IXGBE_EIMS_GPI_SDP2; 3302 #ifdef IXGBE_FDIR 3303 mask |= IXGBE_EIMS_FLOW_DIR; 3304 #endif 3305 #ifdef PCI_IOV 3306 mask |= IXGBE_EIMS_MAILBOX; 3307 #endif 3308 break; 3309 case ixgbe_mac_X540: 3310 /* Detect if Thermal Sensor is enabled */ 3311 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 3312 if (fwsm & IXGBE_FWSM_TS_ENABLED) 3313 mask |= IXGBE_EIMS_TS; 3314 mask |= IXGBE_EIMS_ECC; 3315 #ifdef IXGBE_FDIR 3316 mask |= IXGBE_EIMS_FLOW_DIR; 3317 #endif 3318 break; 3319 case ixgbe_mac_X550: 3320 case ixgbe_mac_X550EM_x: 3321 /* MAC thermal sensor is automatically enabled */ 3322 mask |= IXGBE_EIMS_TS; 3323 /* Some devices use SDP0 for important information */ 3324 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3325 hw->device_id == 
IXGBE_DEV_ID_X550EM_X_10G_T) 3326 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 3327 mask |= IXGBE_EIMS_ECC; 3328 #ifdef IXGBE_FDIR 3329 mask |= IXGBE_EIMS_FLOW_DIR; 3330 #endif 3331 #ifdef PCI_IOV 3332 mask |= IXGBE_EIMS_MAILBOX; 3333 #endif 3334 /* falls through */ 3335 default: 3336 break; 3337 } 3338 3339 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3340 3341 /* With MSI-X we use auto clear */ 3342 if (adapter->msix_mem) { 3343 mask = IXGBE_EIMS_ENABLE_MASK; 3344 /* Don't autoclear Link */ 3345 mask &= ~IXGBE_EIMS_OTHER; 3346 mask &= ~IXGBE_EIMS_LSC; 3347 #ifdef PCI_IOV 3348 mask &= ~IXGBE_EIMS_MAILBOX; 3349 #endif 3350 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 3351 } 3352 3353 /* 3354 ** Now enable all queues, this is done separately to 3355 ** allow for handling the extended (beyond 32) MSIX 3356 ** vectors that can be used by 82599 3357 */ 3358 for (int i = 0; i < adapter->num_queues; i++, que++) 3359 ixgbe_enable_queue(adapter, que->msix); 3360 3361 IXGBE_WRITE_FLUSH(hw); 3362 3363 return; 3364 } 3365 3366 static void 3367 ixgbe_disable_intr(struct adapter *adapter) 3368 { 3369 if (adapter->msix_mem) 3370 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); 3371 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3372 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3373 } else { 3374 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3375 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3376 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3377 } 3378 IXGBE_WRITE_FLUSH(&adapter->hw); 3379 return; 3380 } 3381 3382 /* 3383 ** Get the width and transaction speed of 3384 ** the slot this adapter is plugged into. 3385 */ 3386 static void 3387 ixgbe_get_slot_info(struct ixgbe_hw *hw) 3388 { 3389 device_t dev = ((struct ixgbe_osdep *)hw->back)->dev; 3390 struct ixgbe_mac_info *mac = &hw->mac; 3391 u16 link; 3392 u32 offset; 3393 3394 /* For most devices simply call the shared code routine */ 3395 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) { 3396 ixgbe_get_bus_info(hw); 3397 /* These devices don't use PCI-E */ 3398 switch (hw->mac.type) { 3399 case ixgbe_mac_X550EM_x: 3400 return; 3401 default: 3402 goto display; 3403 } 3404 } 3405 3406 /* 3407 ** For the Quad port adapter we need to parse back 3408 ** up the PCI tree to find the speed of the expansion 3409 ** slot into which this adapter is plugged. A bit more work. 
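**
** Topology sketch (device names illustrative only):
**
**	ix0 -> pci2 -> pcib2 (on-card switch) -> pci1 -> pcib1 (slot)
**
** hence the two device_get_parent() pairs below; the Link Status
** read then reflects the physical slot rather than the adapter's
** internal switch port.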
3410 */ 3411 dev = device_get_parent(device_get_parent(dev)); 3412 #ifdef IXGBE_DEBUG 3413 device_printf(dev, "parent pcib = %x,%x,%x\n", 3414 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); 3415 #endif 3416 dev = device_get_parent(device_get_parent(dev)); 3417 #ifdef IXGBE_DEBUG 3418 device_printf(dev, "slot pcib = %x,%x,%x\n", 3419 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); 3420 #endif 3421 /* Now get the PCI Express Capabilities offset */ 3422 pci_find_cap(dev, PCIY_EXPRESS, &offset); 3423 /* ...and read the Link Status Register */ 3424 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 3425 switch (link & IXGBE_PCI_LINK_WIDTH) { 3426 case IXGBE_PCI_LINK_WIDTH_1: 3427 hw->bus.width = ixgbe_bus_width_pcie_x1; 3428 break; 3429 case IXGBE_PCI_LINK_WIDTH_2: 3430 hw->bus.width = ixgbe_bus_width_pcie_x2; 3431 break; 3432 case IXGBE_PCI_LINK_WIDTH_4: 3433 hw->bus.width = ixgbe_bus_width_pcie_x4; 3434 break; 3435 case IXGBE_PCI_LINK_WIDTH_8: 3436 hw->bus.width = ixgbe_bus_width_pcie_x8; 3437 break; 3438 default: 3439 hw->bus.width = ixgbe_bus_width_unknown; 3440 break; 3441 } 3442 3443 switch (link & IXGBE_PCI_LINK_SPEED) { 3444 case IXGBE_PCI_LINK_SPEED_2500: 3445 hw->bus.speed = ixgbe_bus_speed_2500; 3446 break; 3447 case IXGBE_PCI_LINK_SPEED_5000: 3448 hw->bus.speed = ixgbe_bus_speed_5000; 3449 break; 3450 case IXGBE_PCI_LINK_SPEED_8000: 3451 hw->bus.speed = ixgbe_bus_speed_8000; 3452 break; 3453 default: 3454 hw->bus.speed = ixgbe_bus_speed_unknown; 3455 break; 3456 } 3457 3458 mac->ops.set_lan_id(hw); 3459 3460 display: 3461 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 3462 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s": 3463 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s": 3464 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"), 3465 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 3466 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 3467 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? 
"Width x1" : 3468 ("Unknown")); 3469 3470 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 3471 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 3472 (hw->bus.speed == ixgbe_bus_speed_2500))) { 3473 device_printf(dev, "PCI-Express bandwidth available" 3474 " for this card\n is not sufficient for" 3475 " optimal performance.\n"); 3476 device_printf(dev, "For optimal performance a x8 " 3477 "PCIE, or x4 PCIE Gen2 slot is required.\n"); 3478 } 3479 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 3480 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 3481 (hw->bus.speed < ixgbe_bus_speed_8000))) { 3482 device_printf(dev, "PCI-Express bandwidth available" 3483 " for this card\n is not sufficient for" 3484 " optimal performance.\n"); 3485 device_printf(dev, "For optimal performance a x8 " 3486 "PCIE Gen3 slot is required.\n"); 3487 } 3488 3489 return; 3490 } 3491 3492 3493 /* 3494 ** Setup the correct IVAR register for a particular MSIX interrupt 3495 ** (yes this is all very magic and confusing :) 3496 ** - entry is the register array entry 3497 ** - vector is the MSIX vector for this queue 3498 ** - type is RX/TX/MISC 3499 */ 3500 static void 3501 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 3502 { 3503 struct ixgbe_hw *hw = &adapter->hw; 3504 u32 ivar, index; 3505 3506 vector |= IXGBE_IVAR_ALLOC_VAL; 3507 3508 switch (hw->mac.type) { 3509 3510 case ixgbe_mac_82598EB: 3511 if (type == -1) 3512 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3513 else 3514 entry += (type * 64); 3515 index = (entry >> 2) & 0x1F; 3516 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3517 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3518 ivar |= (vector << (8 * (entry & 0x3))); 3519 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 3520 break; 3521 3522 case ixgbe_mac_82599EB: 3523 case ixgbe_mac_X540: 3524 case ixgbe_mac_X550: 3525 case ixgbe_mac_X550EM_x: 3526 if (type == -1) { /* MISC IVAR */ 3527 index = (entry & 1) * 8; 3528 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3529 ivar &= ~(0xFF << index); 3530 ivar |= (vector << index); 3531 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3532 } else { /* RX/TX IVARS */ 3533 index = (16 * (entry & 1)) + (8 * type); 3534 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3535 ivar &= ~(0xFF << index); 3536 ivar |= (vector << index); 3537 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3538 } 3539 3540 default: 3541 break; 3542 } 3543 } 3544 3545 static void 3546 ixgbe_configure_ivars(struct adapter *adapter) 3547 { 3548 struct ix_queue *que = adapter->queues; 3549 u32 newitr; 3550 3551 if (ixgbe_max_interrupt_rate > 0) 3552 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3553 else { 3554 /* 3555 ** Disable DMA coalescing if interrupt moderation is 3556 ** disabled. 3557 */ 3558 adapter->dmac = 0; 3559 newitr = 0; 3560 } 3561 3562 for (int i = 0; i < adapter->num_queues; i++, que++) { 3563 struct rx_ring *rxr = &adapter->rx_rings[i]; 3564 struct tx_ring *txr = &adapter->tx_rings[i]; 3565 /* First the RX queue entry */ 3566 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0); 3567 /* ... and the TX */ 3568 ixgbe_set_ivar(adapter, txr->me, que->msix, 1); 3569 /* Set an Initial EITR value */ 3570 IXGBE_WRITE_REG(&adapter->hw, 3571 IXGBE_EITR(que->msix), newitr); 3572 } 3573 3574 /* For the Link interrupt */ 3575 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 3576 } 3577 3578 /* 3579 ** ixgbe_sfp_probe - called in the local timer to 3580 ** determine if a port had optics inserted. 
3581 */ 3582 static bool 3583 ixgbe_sfp_probe(struct adapter *adapter) 3584 { 3585 struct ixgbe_hw *hw = &adapter->hw; 3586 device_t dev = adapter->dev; 3587 bool result = FALSE; 3588 3589 if ((hw->phy.type == ixgbe_phy_nl) && 3590 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3591 s32 ret = hw->phy.ops.identify_sfp(hw); 3592 if (ret) 3593 goto out; 3594 ret = hw->phy.ops.reset(hw); 3595 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3596 device_printf(dev,"Unsupported SFP+ module detected!"); 3597 printf(" Reload driver with supported module.\n"); 3598 adapter->sfp_probe = FALSE; 3599 goto out; 3600 } else 3601 device_printf(dev,"SFP+ module detected!\n"); 3602 /* We now have supported optics */ 3603 adapter->sfp_probe = FALSE; 3604 /* Set the optics type so system reports correctly */ 3605 ixgbe_setup_optics(adapter); 3606 result = TRUE; 3607 } 3608 out: 3609 return (result); 3610 } 3611 3612 /* 3613 ** Tasklet handler for MSIX Link interrupts 3614 ** - do outside interrupt since it might sleep 3615 */ 3616 static void 3617 ixgbe_handle_link(void *context, int pending) 3618 { 3619 struct adapter *adapter = context; 3620 3621 ixgbe_check_link(&adapter->hw, 3622 &adapter->link_speed, &adapter->link_up, 0); 3623 ixgbe_update_link_status(adapter); 3624 } 3625 3626 /* 3627 ** Tasklet for handling SFP module interrupts 3628 */ 3629 static void 3630 ixgbe_handle_mod(void *context, int pending) 3631 { 3632 struct adapter *adapter = context; 3633 struct ixgbe_hw *hw = &adapter->hw; 3634 device_t dev = adapter->dev; 3635 u32 err; 3636 3637 err = hw->phy.ops.identify_sfp(hw); 3638 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3639 device_printf(dev, 3640 "Unsupported SFP+ module type was detected.\n"); 3641 return; 3642 } 3643 3644 err = hw->mac.ops.setup_sfp(hw); 3645 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3646 device_printf(dev, 3647 "Setup failure - unsupported SFP+ module type.\n"); 3648 return; 3649 } 3650 taskqueue_enqueue(adapter->tq, &adapter->msf_task); 3651 return; 3652 } 3653 3654 3655 /* 3656 ** Tasklet for handling MSF (multispeed fiber) interrupts 3657 */ 3658 static void 3659 ixgbe_handle_msf(void *context, int pending) 3660 { 3661 struct adapter *adapter = context; 3662 struct ixgbe_hw *hw = &adapter->hw; 3663 u32 autoneg; 3664 bool negotiate; 3665 int err; 3666 3667 err = hw->phy.ops.identify_sfp(hw); 3668 if (!err) { 3669 ixgbe_setup_optics(adapter); 3670 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics); 3671 } 3672 3673 autoneg = hw->phy.autoneg_advertised; 3674 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3675 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3676 if (hw->mac.ops.setup_link) 3677 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3678 3679 ifmedia_removeall(&adapter->media); 3680 ixgbe_add_media_types(adapter); 3681 return; 3682 } 3683 3684 /* 3685 ** Tasklet for handling interrupts from an external PHY 3686 */ 3687 static void 3688 ixgbe_handle_phy(void *context, int pending) 3689 { 3690 struct adapter *adapter = context; 3691 struct ixgbe_hw *hw = &adapter->hw; 3692 int error; 3693 3694 error = hw->phy.ops.handle_lasi(hw); 3695 if (error == IXGBE_ERR_OVERTEMP) 3696 device_printf(adapter->dev, 3697 "CRITICAL: EXTERNAL PHY OVER TEMP!! 
" 3698 " PHY will downshift to lower power state!\n"); 3699 else if (error) 3700 device_printf(adapter->dev, 3701 "Error handling LASI interrupt: %d\n", 3702 error); 3703 return; 3704 } 3705 3706 #ifdef IXGBE_FDIR 3707 /* 3708 ** Tasklet for reinitializing the Flow Director filter table 3709 */ 3710 static void 3711 ixgbe_reinit_fdir(void *context, int pending) 3712 { 3713 struct adapter *adapter = context; 3714 struct ifnet *ifp = adapter->ifp; 3715 3716 if (adapter->fdir_reinit != 1) /* Shouldn't happen */ 3717 return; 3718 ixgbe_reinit_fdir_tables_82599(&adapter->hw); 3719 adapter->fdir_reinit = 0; 3720 /* re-enable flow director interrupts */ 3721 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 3722 /* Restart the interface */ 3723 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3724 return; 3725 } 3726 #endif 3727 3728 /********************************************************************* 3729 * 3730 * Configure DMA Coalescing 3731 * 3732 **********************************************************************/ 3733 static void 3734 ixgbe_config_dmac(struct adapter *adapter) 3735 { 3736 struct ixgbe_hw *hw = &adapter->hw; 3737 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 3738 3739 if (hw->mac.type < ixgbe_mac_X550 || 3740 !hw->mac.ops.dmac_config) 3741 return; 3742 3743 if (dcfg->watchdog_timer ^ adapter->dmac || 3744 dcfg->link_speed ^ adapter->link_speed) { 3745 dcfg->watchdog_timer = adapter->dmac; 3746 dcfg->fcoe_en = false; 3747 dcfg->link_speed = adapter->link_speed; 3748 dcfg->num_tcs = 1; 3749 3750 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 3751 dcfg->watchdog_timer, dcfg->link_speed); 3752 3753 hw->mac.ops.dmac_config(hw); 3754 } 3755 } 3756 3757 /* 3758 * Checks whether the adapter supports Energy Efficient Ethernet 3759 * or not, based on device ID. 3760 */ 3761 static void 3762 ixgbe_check_eee_support(struct adapter *adapter) 3763 { 3764 struct ixgbe_hw *hw = &adapter->hw; 3765 3766 adapter->eee_enabled = !!(hw->mac.ops.setup_eee); 3767 } 3768 3769 /* 3770 * Checks whether the adapter's ports are capable of 3771 * Wake On LAN by reading the adapter's NVM. 3772 * 3773 * Sets each port's hw->wol_enabled value depending 3774 * on the value read here. 3775 */ 3776 static void 3777 ixgbe_check_wol_support(struct adapter *adapter) 3778 { 3779 struct ixgbe_hw *hw = &adapter->hw; 3780 u16 dev_caps = 0; 3781 3782 /* Find out WoL support for port */ 3783 adapter->wol_support = hw->wol_enabled = 0; 3784 ixgbe_get_device_caps(hw, &dev_caps); 3785 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 3786 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 3787 hw->bus.func == 0)) 3788 adapter->wol_support = hw->wol_enabled = 1; 3789 3790 /* Save initial wake up filter configuration */ 3791 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 3792 3793 return; 3794 } 3795 3796 /* 3797 * Prepare the adapter/port for LPLU and/or WoL 3798 */ 3799 static int 3800 ixgbe_setup_low_power_mode(struct adapter *adapter) 3801 { 3802 struct ixgbe_hw *hw = &adapter->hw; 3803 device_t dev = adapter->dev; 3804 s32 error = 0; 3805 3806 mtx_assert(&adapter->core_mtx, MA_OWNED); 3807 3808 /* Limit power management flow to X550EM baseT */ 3809 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T 3810 && hw->phy.ops.enter_lplu) { 3811 /* Turn off support for APM wakeup. 
(Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	return error;
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;

	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	/*
	 * Tally the missed-packet counters so the gprc workaround and
	 * the iqdrops total below see real values; without this both
	 * missed_rx and total_missed_rx stay at zero.
	 */
	for (int i = 0; i < 8; i++) {
		u32 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mp;
		adapter->stats.pf.mpc[i] += mp;
		total_missed_rx += adapter->stats.pf.mpc[i];
	}

	for (int i = 0; i < 16; i++) {
		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	adapter->stats.pf.gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
3898 */ 3899 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3900 adapter->stats.pf.bprc += bprc; 3901 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3902 if (hw->mac.type == ixgbe_mac_82598EB) 3903 adapter->stats.pf.mprc -= bprc; 3904 3905 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3906 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3907 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3908 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3909 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3910 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3911 3912 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3913 adapter->stats.pf.lxontxc += lxon; 3914 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3915 adapter->stats.pf.lxofftxc += lxoff; 3916 total = lxon + lxoff; 3917 3918 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 3919 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3920 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3921 adapter->stats.pf.gptc -= total; 3922 adapter->stats.pf.mptc -= total; 3923 adapter->stats.pf.ptc64 -= total; 3924 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN; 3925 3926 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3927 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3928 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3929 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3930 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3931 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3932 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3933 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 3934 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3935 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3936 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3937 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3938 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3939 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3940 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3941 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3942 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3943 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3944 /* Only read FCOE on 82599 */ 3945 if (hw->mac.type != ixgbe_mac_82598EB) { 3946 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3947 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3948 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3949 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3950 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3951 } 3952 3953 /* Fill out the OS statistics structure */ 3954 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc); 3955 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc); 3956 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc); 3957 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc); 3958 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc); 3959 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc); 3960 IXGBE_SET_COLLISIONS(adapter, 0); 3961 IXGBE_SET_IQDROPS(adapter, total_missed_rx); 3962 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs 3963 + adapter->stats.pf.rlec); 3964 } 3965 3966 #if __FreeBSD_version >= 1100036 3967 static uint64_t 3968 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt) 3969 { 3970 struct adapter 
*adapter; 3971 struct tx_ring *txr; 3972 uint64_t rv; 3973 3974 adapter = if_getsoftc(ifp); 3975 3976 switch (cnt) { 3977 case IFCOUNTER_IPACKETS: 3978 return (adapter->ipackets); 3979 case IFCOUNTER_OPACKETS: 3980 return (adapter->opackets); 3981 case IFCOUNTER_IBYTES: 3982 return (adapter->ibytes); 3983 case IFCOUNTER_OBYTES: 3984 return (adapter->obytes); 3985 case IFCOUNTER_IMCASTS: 3986 return (adapter->imcasts); 3987 case IFCOUNTER_OMCASTS: 3988 return (adapter->omcasts); 3989 case IFCOUNTER_COLLISIONS: 3990 return (0); 3991 case IFCOUNTER_IQDROPS: 3992 return (adapter->iqdrops); 3993 case IFCOUNTER_OQDROPS: 3994 rv = 0; 3995 txr = adapter->tx_rings; 3996 for (int i = 0; i < adapter->num_queues; i++, txr++) 3997 rv += txr->br->br_drops; 3998 return (rv); 3999 case IFCOUNTER_IERRORS: 4000 return (adapter->ierrors); 4001 default: 4002 return (if_get_counter_default(ifp, cnt)); 4003 } 4004 } 4005 #endif 4006 4007 /** ixgbe_sysctl_tdh_handler - Handler function 4008 * Retrieves the TDH value from the hardware 4009 */ 4010 static int 4011 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 4012 { 4013 int error; 4014 4015 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 4016 if (!txr) return 0; 4017 4018 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); 4019 error = sysctl_handle_int(oidp, &val, 0, req); 4020 if (error || !req->newptr) 4021 return error; 4022 return 0; 4023 } 4024 4025 /** ixgbe_sysctl_tdt_handler - Handler function 4026 * Retrieves the TDT value from the hardware 4027 */ 4028 static int 4029 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 4030 { 4031 int error; 4032 4033 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 4034 if (!txr) return 0; 4035 4036 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); 4037 error = sysctl_handle_int(oidp, &val, 0, req); 4038 if (error || !req->newptr) 4039 return error; 4040 return 0; 4041 } 4042 4043 /** ixgbe_sysctl_rdh_handler - Handler function 4044 * Retrieves the RDH value from the hardware 4045 */ 4046 static int 4047 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 4048 { 4049 int error; 4050 4051 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 4052 if (!rxr) return 0; 4053 4054 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); 4055 error = sysctl_handle_int(oidp, &val, 0, req); 4056 if (error || !req->newptr) 4057 return error; 4058 return 0; 4059 } 4060 4061 /** ixgbe_sysctl_rdt_handler - Handler function 4062 * Retrieves the RDT value from the hardware 4063 */ 4064 static int 4065 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 4066 { 4067 int error; 4068 4069 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 4070 if (!rxr) return 0; 4071 4072 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); 4073 error = sysctl_handle_int(oidp, &val, 0, req); 4074 if (error || !req->newptr) 4075 return error; 4076 return 0; 4077 } 4078 4079 static int 4080 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 4081 { 4082 int error; 4083 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1); 4084 unsigned int reg, usec, rate; 4085 4086 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); 4087 usec = ((reg & 0x0FF8) >> 3); 4088 if (usec > 0) 4089 rate = 500000 / usec; 4090 else 4091 rate = 0; 4092 error = sysctl_handle_int(oidp, &rate, 0, req); 4093 if (error || !req->newptr) 4094 return error; 4095 reg &= ~0xfff; /* default, no limitation */ 4096 ixgbe_max_interrupt_rate = 0; 4097 if (rate > 0 && rate < 500000) { 4098 if 
(rate < 1000) 4099 rate = 1000; 4100 ixgbe_max_interrupt_rate = rate; 4101 reg |= ((4000000/rate) & 0xff8 ); 4102 } 4103 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); 4104 return 0; 4105 } 4106 4107 static void 4108 ixgbe_add_device_sysctls(struct adapter *adapter) 4109 { 4110 device_t dev = adapter->dev; 4111 struct ixgbe_hw *hw = &adapter->hw; 4112 struct sysctl_oid_list *child; 4113 struct sysctl_ctx_list *ctx; 4114 4115 ctx = device_get_sysctl_ctx(dev); 4116 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 4117 4118 /* Sysctls for all devices */ 4119 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", 4120 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4121 ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC); 4122 4123 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", 4124 CTLFLAG_RW, 4125 &ixgbe_enable_aim, 1, "Interrupt Moderation"); 4126 4127 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed", 4128 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4129 ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED); 4130 4131 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test", 4132 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4133 ixgbe_sysctl_thermal_test, "I", "Thermal Test"); 4134 4135 /* for X550 devices */ 4136 if (hw->mac.type >= ixgbe_mac_X550) 4137 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac", 4138 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4139 ixgbe_sysctl_dmac, "I", "DMA Coalesce"); 4140 4141 /* for X550T and X550EM backplane devices */ 4142 if (hw->mac.ops.setup_eee) { 4143 struct sysctl_oid *eee_node; 4144 struct sysctl_oid_list *eee_list; 4145 4146 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee", 4147 CTLFLAG_RD, NULL, 4148 "Energy Efficient Ethernet sysctls"); 4149 eee_list = SYSCTL_CHILDREN(eee_node); 4150 4151 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable", 4152 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4153 ixgbe_sysctl_eee_enable, "I", 4154 "Enable or Disable EEE"); 4155 4156 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated", 4157 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4158 ixgbe_sysctl_eee_negotiated, "I", 4159 "EEE negotiated on link"); 4160 4161 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status", 4162 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4163 ixgbe_sysctl_eee_tx_lpi_status, "I", 4164 "Whether or not TX link is in LPI state"); 4165 4166 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status", 4167 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4168 ixgbe_sysctl_eee_rx_lpi_status, "I", 4169 "Whether or not RX link is in LPI state"); 4170 } 4171 4172 /* for certain 10GBaseT devices */ 4173 if (hw->device_id == IXGBE_DEV_ID_X550T || 4174 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 4175 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable", 4176 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4177 ixgbe_sysctl_wol_enable, "I", 4178 "Enable/Disable Wake on LAN"); 4179 4180 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc", 4181 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4182 ixgbe_sysctl_wufc, "I", 4183 "Enable/Disable Wake Up Filters"); 4184 } 4185 4186 /* for X550EM 10GBaseT devices */ 4187 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 4188 struct sysctl_oid *phy_node; 4189 struct sysctl_oid_list *phy_list; 4190 4191 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy", 4192 CTLFLAG_RD, NULL, 4193 "External PHY sysctls"); 4194 phy_list = SYSCTL_CHILDREN(phy_node); 4195 4196 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp", 4197 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4198 ixgbe_sysctl_phy_temp, "I", 4199 "Current External PHY Temperature (Celsius)"); 4200 4201 SYSCTL_ADD_PROC(ctx, phy_list, 
OID_AUTO, "overtemp_occurred",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}
}
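/*
 * Example usage from userland, assuming the first port attaches
 * as ix0 (device names may differ):
 *
 *	# sysctl dev.ix.0.fc=3			(request full flow control)
 *	# sysctl dev.ix.0.advertise_speed=0x6	(advertise 1G and 10G)
 *
 * The leaf names match the SYSCTL_ADD_PROC entries registered above;
 * the dmac, eee and wol nodes only exist on MACs that support them.
 */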
/*
 * Add sysctl variables, one per statistic, to the system.
 */
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts,
	    "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
	    CTLFLAG_RD, &adapter->mbuf_defrag_failed,
	    "m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &adapter->link_irq,
	    "Link MSIX IRQ Handled");

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
		    sizeof(&adapter->queues[i]),
		    ixgbe_sysctl_interrupt_rate_handler, "IU",
		    "Interrupt Rate");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(adapter->queues[i].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
		    ixgbe_sysctl_tdh_handler, "IU",
		    "Transmit Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
		    ixgbe_sysctl_tdt_handler, "IU",
		    "Transmit Descriptor Tail");
		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &txr->tso_tx,
		    "TSO");
		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD, &txr->no_tx_dma_setup,
		    "Driver tx dma failure in xmit");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &txr->no_desc_avail,
		    "Queue No Descriptor Available");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &txr->total_packets,
		    "Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
		    CTLFLAG_RD, &txr->br->br_drops,
		    "Packets dropped in buf_ring");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl *lro = &rxr->lro;

		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
		    ixgbe_sysctl_rdh_handler, "IU",
		    "Receive Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
		    ixgbe_sysctl_rdt_handler, "IU",
		    "Receive Descriptor Tail");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &rxr->rx_packets,
		    "Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &rxr->rx_bytes,
		    "Queue Bytes Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
		    CTLFLAG_RD, &rxr->rx_copies,
		    "Copied RX Frames");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
	}
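	/*
	 * The per-queue nodes registered above land under the device
	 * tree, e.g. dev.ix.0.queue0.rx_packets or dev.ix.0.queue0.txd_head
	 * (the latter reads the TDH register through its handler).
	 */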
"rx_frames_64", 4383 CTLFLAG_RD, &stats->prc64, 4384 "64 byte frames received "); 4385 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 4386 CTLFLAG_RD, &stats->prc127, 4387 "65-127 byte frames received"); 4388 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 4389 CTLFLAG_RD, &stats->prc255, 4390 "128-255 byte frames received"); 4391 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 4392 CTLFLAG_RD, &stats->prc511, 4393 "256-511 byte frames received"); 4394 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 4395 CTLFLAG_RD, &stats->prc1023, 4396 "512-1023 byte frames received"); 4397 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 4398 CTLFLAG_RD, &stats->prc1522, 4399 "1023-1522 byte frames received"); 4400 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 4401 CTLFLAG_RD, &stats->ruc, 4402 "Receive Undersized"); 4403 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 4404 CTLFLAG_RD, &stats->rfc, 4405 "Fragmented Packets Received "); 4406 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 4407 CTLFLAG_RD, &stats->roc, 4408 "Oversized Packets Received"); 4409 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 4410 CTLFLAG_RD, &stats->rjc, 4411 "Received Jabber"); 4412 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 4413 CTLFLAG_RD, &stats->mngprc, 4414 "Management Packets Received"); 4415 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 4416 CTLFLAG_RD, &stats->mngptc, 4417 "Management Packets Dropped"); 4418 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 4419 CTLFLAG_RD, &stats->xec, 4420 "Checksum Errors"); 4421 4422 /* Packet Transmission Stats */ 4423 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 4424 CTLFLAG_RD, &stats->gotc, 4425 "Good Octets Transmitted"); 4426 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 4427 CTLFLAG_RD, &stats->tpt, 4428 "Total Packets Transmitted"); 4429 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 4430 CTLFLAG_RD, &stats->gptc, 4431 "Good Packets Transmitted"); 4432 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 4433 CTLFLAG_RD, &stats->bptc, 4434 "Broadcast Packets Transmitted"); 4435 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 4436 CTLFLAG_RD, &stats->mptc, 4437 "Multicast Packets Transmitted"); 4438 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 4439 CTLFLAG_RD, &stats->mngptc, 4440 "Management Packets Transmitted"); 4441 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 4442 CTLFLAG_RD, &stats->ptc64, 4443 "64 byte frames transmitted "); 4444 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 4445 CTLFLAG_RD, &stats->ptc127, 4446 "65-127 byte frames transmitted"); 4447 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 4448 CTLFLAG_RD, &stats->ptc255, 4449 "128-255 byte frames transmitted"); 4450 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 4451 CTLFLAG_RD, &stats->ptc511, 4452 "256-511 byte frames transmitted"); 4453 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 4454 CTLFLAG_RD, &stats->ptc1023, 4455 "512-1023 byte frames transmitted"); 4456 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 4457 CTLFLAG_RD, &stats->ptc1522, 4458 "1024-1522 byte frames transmitted"); 4459 } 4460 4461 /* 4462 ** Set flow control using sysctl: 4463 ** Flow control values: 4464 ** 0 - off 4465 ** 1 - rx pause 4466 ** 2 - tx pause 
4467 ** 3 - full 4468 */ 4469 static int 4470 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS) 4471 { 4472 int error, last; 4473 struct adapter *adapter = (struct adapter *) arg1; 4474 4475 last = adapter->fc; 4476 error = sysctl_handle_int(oidp, &adapter->fc, 0, req); 4477 if ((error) || (req->newptr == NULL)) 4478 return (error); 4479 4480 /* Don't bother if it's not changed */ 4481 if (adapter->fc == last) 4482 return (0); 4483 4484 switch (adapter->fc) { 4485 case ixgbe_fc_rx_pause: 4486 case ixgbe_fc_tx_pause: 4487 case ixgbe_fc_full: 4488 adapter->hw.fc.requested_mode = adapter->fc; 4489 if (adapter->num_queues > 1) 4490 ixgbe_disable_rx_drop(adapter); 4491 break; 4492 case ixgbe_fc_none: 4493 adapter->hw.fc.requested_mode = ixgbe_fc_none; 4494 if (adapter->num_queues > 1) 4495 ixgbe_enable_rx_drop(adapter); 4496 break; 4497 default: 4498 adapter->fc = last; 4499 return (EINVAL); 4500 } 4501 /* Don't autoneg if forcing a value */ 4502 adapter->hw.fc.disable_fc_autoneg = TRUE; 4503 ixgbe_fc_enable(&adapter->hw); 4504 return error; 4505 } 4506 4507 /* 4508 ** Control advertised link speed: 4509 ** Flags: 4510 ** 0x1 - advertise 100 Mb 4511 ** 0x2 - advertise 1G 4512 ** 0x4 - advertise 10G 4513 */ 4514 static int 4515 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS) 4516 { 4517 int error = 0, requested; 4518 struct adapter *adapter; 4519 device_t dev; 4520 struct ixgbe_hw *hw; 4521 ixgbe_link_speed speed = 0; 4522 4523 adapter = (struct adapter *) arg1; 4524 dev = adapter->dev; 4525 hw = &adapter->hw; 4526 4527 requested = adapter->advertise; 4528 error = sysctl_handle_int(oidp, &requested, 0, req); 4529 if ((error) || (req->newptr == NULL)) 4530 return (error); 4531 4532 /* Checks to validate new value */ 4533 if (adapter->advertise == requested) /* no change */ 4534 return (0); 4535 4536 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 4537 (hw->phy.multispeed_fiber))) { 4538 device_printf(dev, 4539 "Advertised speed can only be set on copper or " 4540 "multispeed fiber media types.\n"); 4541 return (EINVAL); 4542 } 4543 4544 if (requested < 0x1 || requested > 0x7) { 4545 device_printf(dev, 4546 "Invalid advertised speed; valid modes are 0x1 through 0x7\n"); 4547 return (EINVAL); 4548 } 4549 4550 if ((requested & 0x1) 4551 && (hw->mac.type != ixgbe_mac_X540) 4552 && (hw->mac.type != ixgbe_mac_X550)) { 4553 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n"); 4554 return (EINVAL); 4555 } 4556 4557 /* Set new value and report new advertised mode */ 4558 if (requested & 0x1) 4559 speed |= IXGBE_LINK_SPEED_100_FULL; 4560 if (requested & 0x2) 4561 speed |= IXGBE_LINK_SPEED_1GB_FULL; 4562 if (requested & 0x4) 4563 speed |= IXGBE_LINK_SPEED_10GB_FULL; 4564 4565 hw->mac.autotry_restart = TRUE; 4566 hw->mac.ops.setup_link(hw, speed, TRUE); 4567 adapter->advertise = requested; 4568 4569 return (error); 4570 } 4571 4572 /* 4573 * The following two sysctls are for X550 BaseT devices; 4574 * they deal with the external PHY used in them. 
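 * Both handlers return ENODEV on any device other than the
 * X550EM_X_10G_T, and EAGAIN when the MDIO register read fails.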
4575 */ 4576 static int 4577 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) 4578 { 4579 struct adapter *adapter = (struct adapter *) arg1; 4580 struct ixgbe_hw *hw = &adapter->hw; 4581 u16 reg; 4582 4583 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4584 device_printf(adapter->dev, 4585 "Device has no supported external thermal sensor.\n"); 4586 return (ENODEV); 4587 } 4588 4589 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 4590 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, 4591 ®)) { 4592 device_printf(adapter->dev, 4593 "Error reading from PHY's current temperature register\n"); 4594 return (EAGAIN); 4595 } 4596 4597 /* Shift temp for output */ 4598 reg = reg >> 8; 4599 4600 return (sysctl_handle_int(oidp, NULL, reg, req)); 4601 } 4602 4603 /* 4604 * Reports whether the current PHY temperature is over 4605 * the overtemp threshold. 4606 * - This is reported directly from the PHY 4607 */ 4608 static int 4609 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) 4610 { 4611 struct adapter *adapter = (struct adapter *) arg1; 4612 struct ixgbe_hw *hw = &adapter->hw; 4613 u16 reg; 4614 4615 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4616 device_printf(adapter->dev, 4617 "Device has no supported external thermal sensor.\n"); 4618 return (ENODEV); 4619 } 4620 4621 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 4622 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, 4623 ®)) { 4624 device_printf(adapter->dev, 4625 "Error reading from PHY's temperature status register\n"); 4626 return (EAGAIN); 4627 } 4628 4629 /* Get occurrence bit */ 4630 reg = !!(reg & 0x4000); 4631 return (sysctl_handle_int(oidp, 0, reg, req)); 4632 } 4633 4634 /* 4635 ** Thermal Shutdown Trigger (internal MAC) 4636 ** - Set this to 1 to cause an overtemp event to occur 4637 */ 4638 static int 4639 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS) 4640 { 4641 struct adapter *adapter = (struct adapter *) arg1; 4642 struct ixgbe_hw *hw = &adapter->hw; 4643 int error, fire = 0; 4644 4645 error = sysctl_handle_int(oidp, &fire, 0, req); 4646 if ((error) || (req->newptr == NULL)) 4647 return (error); 4648 4649 if (fire) { 4650 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS); 4651 reg |= IXGBE_EICR_TS; 4652 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg); 4653 } 4654 4655 return (0); 4656 } 4657 4658 /* 4659 ** Manage DMA Coalescing. 4660 ** Control values: 4661 ** 0/1 - off / on (use default value of 1000) 4662 ** 4663 ** Legal timer values are: 4664 ** 50,100,250,500,1000,2000,5000,10000 4665 ** 4666 ** Turning off interrupt moderation will also turn this off. 
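**
** Example: "sysctl dev.ix.0.dmac=1000" enables coalescing with the
** default 1000 usec watchdog, and "sysctl dev.ix.0.dmac=0" turns it
** off again.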
4667 */ 4668 static int 4669 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4670 { 4671 struct adapter *adapter = (struct adapter *) arg1; 4672 struct ixgbe_hw *hw = &adapter->hw; 4673 struct ifnet *ifp = adapter->ifp; 4674 int error; 4675 u16 oldval; 4676 4677 oldval = adapter->dmac; 4678 error = sysctl_handle_int(oidp, &adapter->dmac, 0, req); 4679 if ((error) || (req->newptr == NULL)) 4680 return (error); 4681 4682 switch (hw->mac.type) { 4683 case ixgbe_mac_X550: 4684 case ixgbe_mac_X550EM_x: 4685 break; 4686 default: 4687 device_printf(adapter->dev, 4688 "DMA Coalescing is only supported on X550 devices\n"); 4689 return (ENODEV); 4690 } 4691 4692 switch (adapter->dmac) { 4693 case 0: 4694 /* Disabled */ 4695 break; 4696 case 1: /* Enable and use default */ 4697 adapter->dmac = 1000; 4698 break; 4699 case 50: 4700 case 100: 4701 case 250: 4702 case 500: 4703 case 1000: 4704 case 2000: 4705 case 5000: 4706 case 10000: 4707 /* Legal values - allow */ 4708 break; 4709 default: 4710 /* Do nothing, illegal value */ 4711 adapter->dmac = oldval; 4712 return (EINVAL); 4713 } 4714 4715 /* Re-initialize hardware if it's already running */ 4716 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4717 ixgbe_init(adapter); 4718 4719 return (0); 4720 } 4721 4722 /* 4723 * Sysctl to enable/disable the WoL capability, if supported by the adapter. 4724 * Values: 4725 * 0 - disabled 4726 * 1 - enabled 4727 */ 4728 static int 4729 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) 4730 { 4731 struct adapter *adapter = (struct adapter *) arg1; 4732 struct ixgbe_hw *hw = &adapter->hw; 4733 int new_wol_enabled; 4734 int error = 0; 4735 4736 new_wol_enabled = hw->wol_enabled; 4737 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); 4738 if ((error) || (req->newptr == NULL)) 4739 return (error); 4740 if (new_wol_enabled == hw->wol_enabled) 4741 return (0); 4742 4743 if (new_wol_enabled > 0 && !adapter->wol_support) 4744 return (ENODEV); 4745 else 4746 hw->wol_enabled = !!(new_wol_enabled); 4747 4748 return (0); 4749 } 4750 4751 /* 4752 * Sysctl to enable/disable the Energy Efficient Ethernet capability, 4753 * if supported by the adapter. 4754 * Values: 4755 * 0 - disabled 4756 * 1 - enabled 4757 */ 4758 static int 4759 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4760 { 4761 struct adapter *adapter = (struct adapter *) arg1; 4762 struct ixgbe_hw *hw = &adapter->hw; 4763 struct ifnet *ifp = adapter->ifp; 4764 int new_eee_enabled, error = 0; 4765 4766 new_eee_enabled = adapter->eee_enabled; 4767 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req); 4768 if ((error) || (req->newptr == NULL)) 4769 return (error); 4770 if (new_eee_enabled == adapter->eee_enabled) 4771 return (0); 4772 4773 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee) 4774 return (ENODEV); 4775 else 4776 adapter->eee_enabled = !!(new_eee_enabled); 4777 4778 /* Re-initialize hardware if it's already running */ 4779 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4780 ixgbe_init(adapter); 4781 4782 return (0); 4783 } 4784 4785 /* 4786 * Read-only sysctl indicating whether EEE support was negotiated 4787 * on the link. 4788 */ 4789 static int 4790 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS) 4791 { 4792 struct adapter *adapter = (struct adapter *) arg1; 4793 struct ixgbe_hw *hw = &adapter->hw; 4794 bool status; 4795 4796 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG); 4797 4798 return (sysctl_handle_int(oidp, 0, status, req)); 4799 } 4800 4801 /* 4802 * Read-only sysctl indicating whether RX Link is in LPI state. 
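 * LPI is the EEE low-power idle state; the bit is sampled from
 * IXGBE_EEE_STAT, so this reports the state at the moment of the read.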
 */
static int
ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *) arg1;
	struct ixgbe_hw *hw = &adapter->hw;
	bool status;

	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
	    IXGBE_EEE_RX_LPI_STATUS);

	return (sysctl_handle_int(oidp, 0, status, req));
}

/*
 * Read-only sysctl indicating whether TX Link is in LPI state.
 */
static int
ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *) arg1;
	struct ixgbe_hw *hw = &adapter->hw;
	bool status;

	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
	    IXGBE_EEE_TX_LPI_STATUS);

	return (sysctl_handle_int(oidp, 0, status, req));
}

/*
 * Sysctl to select the types of packets that will wake the
 * adapter up upon receipt.
 * WUFC - Wake Up Filter Control
 * Flags:
 *	0x1  - Link Status Change
 *	0x2  - Magic Packet
 *	0x4  - Direct Exact
 *	0x8  - Directed Multicast
 *	0x10 - Broadcast
 *	0x20 - ARP/IPv4 Request Packet
 *	0x40 - Direct IPv4 Packet
 *	0x80 - Direct IPv6 Packet
 *
 * Setting any other flag causes the sysctl to return EINVAL.
 */
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *) arg1;
	int error = 0;
	u32 new_wufc;

	new_wufc = adapter->wufc;

	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/* Replace the filter byte, keeping the reserved upper bits */
	new_wufc &= 0xff;
	new_wufc |= (adapter->wufc & 0xffffff00);
	adapter->wufc = new_wufc;

	return (0);
}
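/*
 * Example: "sysctl dev.ix.0.wufc=0x2" arms wake on magic packet only,
 * and "sysctl dev.ix.0.wufc=0x22" adds ARP/IPv4 request wake; any
 * value with bits set above 0xff is rejected with EINVAL.
 */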
/*
** Enable the hardware to drop packets when the buffer is
** full. This is useful with multiqueue, so that no single
** queue being full stalls the entire RX engine. We only
** enable this when Multiqueue is in use AND Flow Control
** is disabled.
*/
static void
ixgbe_enable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	for (int i = 0; i < adapter->num_queues; i++) {
		struct rx_ring *rxr = &adapter->rx_rings[i];
		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}
#ifdef PCI_IOV
	/* enable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
#endif
}

static void
ixgbe_disable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	for (int i = 0; i < adapter->num_queues; i++) {
		struct rx_ring *rxr = &adapter->rx_rings[i];
		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}
#ifdef PCI_IOV
	/* disable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
#endif
}

static void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

#ifdef PCI_IOV

/*
** Support functions for SRIOV/VF management
*/

static void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
}


static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		//vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag.
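		 * VMVIR holds both the tag itself and the VLANA
		 * "always insert default VLAN" action for this pool.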
*/ 5000 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT); 5001 } 5002 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr); 5003 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir); 5004 } 5005 5006 5007 static boolean_t 5008 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf) 5009 { 5010 5011 /* 5012 * Frame size compatibility between PF and VF is only a problem on 5013 * 82599-based cards. X540 and later support any combination of jumbo 5014 * frames on PFs and VFs. 5015 */ 5016 if (adapter->hw.mac.type != ixgbe_mac_82599EB) 5017 return (TRUE); 5018 5019 switch (vf->api_ver) { 5020 case IXGBE_API_VER_1_0: 5021 case IXGBE_API_VER_UNKNOWN: 5022 /* 5023 * On legacy (1.0 and older) VF versions, we don't support jumbo 5024 * frames on either the PF or the VF. 5025 */ 5026 if (adapter->max_frame_size > ETHER_MAX_LEN || 5027 vf->max_frame_size > ETHER_MAX_LEN) 5028 return (FALSE); 5029 5030 return (TRUE); 5031 5032 break; 5033 case IXGBE_API_VER_1_1: 5034 default: 5035 /* 5036 * 1.1 or later VF versions always work if they aren't using 5037 * jumbo frames. 5038 */ 5039 if (vf->max_frame_size <= ETHER_MAX_LEN) 5040 return (TRUE); 5041 5042 /* 5043 * Jumbo frames only work with VFs if the PF is also using jumbo 5044 * frames. 5045 */ 5046 if (adapter->max_frame_size <= ETHER_MAX_LEN) 5047 return (TRUE); 5048 5049 return (FALSE); 5050 5051 } 5052 } 5053 5054 5055 static void 5056 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf) 5057 { 5058 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan); 5059 5060 // XXX clear multicast addresses 5061 5062 ixgbe_clear_rar(&adapter->hw, vf->rar_index); 5063 5064 vf->api_ver = IXGBE_API_VER_UNKNOWN; 5065 } 5066 5067 5068 static void 5069 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf) 5070 { 5071 struct ixgbe_hw *hw; 5072 uint32_t vf_index, vfte; 5073 5074 hw = &adapter->hw; 5075 5076 vf_index = IXGBE_VF_INDEX(vf->pool); 5077 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index)); 5078 vfte |= IXGBE_VF_BIT(vf->pool); 5079 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte); 5080 } 5081 5082 5083 static void 5084 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf) 5085 { 5086 struct ixgbe_hw *hw; 5087 uint32_t vf_index, vfre; 5088 5089 hw = &adapter->hw; 5090 5091 vf_index = IXGBE_VF_INDEX(vf->pool); 5092 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index)); 5093 if (ixgbe_vf_frame_size_compatible(adapter, vf)) 5094 vfre |= IXGBE_VF_BIT(vf->pool); 5095 else 5096 vfre &= ~IXGBE_VF_BIT(vf->pool); 5097 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre); 5098 } 5099 5100 5101 static void 5102 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) 5103 { 5104 struct ixgbe_hw *hw; 5105 uint32_t ack; 5106 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN]; 5107 5108 hw = &adapter->hw; 5109 5110 ixgbe_process_vf_reset(adapter, vf); 5111 5112 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { 5113 ixgbe_set_rar(&adapter->hw, vf->rar_index, 5114 vf->ether_addr, vf->pool, TRUE); 5115 ack = IXGBE_VT_MSGTYPE_ACK; 5116 } else 5117 ack = IXGBE_VT_MSGTYPE_NACK; 5118 5119 ixgbe_vf_enable_transmit(adapter, vf); 5120 ixgbe_vf_enable_receive(adapter, vf); 5121 5122 vf->flags |= IXGBE_VF_CTS; 5123 5124 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS; 5125 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); 5126 resp[3] = hw->mac.mc_filter_type; 5127 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool); 5128 } 5129 5130 5131 static void 5132 ixgbe_vf_set_mac(struct adapter *adapter, struct 
ixgbe_vf *vf, uint32_t *msg) 5133 { 5134 uint8_t *mac; 5135 5136 mac = (uint8_t*)&msg[1]; 5137 5138 /* Check that the VF has permission to change the MAC address. */ 5139 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) { 5140 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5141 return; 5142 } 5143 5144 if (ixgbe_validate_mac_addr(mac) != 0) { 5145 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5146 return; 5147 } 5148 5149 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); 5150 5151 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, 5152 vf->pool, TRUE); 5153 5154 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5155 } 5156 5157 5158 /* 5159 ** VF multicast addresses are set by using the appropriate bit in 5160 ** 1 of 128 32 bit addresses (4096 possible). 5161 */ 5162 static void 5163 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg) 5164 { 5165 u16 *list = (u16*)&msg[1]; 5166 int entries; 5167 u32 vmolr, vec_bit, vec_reg, mta_reg; 5168 5169 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; 5170 entries = min(entries, IXGBE_MAX_VF_MC); 5171 5172 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool)); 5173 5174 vf->num_mc_hashes = entries; 5175 5176 /* Set the appropriate MTA bit */ 5177 for (int i = 0; i < entries; i++) { 5178 vf->mc_hash[i] = list[i]; 5179 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F; 5180 vec_bit = vf->mc_hash[i] & 0x1F; 5181 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg)); 5182 mta_reg |= (1 << vec_bit); 5183 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg); 5184 } 5185 5186 vmolr |= IXGBE_VMOLR_ROMPE; 5187 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr); 5188 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5189 return; 5190 } 5191 5192 5193 static void 5194 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) 5195 { 5196 struct ixgbe_hw *hw; 5197 int enable; 5198 uint16_t tag; 5199 5200 hw = &adapter->hw; 5201 enable = IXGBE_VT_MSGINFO(msg[0]); 5202 tag = msg[1] & IXGBE_VLVF_VLANID_MASK; 5203 5204 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) { 5205 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5206 return; 5207 } 5208 5209 /* It is illegal to enable vlan tag 0. */ 5210 if (tag == 0 && enable != 0){ 5211 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5212 return; 5213 } 5214 5215 ixgbe_set_vfta(hw, tag, vf->pool, enable); 5216 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5217 } 5218 5219 5220 static void 5221 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) 5222 { 5223 struct ixgbe_hw *hw; 5224 uint32_t vf_max_size, pf_max_size, mhadd; 5225 5226 hw = &adapter->hw; 5227 vf_max_size = msg[1]; 5228 5229 if (vf_max_size < ETHER_CRC_LEN) { 5230 /* We intentionally ACK invalid LPE requests. */ 5231 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5232 return; 5233 } 5234 5235 vf_max_size -= ETHER_CRC_LEN; 5236 5237 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) { 5238 /* We intentionally ACK invalid LPE requests. */ 5239 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5240 return; 5241 } 5242 5243 vf->max_frame_size = vf_max_size; 5244 ixgbe_update_max_frame(adapter, vf->max_frame_size); 5245 5246 /* 5247 * We might have to disable reception to this VF if the frame size is 5248 * not compatible with the config on the PF. 
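 * ixgbe_vf_enable_receive() re-checks compatibility and leaves the
 * pool's VFRE bit clear when the two sizes cannot coexist.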
 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
}


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
}


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
}


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 negotiated APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
}


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    adapter->ifp->if_xname, msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
}


/*
 * Tasklet for handling VF -> PF mailbox messages.
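 * Runs from the adapter taskqueue under the core lock; every active
 * VF's mailbox is polled for reset, message and ack events in turn.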
5380 */ 5381 static void 5382 ixgbe_handle_mbx(void *context, int pending) 5383 { 5384 struct adapter *adapter; 5385 struct ixgbe_hw *hw; 5386 struct ixgbe_vf *vf; 5387 int i; 5388 5389 adapter = context; 5390 hw = &adapter->hw; 5391 5392 IXGBE_CORE_LOCK(adapter); 5393 for (i = 0; i < adapter->num_vfs; i++) { 5394 vf = &adapter->vfs[i]; 5395 5396 if (vf->flags & IXGBE_VF_ACTIVE) { 5397 if (ixgbe_check_for_rst(hw, vf->pool) == 0) 5398 ixgbe_process_vf_reset(adapter, vf); 5399 5400 if (ixgbe_check_for_msg(hw, vf->pool) == 0) 5401 ixgbe_process_vf_msg(adapter, vf); 5402 5403 if (ixgbe_check_for_ack(hw, vf->pool) == 0) 5404 ixgbe_process_vf_ack(adapter, vf); 5405 } 5406 } 5407 IXGBE_CORE_UNLOCK(adapter); 5408 } 5409 5410 5411 static int 5412 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config) 5413 { 5414 struct adapter *adapter; 5415 enum ixgbe_iov_mode mode; 5416 5417 adapter = device_get_softc(dev); 5418 adapter->num_vfs = num_vfs; 5419 mode = ixgbe_get_iov_mode(adapter); 5420 5421 if (num_vfs > ixgbe_max_vfs(mode)) { 5422 adapter->num_vfs = 0; 5423 return (ENOSPC); 5424 } 5425 5426 IXGBE_CORE_LOCK(adapter); 5427 5428 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE, 5429 M_NOWAIT | M_ZERO); 5430 5431 if (adapter->vfs == NULL) { 5432 adapter->num_vfs = 0; 5433 IXGBE_CORE_UNLOCK(adapter); 5434 return (ENOMEM); 5435 } 5436 5437 ixgbe_init_locked(adapter); 5438 5439 IXGBE_CORE_UNLOCK(adapter); 5440 5441 return (0); 5442 } 5443 5444 5445 static void 5446 ixgbe_uninit_iov(device_t dev) 5447 { 5448 struct ixgbe_hw *hw; 5449 struct adapter *adapter; 5450 uint32_t pf_reg, vf_reg; 5451 5452 adapter = device_get_softc(dev); 5453 hw = &adapter->hw; 5454 5455 IXGBE_CORE_LOCK(adapter); 5456 5457 /* Enable rx/tx for the PF and disable it for all VFs. 
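 * VFRE/VFTE are bitmap registers (32 pools per register, two
 * registers covering 64 pools), so the PF sets its own bit and
 * clears the other register wholesale.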
 */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
	    IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
	    IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;

	IXGBE_CORE_UNLOCK(adapter);
}


static void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	enum ixgbe_iov_mode mode;
	int i;

	mode = ixgbe_get_iov_mode(adapter);
	if (mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;

	switch (mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	mtqc = IXGBE_MTQC_VT_ENA;
	switch (mode) {
	case IXGBE_64_VM:
		mtqc |= IXGBE_MTQC_64VF;
		break;
	case IXGBE_32_VM:
		mtqc |= IXGBE_MTQC_32VF;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);

	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	switch (mode) {
	case IXGBE_64_VM:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		break;
	case IXGBE_32_VM:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (mode) {
	case IXGBE_64_VM:
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
	    IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
	    IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
}
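/*
 * A note on the pool math used above and in ixgbe_init_vf(): the
 * hardware's queues are carved into 64 pools (IXGBE_64_VM) or 32
 * larger pools (IXGBE_32_VM); VT_CTL points the default pool at the
 * PF, and each VF claims one of the remaining pools.
 */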
/*
** Check the max frame setting of all active VF's
*/
static void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
}


static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
}

static int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	/* Mark the VF active, preserving the capability flags set above. */
	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
}
#endif /* PCI_IOV */
