/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "3.1.0";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixgbe_probe(device_t);
static int	ixgbe_attach(device_t);
static int	ixgbe_detach(device_t);
static int	ixgbe_shutdown(device_t);
static int	ixgbe_suspend(device_t);
static int	ixgbe_resume(device_t);
static int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixgbe_init(void *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
static uint64_t	ixgbe_get_counter(struct ifnet *, ift_counter);
#endif
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static void	ixgbe_identify_hardware(struct adapter *);
static int	ixgbe_allocate_pci_resources(struct adapter *);
static void	ixgbe_get_slot_info(struct ixgbe_hw *);
static int	ixgbe_allocate_msix(struct adapter *);
static int	ixgbe_allocate_legacy(struct adapter *);
static int	ixgbe_setup_msix(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_eee_support(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);

static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_promisc(struct adapter *);
static void	ixgbe_set_multi(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);
static void	ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void	ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void	ixgbe_msix_que(void *);
static void	ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void	ixgbe_handle_que(void *, int);
static void	ixgbe_handle_link(void *, int);
static void	ixgbe_handle_msf(void *, int);
static void	ixgbe_handle_mod(void *, int);
static void	ixgbe_handle_phy(void *, int);

#ifdef IXGBE_FDIR
static void	ixgbe_reinit_fdir(void *, int);
#endif

#ifdef PCI_IOV
static void	ixgbe_ping_all_vfs(struct adapter *);
static void	ixgbe_handle_mbx(void *, int);
static int	ixgbe_init_iov(device_t, u16, const nvlist_t *);
static void	ixgbe_uninit_iov(device_t);
static int	ixgbe_add_vf(device_t, u16, const nvlist_t *);
static void	ixgbe_initialize_iov(struct adapter *);
static void	ixgbe_recalculate_max_frame(struct adapter *);
static void	ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
#endif /* PCI_IOV */


/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
    "IXGBE driver parameters");

/*
** AIM: Adaptive Interrupt Moderation,
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, "
    "-1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, "
    "-1 means unlimited");

/*
** Smart speed setting, default to on;
** this only works as a compile option
** right now as it is set during attach.
** Set this to 'ixgbe_smart_speed_off' to
** disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues; can be set to 0,
 * in which case it autoconfigures based
 * on the number of cpus with a max of 8.
 * It can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
** Number of TX descriptors per ring;
** set higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");
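
/*
 * Illustrative loader.conf(5) usage (example values only, not a tuning
 * recommendation): the RDTUN/RWTUN sysctls above can be seeded at boot,
 * e.g.:
 *
 *	hw.ix.enable_msix="1"
 *	hw.ix.num_queues="4"
 *	hw.ix.rx_process_limit="512"
 *	hw.ix.txd="2048"
 */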

/*
** Defining this on will allow the use
** of unsupported SFP+ modules; note that
** in doing so you are on your own :)
*/
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool; this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

#ifdef DEV_NETMAP
/*
 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
 * be a reference on how to implement netmap support in a driver.
 * Additional comments are in ixgbe_netmap.h .
 *
 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
 * that extend the standard driver.
 */
#include <dev/netmap/ixgbe_netmap.h>
#endif /* DEV_NETMAP */

static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  an adapter based on the PCI vendor/device ID of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	INIT_DEBUGOUT("ixgbe_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

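	/*
	 * Note: table entries with a zero subvendor or subdevice ID act
	 * as wildcards in the match below; a device is accepted when the
	 * vendor and device IDs match and the subsystem IDs either match
	 * or are left unspecified (zero) in the table.
	 */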
	ent = ixgbe_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixgbe_strings[ent->index],
			    ixgbe_driver_version);
			device_set_desc_copy(dev, adapter_name);
			++ixgbe_total_ports;
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int	error = 0;
	u16	csum;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;
#endif

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;
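
	/*
	 * For illustration: the modulo tests above keep each descriptor
	 * ring size DBA_ALIGN-aligned.  Assuming DBA_ALIGN is 128 (as
	 * defined in ixgbe.h) and 16-byte advanced descriptors, ring
	 * sizes must be multiples of 8 descriptors; e.g. 2048 descriptors
	 * * 16 bytes = 32768 bytes, which divides evenly by 128.
	 */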

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port; set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

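	/*
	 * Interrupt setup (summary of the two paths below): when more
	 * than one MSI-X vector is available and hw.ix.enable_msix is
	 * set, each RX/TX queue pair gets its own vector, serviced by
	 * ixgbe_msix_que(), plus a separate vector for link and other
	 * events (ixgbe_msix_link()); otherwise a single legacy/MSI
	 * handler, ixgbe_legacy_irq(), services everything.
	 */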
	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(hw);

	/* Set an initial default flow control value */
	adapter->fc = ixgbe_fc_full;

#ifdef PCI_IOV
	if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
		nvlist_t *pf_schema, *vf_schema;

		hw->mbx.ops.init_params(hw);
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (error != 0) {
			device_printf(dev,
			    "Error %d setting up SR-IOV\n", error);
		}
	}
#endif /* PCI_IOV */

	/* Check for certain supported features */
	ixgbe_check_wol_support(adapter);
	ixgbe_check_eee_support(adapter);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	if (pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}
#endif /* PCI_IOV */

	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
#ifdef PCI_IOV
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
#endif
		taskqueue_drain(adapter->tq, &adapter->phy_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

/**
 * Methods for going from:
 * D0 -> D3: ixgbe_suspend
 * D3 -> D0: ixgbe_resume
 */
static int
ixgbe_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	IXGBE_CORE_LOCK(adapter);

	error = ixgbe_setup_low_power_mode(adapter);

	/* Save state and power down */
	pci_save_state(dev);
	pci_set_powerstate(dev, PCI_POWERSTATE_D3);

	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

static int
ixgbe_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 wus;

	INIT_DEBUGOUT("ixgbe_resume: begin");

	IXGBE_CORE_LOCK(adapter);

	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_restore_state(dev);

	/* Read & clear WUS register */
	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
	if (wus)
		device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
	/* And clear WUFC until next low-power transition */
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

	/*
	 * Required after D3->D0 transition;
	 * will re-advertise all previously advertised speeds
	 */
	if (ifp->if_flags & IFF_UP)
		ixgbe_init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	INIT_DEBUGOUT("ixgbe_resume: end");
	return (0);
}


/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int	error = 0;
	bool	avoid_reset = FALSE;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
#if defined(INET)
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + IXGBE_MTU_HDR;
			ixgbe_init_locked(adapter);
#ifdef PCI_IOV
			ixgbe_recalculate_max_frame(adapter);
#endif
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
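	/*
	 * Background for the SIOCGI2C handler below: 0xA0 and 0xA2 are
	 * the standard I2C addresses of an SFP+ module's identification
	 * EEPROM and diagnostic monitoring pages (SFF-8472), which is
	 * why only those two device addresses are accepted.
	 */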
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32	txdctl, mhadd;
	u32	rxdctl, rxctrl;
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	adapter->pool = ixgbe_max_vfs(mode);
	/* Queue indices may change with IOV mode */
	for (int i = 0; i < adapter->num_queues; i++) {
		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
	}
#endif
	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
#ifdef PCI_IOV
	ixgbe_initialize_iov(adapter);
#endif
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo frames
	*/
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSIX interrupts based on adapter */
	ixgbe_config_gpie(adapter);
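
	/*
	 * Worked example for the MHADD update below (illustrative
	 * numbers): with a 9000-byte MTU, max_frame_size is 9000 +
	 * IXGBE_MTU_HDR, and that value lands in the MFS field (upper
	 * 16 bits) of MHADD so the MAC accepts frames beyond the
	 * standard 1518-byte maximum.
	 */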
	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}
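
	/*
	 * Note on the receive loop below: after RXDCTL.ENABLE is set the
	 * hardware needs a moment to actually start the ring, so the
	 * inner loop polls the bit (with a shared budget of 10 x 1ms
	 * across all rings, as written) before the tail pointer (RDT)
	 * is programmed.
	 */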
	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
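
	/*
	 * Routing note (illustrative summary, not new behavior): in
	 * MSI-X mode, ixgbe_configure_ivars() points each queue's RX/TX
	 * causes at its own vector via the IVAR tables, and the EIAM
	 * writes below enable auto-masking so a firing vector stays
	 * masked until ixgbe_enable_queue() re-enables it.  In the
	 * legacy/MSI case the two ixgbe_set_ivar() calls map queue 0's
	 * RX (type 0) and TX (type 1) causes onto vector 0.
	 */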
	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Configure Energy Efficient Ethernet for supported devices */
	ixgbe_setup_eee(hw, adapter->eee_enabled);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

#ifdef PCI_IOV
	/* Enable the use of the MBX by the VF's */
	{
		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		reg |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
	}
#endif

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}

static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}
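
/*
 * ixgbe_config_gpie() below wires the software-definable pins (SDPs)
 * to interrupt causes: fan failure on the 82598AT, SFP+ module
 * detection and media events on the 82599, and thermal/link detection
 * on the X540/X550 parts; it also switches the controller into
 * enhanced MSI-X mode when more than one vector is in use.
 */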
static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Fan Failure Interrupt */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		gpie |= IXGBE_SDP1_GPIEN;

	/*
	 * Module detection (SDP2)
	 * Media ready (SDP1)
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN;
		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
			gpie |= IXGBE_SDP1_GPIEN;
	}

	/*
	 * Thermal Failure Detection (X540)
	 * Link Detection (X557)
	 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	return;
}

/*
 * Requires adapter->max_frame_size to be set.
 */
static void
ixgbe_config_delay_values(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.requested_mode = adapter->fc;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
}
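
/*
 * A note on units in ixgbe_config_delay_values() above: as the macro
 * names suggest, the IXGBE_DV*() and IXGBE_LOW_DV*() macros compute
 * delay values in bit times, IXGBE_BT2KB() rounds bits up to
 * kilobytes, and RXPBSIZE(0) (a byte count) >> 10 yields the packet
 * buffer size in KB, so the high/low water marks end up programmed in
 * KB of packet buffer.
 */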

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;	/* 64-bit shift; vector may be >= 32 */
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
}

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;	/* 64-bit shift; vector may be >= 32 */
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
}

static void
ixgbe_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool	more;
	u32	reg_eicr;


	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool	more;
	u32	newitr = 0;


	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixgbe_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
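	/*
	 * Illustrative arithmetic for the AIM logic below (made-up
	 * values): with an average of ~1500 bytes/packet over the last
	 * interval, newitr = 1500 + 24 = 1524; that is above the
	 * 300-1200 mid range, so it is halved to 762 before being
	 * written back through EITR on the next interrupt.  Small
	 * average packets thus produce a small interval (more
	 * interrupts), large packets a larger one.
	 */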
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_EITR(que->msix), que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;
	else
		newitr |= IXGBE_EITR_CNT_WDIS;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_queue(adapter, que->msix);
	return;
}


static void
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32	reg_eicr, mod_mask;

	++adapter->link_irq;

	/* First get the cause */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		} else
#endif
		if (reg_eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (reg_eicr & IXGBE_EICR_TS) {
			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
			    "PHY IS SHUT DOWN!!\n");
			device_printf(adapter->dev, "System shutdown required!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
		}
#ifdef PCI_IOV
		if (reg_eicr & IXGBE_EICR_MAILBOX)
			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
#endif
	}

	/* Pluggable optics-related interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
	else
		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

	if (ixgbe_is_sfp(hw)) {
		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else if (reg_eicr & mod_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
	}

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return;
}
" 1620 "PHY IS SHUT DOWN!!\n"); 1621 device_printf(adapter->dev, "System shutdown required!\n"); 1622 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 1623 } 1624 #ifdef PCI_IOV 1625 if (reg_eicr & IXGBE_EICR_MAILBOX) 1626 taskqueue_enqueue(adapter->tq, &adapter->mbx_task); 1627 #endif 1628 } 1629 1630 /* Pluggable optics-related interrupt */ 1631 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) 1632 mod_mask = IXGBE_EICR_GPI_SDP0_X540; 1633 else 1634 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 1635 1636 if (ixgbe_is_sfp(hw)) { 1637 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 1638 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 1639 taskqueue_enqueue(adapter->tq, &adapter->msf_task); 1640 } else if (reg_eicr & mod_mask) { 1641 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask); 1642 taskqueue_enqueue(adapter->tq, &adapter->mod_task); 1643 } 1644 } 1645 1646 /* Check for fan failure */ 1647 if ((hw->device_id == IXGBE_DEV_ID_82598AT) && 1648 (reg_eicr & IXGBE_EICR_GPI_SDP1)) { 1649 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1650 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! " 1651 "REPLACE IMMEDIATELY!!\n"); 1652 } 1653 1654 /* External PHY interrupt */ 1655 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 1656 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) { 1657 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 1658 taskqueue_enqueue(adapter->tq, &adapter->phy_task); 1659 } 1660 1661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1662 return; 1663 } 1664 1665 /********************************************************************* 1666 * 1667 * Media Ioctl callback 1668 * 1669 * This routine is called whenever the user queries the status of 1670 * the interface using ifconfig. 1671 * 1672 **********************************************************************/ 1673 static void 1674 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) 1675 { 1676 struct adapter *adapter = ifp->if_softc; 1677 struct ixgbe_hw *hw = &adapter->hw; 1678 int layer; 1679 1680 INIT_DEBUGOUT("ixgbe_media_status: begin"); 1681 IXGBE_CORE_LOCK(adapter); 1682 ixgbe_update_link_status(adapter); 1683 1684 ifmr->ifm_status = IFM_AVALID; 1685 ifmr->ifm_active = IFM_ETHER; 1686 1687 if (!adapter->link_active) { 1688 IXGBE_CORE_UNLOCK(adapter); 1689 return; 1690 } 1691 1692 ifmr->ifm_status |= IFM_ACTIVE; 1693 layer = adapter->phy_layer; 1694 1695 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 1696 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 1697 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) 1698 switch (adapter->link_speed) { 1699 case IXGBE_LINK_SPEED_10GB_FULL: 1700 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 1701 break; 1702 case IXGBE_LINK_SPEED_1GB_FULL: 1703 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 1704 break; 1705 case IXGBE_LINK_SPEED_100_FULL: 1706 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 1707 break; 1708 } 1709 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1710 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 1711 switch (adapter->link_speed) { 1712 case IXGBE_LINK_SPEED_10GB_FULL: 1713 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 1714 break; 1715 } 1716 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 1717 switch (adapter->link_speed) { 1718 case IXGBE_LINK_SPEED_10GB_FULL: 1719 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 1720 break; 1721 case IXGBE_LINK_SPEED_1GB_FULL: 1722 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 1723 break; 1724 } 1725 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 1726 switch (adapter->link_speed) { 1727 case IXGBE_LINK_SPEED_10GB_FULL: 1728 
	/*
	** XXX: These need to use the proper media types once
	** they're added.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	** We don't actually need to check against the supported
	** media types of the adapter; ifmedia will take care of
	** that for us.
	*/
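	/*
	 * The case fall-throughs in the switch below are deliberate:
	 * selecting a faster subtype also advertises the slower speeds
	 * (e.g. IFM_10G_T accumulates 100M, 1G and 10G), and IFM_AUTO
	 * falls through the whole chain to advertise everything
	 * supported.
	 */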
    /*
    ** We don't actually need to check against the supported
    ** media types of the adapter; ifmedia will take care of
    ** that for us.
    */
    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
    case IFM_10G_T:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        /* FALLTHROUGH */
    case IFM_10G_LRM:
    case IFM_10G_SR: /* KR, too */
    case IFM_10G_LR:
    case IFM_10G_CX4: /* KX4 */
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        /* FALLTHROUGH */
    case IFM_10G_TWINAX:
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
    case IFM_1000_T:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        /* FALLTHROUGH */
    case IFM_1000_LX:
    case IFM_1000_SX:
    case IFM_1000_CX: /* KX */
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        break;
    case IFM_100_TX:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        break;
    default:
        goto invalid;
    }

    hw->mac.autotry_restart = TRUE;
    hw->mac.ops.setup_link(hw, speed, TRUE);
    /* Record what we advertise: 0x4 = 10G, 0x2 = 1G, 0x1 = 100M */
    adapter->advertise =
        ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
        ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 0x2 : 0) |
        ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 0x1 : 0);

    return (0);

invalid:
    device_printf(adapter->dev, "Invalid media type!\n");
    return (EINVAL);
}

static void
ixgbe_set_promisc(struct adapter *adapter)
{
    u_int32_t reg_rctl;
    struct ifnet *ifp = adapter->ifp;
    int mcnt = 0;

    reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
    reg_rctl &= (~IXGBE_FCTRL_UPE);
    if (ifp->if_flags & IFF_ALLMULTI)
        mcnt = MAX_NUM_MULTICAST_ADDRESSES;
    else {
        struct ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
        IF_ADDR_LOCK(ifp);
#else
        if_maddr_rlock(ifp);
#endif
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
            if (ifma->ifma_addr->sa_family != AF_LINK)
                continue;
            if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
                break;
            mcnt++;
        }
#if __FreeBSD_version < 800000
        IF_ADDR_UNLOCK(ifp);
#else
        if_maddr_runlock(ifp);
#endif
    }
    if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
        reg_rctl &= (~IXGBE_FCTRL_MPE);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);

    if (ifp->if_flags & IFF_PROMISC) {
        reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
    } else if (ifp->if_flags & IFF_ALLMULTI) {
        reg_rctl |= IXGBE_FCTRL_MPE;
        reg_rctl &= ~IXGBE_FCTRL_UPE;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
    }
    return;
}
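
/*
 * A note on the FCTRL handling above and in ixgbe_set_multi() below
 * (summary only, no new behavior): IXGBE_FCTRL_UPE is unicast
 * promiscuous mode and IXGBE_FCTRL_MPE is multicast promiscuous mode.
 * IFF_PROMISC sets both, IFF_ALLMULTI (or an overflowing multicast
 * list) sets only MPE, and otherwise both bits are cleared.
 */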

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixgbe_set_multi(struct adapter *adapter)
{
    u32 fctrl;
    u8 *update_ptr;
    struct ifmultiaddr *ifma;
    struct ixgbe_mc_addr *mta;
    int mcnt = 0;
    struct ifnet *ifp = adapter->ifp;

    IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

    mta = adapter->mta;
    bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_maddr_rlock(ifp);
#endif
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
            break;
        bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
            mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
        mta[mcnt].vmdq = adapter->pool;
        mcnt++;
    }
#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_maddr_runlock(ifp);
#endif

    fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
    fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
    if (ifp->if_flags & IFF_PROMISC)
        fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
    else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
        ifp->if_flags & IFF_ALLMULTI) {
        fctrl |= IXGBE_FCTRL_MPE;
        fctrl &= ~IXGBE_FCTRL_UPE;
    } else
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

    IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

    if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
        update_ptr = (u8 *)mta;
        ixgbe_update_mc_addr_list(&adapter->hw,
            update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
    }

    return;
}

/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixgbe_set_multi() one by one.
 */
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    struct ixgbe_mc_addr *mta;

    mta = (struct ixgbe_mc_addr *)*update_ptr;
    *vmdq = mta->vmdq;

    *update_ptr = (u8 *)(mta + 1);
    return (mta->addr);
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
static void
ixgbe_local_timer(void *arg)
{
    struct adapter *adapter = arg;
    device_t dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    u64 queues = 0;
    int hung = 0;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    /* Check for pluggable optics */
    if (adapter->sfp_probe)
        if (!ixgbe_sfp_probe(adapter))
            goto out; /* Nothing to do */

    ixgbe_update_link_status(adapter);
    ixgbe_update_stats_counters(adapter);

    /*
    ** Check the TX queues status
    **      - mark hung queues so we don't schedule on them
    **      - watchdog only if all queues show hung
    */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        /* Keep track of queues with work for soft irq */
        if (que->txr->busy)
            queues |= ((u64)1 << que->me);
        /*
        ** Each time txeof runs without cleaning, but there
        ** are uncleaned descriptors it increments busy. If
        ** we get to the MAX we declare it hung.
        */
        if (que->busy == IXGBE_QUEUE_HUNG) {
            ++hung;
            /* Mark the queue as inactive */
            adapter->active_queues &= ~((u64)1 << que->me);
            continue;
        } else {
            /* Check if we've come back from hung */
            if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                adapter->active_queues |= ((u64)1 << que->me);
        }
        if (que->busy >= IXGBE_MAX_TX_BUSY) {
            device_printf(dev, "Warning: queue %d "
                "appears to be hung!\n", i);
            que->txr->busy = IXGBE_QUEUE_HUNG;
            ++hung;
        }
    }

    /* Only truly watchdog if all queues show hung */
    if (hung == adapter->num_queues)
        goto watchdog;
    else if (queues != 0) { /* Force an IRQ on queues with work */
        ixgbe_rearm_queues(adapter, queues);
    }

out:
    callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
    return;

watchdog:
    device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
    adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    adapter->watchdog_events++;
    ixgbe_init_locked(adapter);
}

/*
** Note: this routine updates the OS on the link state,
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    device_t dev = adapter->dev;

    if (adapter->link_up) {
        if (adapter->link_active == FALSE) {
            if (bootverbose)
                device_printf(dev, "Link is up %d Gbps %s \n",
                    /* IXGBE_LINK_SPEED_10GB_FULL == 128 */
                    ((adapter->link_speed == 128) ? 10 : 1),
                    "Full Duplex");
            adapter->link_active = TRUE;
            /* Update any Flow Control changes */
            ixgbe_fc_enable(&adapter->hw);
            /* Update DMA coalescing config */
            ixgbe_config_dmac(adapter);
            if_link_state_change(ifp, LINK_STATE_UP);
#ifdef PCI_IOV
            ixgbe_ping_all_vfs(adapter);
#endif
        }
    } else { /* Link down */
        if (adapter->link_active == TRUE) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            if_link_state_change(ifp, LINK_STATE_DOWN);
            adapter->link_active = FALSE;
#ifdef PCI_IOV
            ixgbe_ping_all_vfs(adapter);
#endif
        }
    }

    return;
}
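
/*
 * How the link-state pieces fit together (summary comment only):
 * the hardware link interrupt merely schedules link_task; its
 * handler, ixgbe_handle_link() later in this file, performs the
 * real ixgbe_check_link() and then calls ixgbe_update_link_status()
 * to inform the stack. The local timer also calls
 * ixgbe_update_link_status() once per second, so the OS view is
 * refreshed even if no interrupt fires.
 */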

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
ixgbe_stop(void *arg)
{
    struct ifnet *ifp;
    struct adapter *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    ifp = adapter->ifp;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    INIT_DEBUGOUT("ixgbe_stop: begin\n");
    ixgbe_disable_intr(adapter);
    callout_stop(&adapter->timer);

    /* Let the stack know... */
    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

    ixgbe_reset_hw(hw);
    hw->adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    if (hw->mac.type == ixgbe_mac_82599EB)
        ixgbe_stop_mac_link_on_d3_82599(hw);
    /* Turn off the laser - noop with no optics */
    ixgbe_disable_tx_laser(hw);

    /* Update the stack */
    adapter->link_up = FALSE;
    ixgbe_update_link_status(adapter);

    /* Reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

    return;
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixgbe_identify_hardware(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;

    /* Save off the information about this board */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
    hw->subsystem_vendor_id =
        pci_read_config(dev, PCIR_SUBVEND_0, 2);
    hw->subsystem_device_id =
        pci_read_config(dev, PCIR_SUBDEV_0, 2);

    /*
    ** Make sure BUSMASTER is set
    */
    pci_enable_busmaster(dev);

    /* We need this here to set the num_segs below */
    ixgbe_set_mac_type(hw);

    /* Pick up the 82599 settings */
    if (hw->mac.type != ixgbe_mac_82598EB) {
        hw->phy.smart_speed = ixgbe_smart_speed;
        adapter->num_segs = IXGBE_82599_SCATTER;
    } else
        adapter->num_segs = IXGBE_82598_SCATTER;

    return;
}

/*********************************************************************
 *
 *  Determine optic type
 *
 **********************************************************************/
static void
ixgbe_setup_optics(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    int layer;

    layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
        adapter->optics = IFM_10G_T;
        return;
    }

    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
        adapter->optics = IFM_1000_T;
        return;
    }

    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
        adapter->optics = IFM_1000_SX;
        return;
    }

    if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
        IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
        adapter->optics = IFM_10G_LR;
        return;
    }

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
        adapter->optics = IFM_10G_SR;
        return;
    }

    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
        adapter->optics = IFM_10G_TWINAX;
        return;
    }

    if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
        IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
        adapter->optics = IFM_10G_CX4;
        return;
    }

    /* If we get here just set the default */
    adapter->optics = IFM_ETHER | IFM_AUTO;
    return;
}

/*********************************************************************
 *
 *  Setup the Legacy or MSI Interrupt handler
 *
 **********************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
#ifndef IXGBE_LEGACY_TX
    struct tx_ring *txr = adapter->tx_rings;
#endif
    int error, rid = 0;

    /* MSI RID at 1 */
    if (adapter->msix == 1)
        rid = 1;

    /* We allocate a single interrupt resource */
    adapter->res = bus_alloc_resource_any(dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
    if (adapter->res == NULL) {
        device_printf(dev, "Unable to allocate bus resource: "
            "interrupt\n");
        return (ENXIO);
    }

    /*
     * Try allocating a fast interrupt and the associated deferred
     * processing contexts.
     */
#ifndef IXGBE_LEGACY_TX
    TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
#endif
    TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
    que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
        taskqueue_thread_enqueue, &que->tq);
    taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
        device_get_nameunit(adapter->dev));

    /* Tasklets for Link, SFP and Multispeed Fiber */
    TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
    TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
    TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
    TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
#ifdef IXGBE_FDIR
    TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
#endif
    adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
        taskqueue_thread_enqueue, &adapter->tq);
    taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
        device_get_nameunit(adapter->dev));

    if ((error = bus_setup_intr(dev, adapter->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
        que, &adapter->tag)) != 0) {
        device_printf(dev, "Failed to register fast interrupt "
            "handler: %d\n", error);
        taskqueue_free(que->tq);
        taskqueue_free(adapter->tq);
        que->tq = NULL;
        adapter->tq = NULL;
        return (error);
    }
    /* For simplicity in the handlers */
    adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

    return (0);
}
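
/*
 * Illustrative note (not driver code): this Legacy/MSI path is only
 * taken when MSI-X is unavailable or disabled. Assuming the stock
 * tunable name, MSI-X can be turned off from /boot/loader.conf with
 * "hw.ix.enable_msix=0", which makes ixgbe_setup_msix() later in
 * this file fall back to MSI or, failing that, a legacy interrupt.
 */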

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixgbe_allocate_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    struct tx_ring *txr = adapter->tx_rings;
    int error, rid, vector = 0;
    int cpu_id = 0;
#ifdef RSS
    cpuset_t cpu_mask;
#endif

#ifdef RSS
    /*
     * If we're doing RSS, the number of queues needs to
     * match the number of RSS buckets that are configured.
     *
     * + If there's more queues than RSS buckets, we'll end
     *   up with queues that get no traffic.
     *
     * + If there's more RSS buckets than queues, we'll end
     *   up having multiple RSS buckets map to the same queue,
     *   so there'll be some contention.
     */
    if (adapter->num_queues != rss_getnumbuckets()) {
        device_printf(dev,
            "%s: number of queues (%d) != number of RSS buckets (%d)"
            "; performance will be impacted.\n",
            __func__,
            adapter->num_queues,
            rss_getnumbuckets());
    }
#endif

    for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
        rid = vector + 1;
        que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (que->res == NULL) {
            device_printf(dev, "Unable to allocate"
                " bus resource: que interrupt [%d]\n", vector);
            return (ENXIO);
        }
        /* Set the handler function */
        error = bus_setup_intr(dev, que->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixgbe_msix_que, que, &que->tag);
        if (error) {
            que->res = NULL;
            device_printf(dev, "Failed to register QUE handler");
            return (error);
        }
#if __FreeBSD_version >= 800504
        bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
        que->msix = vector;
        adapter->active_queues |= ((u64)1 << que->msix);
#ifdef RSS
        /*
         * The queue ID is used as the RSS layer bucket ID.
         * We look up the queue ID -> RSS CPU ID and select
         * that.
         */
        cpu_id = rss_getcpu(i % rss_getnumbuckets());
#else
        /*
         * Bind the msix vector, and thus the
         * rings to the corresponding cpu.
         *
         * This just happens to match the default RSS round-robin
         * bucket -> queue -> CPU allocation.
         */
        if (adapter->num_queues > 1)
            cpu_id = i;
#endif
        if (adapter->num_queues > 1)
            bus_bind_intr(dev, que->res, cpu_id);
#ifdef IXGBE_DEBUG
#ifdef RSS
        device_printf(dev,
            "Bound RSS bucket %d to CPU %d\n",
            i, cpu_id);
#else
        device_printf(dev,
            "Bound queue %d to cpu %d\n",
            i, cpu_id);
#endif
#endif /* IXGBE_DEBUG */

#ifndef IXGBE_LEGACY_TX
        TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
#endif
        TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
        que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
        CPU_SETOF(cpu_id, &cpu_mask);
        taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
            &cpu_mask,
            "%s (bucket %d)",
            device_get_nameunit(adapter->dev),
            cpu_id);
#else
        taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
            device_get_nameunit(adapter->dev));
#endif
    }

    /* and Link */
    rid = vector + 1;
    adapter->res = bus_alloc_resource_any(dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
    if (!adapter->res) {
        device_printf(dev, "Unable to allocate"
            " bus resource: Link interrupt [%d]\n", rid);
        return (ENXIO);
    }
    /* Set the link handler function */
    error = bus_setup_intr(dev, adapter->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL,
        ixgbe_msix_link, adapter, &adapter->tag);
    if (error) {
        adapter->res = NULL;
        device_printf(dev, "Failed to register LINK handler");
        return (error);
    }
#if __FreeBSD_version >= 800504
    bus_describe_intr(dev, adapter->res, adapter->tag, "link");
#endif
    adapter->vector = vector;
    /* Tasklets for Link, SFP and Multispeed Fiber */
    TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
    TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
    TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
#ifdef PCI_IOV
    TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
#endif
    TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
#ifdef IXGBE_FDIR
    TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
#endif
    adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
        taskqueue_thread_enqueue, &adapter->tq);
    taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
        device_get_nameunit(adapter->dev));

    return (0);
}

/*
 * Setup Either MSI/X or MSI
 */
static int
ixgbe_setup_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    int rid, want, queues, msgs;

    /* Override by tuneable */
    if (ixgbe_enable_msix == 0)
        goto msi;

    /* First try MSI/X */
    msgs = pci_msix_count(dev);
    if (msgs == 0)
        goto msi;
    rid = PCIR_BAR(MSIX_82598_BAR);
    adapter->msix_mem = bus_alloc_resource_any(dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (adapter->msix_mem == NULL) {
        rid += 4; /* 82599 maps in higher BAR */
        adapter->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
    }
    if (adapter->msix_mem == NULL) {
        /* May not be enabled */
        device_printf(adapter->dev,
            "Unable to map MSIX table\n");
        goto msi;
    }

    /* Figure out a reasonable auto config value */
    queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;

#ifdef RSS
    /* If we're doing RSS, clamp at the number of RSS buckets */
    if (queues > rss_getnumbuckets())
        queues = rss_getnumbuckets();
#endif

    if (ixgbe_num_queues != 0)
        queues = ixgbe_num_queues;

    /* reflect correct sysctl value */
    ixgbe_num_queues = queues;
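
    /*
     * Worked example (commentary only): with 8 CPUs and 16 MSI-X
     * messages advertised by the device, the auto config above picks
     * queues = 8 and then asks for want = 9 vectors below -- one per
     * RX/TX queue pair plus one for the link interrupt.
     */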

    /*
    ** Want one vector (RX/TX pair) per queue
    ** plus an additional for Link.
    */
    want = queues + 1;
    if (msgs >= want)
        msgs = want;
    else {
        device_printf(adapter->dev,
            "MSIX Configuration Problem, "
            "%d vectors but %d queues wanted!\n",
            msgs, want);
        goto msi;
    }
    if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
        device_printf(adapter->dev,
            "Using MSIX interrupts with %d vectors\n", msgs);
        adapter->num_queues = queues;
        return (msgs);
    }
    /*
    ** If MSIX alloc failed or provided us with
    ** less than needed, free and fall through to MSI
    */
    pci_release_msi(dev);

msi:
    if (adapter->msix_mem != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY,
            rid, adapter->msix_mem);
        adapter->msix_mem = NULL;
    }
    msgs = 1;
    if (pci_alloc_msi(dev, &msgs) == 0) {
        device_printf(adapter->dev, "Using an MSI interrupt\n");
        return (msgs);
    }
    device_printf(adapter->dev, "Using a Legacy interrupt\n");
    return (0);
}

static int
ixgbe_allocate_pci_resources(struct adapter *adapter)
{
    int rid;
    device_t dev = adapter->dev;

    rid = PCIR_BAR(0);
    adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (!(adapter->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    adapter->osdep.mem_bus_space_tag =
        rman_get_bustag(adapter->pci_mem);
    adapter->osdep.mem_bus_space_handle =
        rman_get_bushandle(adapter->pci_mem);
    adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

    /* Legacy defaults */
    adapter->num_queues = 1;
    adapter->hw.back = &adapter->osdep;

    /*
    ** Now setup MSI or MSI/X, should
    ** return us the number of supported
    ** vectors. (Will be 1 for MSI)
    */
    adapter->msix = ixgbe_setup_msix(adapter);
    return (0);
}

static void
ixgbe_free_pci_resources(struct adapter * adapter)
{
    struct ix_queue *que = adapter->queues;
    device_t dev = adapter->dev;
    int rid, memrid;

    if (adapter->hw.mac.type == ixgbe_mac_82598EB)
        memrid = PCIR_BAR(MSIX_82598_BAR);
    else
        memrid = PCIR_BAR(MSIX_82599_BAR);

    /*
    ** There is a slight possibility of a failure mode
    ** in attach that will result in entering this function
    ** before interrupt resources have been initialized, and
    ** in that case we do not want to execute the loops below.
    ** We can detect this reliably by the state of the adapter
    ** res pointer.
    */
    if (adapter->res == NULL)
        goto mem;

    /*
    ** Release all msix queue resources:
    */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        rid = que->msix + 1;
        if (que->tag != NULL) {
            bus_teardown_intr(dev, que->res, que->tag);
            que->tag = NULL;
        }
        if (que->res != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
    }

    /* Clean the Legacy or Link interrupt last */
    if (adapter->vector) /* we are doing MSIX */
        rid = adapter->vector + 1;
    else
        rid = (adapter->msix != 0) ? 1 : 0;

    if (adapter->tag != NULL) {
        bus_teardown_intr(dev, adapter->res, adapter->tag);
        adapter->tag = NULL;
    }
    if (adapter->res != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
    if (adapter->msix)
        pci_release_msi(dev);

    if (adapter->msix_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            memrid, adapter->msix_mem);

    if (adapter->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(0), adapter->pci_mem);

    return;
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
    struct ifnet *ifp;

    INIT_DEBUGOUT("ixgbe_setup_interface: begin");

    ifp = adapter->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "can not allocate ifnet structure\n");
        return (-1);
    }
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_baudrate = IF_Gbps(10);
    ifp->if_init = ixgbe_init;
    ifp->if_softc = adapter;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100036
    if_setgetcounterfn(ifp, ixgbe_get_counter);
#endif
#if __FreeBSD_version >= 1100045
    /* TSO parameters */
    ifp->if_hw_tsomax = 65518;
    ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
    ifp->if_hw_tsomaxsegsize = 2048;
#endif
#ifndef IXGBE_LEGACY_TX
    ifp->if_transmit = ixgbe_mq_start;
    ifp->if_qflush = ixgbe_qflush;
#else
    ifp->if_start = ixgbe_start;
    IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
    ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
    IFQ_SET_READY(&ifp->if_snd);
#endif

    ether_ifattach(ifp, adapter->hw.mac.addr);

    adapter->max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_hdrlen = sizeof(struct ether_vlan_header);

    ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;
    ifp->if_capabilities |= IFCAP_LRO;
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
                         |  IFCAP_VLAN_HWTSO
                         |  IFCAP_VLAN_MTU
                         |  IFCAP_HWSTATS;
    ifp->if_capenable = ifp->if_capabilities;

    /*
    ** Don't turn this on by default, if vlans are
    ** created on another pseudo device (eg. lagg)
    ** then vlan events are not passed thru, breaking
    ** operation, but with HW FILTER off it works. If
    ** using vlans directly on the ixgbe driver you can
    ** enable this and get full hardware tag filtering.
    */
    ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
        ixgbe_media_status);

    ixgbe_add_media_types(adapter);

    /* Autoselect media by default */
    ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

    return (0);
}

static void
ixgbe_add_media_types(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    device_t dev = adapter->dev;
    int layer;

    layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

    /* Media types with matching FreeBSD media defines */
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);

    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
        layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);

    /*
    ** Other (no matching FreeBSD media type):
    ** To workaround this, we'll assign these completely
    ** inappropriate media types.
    */
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
        device_printf(dev, "Media supported: 10GbaseKR\n");
        device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
        device_printf(dev, "Media supported: 10GbaseKX4\n");
        device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
        device_printf(dev, "Media supported: 1000baseKX\n");
        device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
        /* Someday, someone will care about you... */
        device_printf(dev, "Media supported: 1000baseBX\n");
    }

    if (hw->device_id == IXGBE_DEV_ID_82598AT) {
        ifmedia_add(&adapter->media,
            IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
        ifmedia_add(&adapter->media,
            IFM_ETHER | IFM_1000_T, 0, NULL);
    }

    ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
}

static void
ixgbe_config_link(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 autoneg, err = 0;
    bool sfp, negotiate;

    sfp = ixgbe_is_sfp(hw);

    if (sfp) {
        if (hw->phy.multispeed_fiber) {
            hw->mac.ops.setup_sfp(hw);
            ixgbe_enable_tx_laser(hw);
            taskqueue_enqueue(adapter->tq, &adapter->msf_task);
        } else
            taskqueue_enqueue(adapter->tq, &adapter->mod_task);
    } else {
        if (hw->mac.ops.check_link)
            err = ixgbe_check_link(hw, &adapter->link_speed,
                &adapter->link_up, FALSE);
        if (err)
            goto out;
        autoneg = hw->phy.autoneg_advertised;
        if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
            err = hw->mac.ops.get_link_capabilities(hw,
                &autoneg, &negotiate);
        if (err)
            goto out;
        if (hw->mac.ops.setup_link)
            err = hw->mac.ops.setup_link(hw,
                autoneg, adapter->link_up);
    }
out:
    return;
}

/*********************************************************************
 *
 *  Enable transmit units.
 *
 **********************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;
    struct ixgbe_hw *hw = &adapter->hw;

    /* Setup the Base and Length of the Tx Descriptor Ring */
    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        u64 tdba = txr->txdma.dma_paddr;
        u32 txctrl = 0;
        int j = txr->me;

        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
            adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

        /* Setup the HW Tx Head and Tail descriptor pointers */
        IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

        /* Cache the tail address */
        txr->tail = IXGBE_TDT(j);

        /* Disable Head Writeback */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
            break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        default:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
            break;
        }
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
            break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        default:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
            break;
        }
    }

    if (hw->mac.type != ixgbe_mac_82598EB) {
        u32 dmatxctl, rttdcs;
#ifdef PCI_IOV
        enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
#endif
        dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
        dmatxctl |= IXGBE_DMATXCTL_TE;
        IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        /* Disable arbiter to set MTQC */
        rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        rttdcs |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
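        /*
         * Commentary (no functional change): MTQC selects how Tx
         * queues map onto packet buffers/pools. With PCI_IOV the
         * value follows the VMDq pool mode; otherwise the plain
         * 64-queue, single packet buffer layout (IXGBE_MTQC_64Q_1PB)
         * is used. MTQC is only supposed to change while the Tx
         * arbiter is disabled, hence the RTTDCS.ARBDIS toggling
         * around this write.
         */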
#ifdef PCI_IOV
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
#else
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
#endif
        rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    }

    return;
}

static void
ixgbe_initialise_rss_mapping(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 reta = 0, mrqc, rss_key[10];
    int queue_id, table_size, index_mult;
#ifdef RSS
    u32 rss_hash_config;
#endif
#ifdef PCI_IOV
    enum ixgbe_iov_mode mode;
#endif

#ifdef RSS
    /* Fetch the configured RSS key */
    rss_getkey((uint8_t *) &rss_key);
#else
    /* set up random bits */
    arc4rand(&rss_key, sizeof(rss_key), 0);
#endif

    /* Set multiplier for RETA setup and table size based on MAC */
    index_mult = 0x1;
    table_size = 128;
    switch (adapter->hw.mac.type) {
    case ixgbe_mac_82598EB:
        index_mult = 0x11;
        break;
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
        table_size = 512;
        break;
    default:
        break;
    }

    /* Set up the redirection table */
    for (int i = 0, j = 0; i < table_size; i++, j++) {
        if (j == adapter->num_queues)
            j = 0;
#ifdef RSS
        /*
         * Fetch the RSS bucket id for the given indirection entry.
         * Cap it at the number of configured buckets (which is
         * num_queues.)
         */
        queue_id = rss_get_indirection_to_bucket(i);
        queue_id = queue_id % adapter->num_queues;
#else
        queue_id = (j * index_mult);
#endif
        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
        reta = reta >> 8;
        reta = reta | (((uint32_t) queue_id) << 24);
        if ((i & 3) == 3) {
            if (i < 128)
                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
            else
                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
            reta = 0;
        }
    }

    /* Now fill our hash function seeds */
    for (int i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
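
    /*
     * Worked example for the RETA fill above (commentary only): each
     * 32-bit RETA register holds four 8-bit queue numbers. With four
     * queues and index_mult == 1 the first register ends up as
     * 0x03020100 -- entry 0 in the low byte, entry 3 in the high
     * byte -- because each queue_id is shifted in at bit 24 after
     * the accumulated value shifts right by 8. On X550 the table is
     * 512 entries, so entries past 127 land in the ERETA registers.
     */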

    /* Perform hash on these packet types */
#ifdef RSS
    mrqc = IXGBE_MRQC_RSSEN;
    rss_hash_config = rss_gethashconfig();
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
        device_printf(adapter->dev,
            "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
            "but not supported\n", __func__);
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
#else
    /*
     * Disable UDP - IP fragments aren't currently being handled
     * and so we end up with a mix of 2-tuple and 4-tuple
     * traffic.
     */
    mrqc = IXGBE_MRQC_RSSEN
         | IXGBE_MRQC_RSS_FIELD_IPV4
         | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
         | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
         | IXGBE_MRQC_RSS_FIELD_IPV6_EX
         | IXGBE_MRQC_RSS_FIELD_IPV6
         | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
#endif /* RSS */
#ifdef PCI_IOV
    mode = ixgbe_get_iov_mode(adapter);
    mrqc |= ixgbe_get_mrqc(mode);
#endif
    IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}

/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

#define BSIZEPKT_ROUNDUP ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
    struct rx_ring *rxr = adapter->rx_rings;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ifnet *ifp = adapter->ifp;
    u32 bufsz, fctrl, srrctl, rxcsum;
    u32 hlreg;

    /*
     * Make sure receives are disabled while
     * setting up the descriptor ring
     */
    ixgbe_disable_rx(hw);

    /* Enable broadcasts */
    fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    fctrl |= IXGBE_FCTRL_BAM;
    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        fctrl |= IXGBE_FCTRL_DPF;
        fctrl |= IXGBE_FCTRL_PMCF;
    }
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    /* Set for Jumbo Frames? */
    hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    if (ifp->if_mtu > ETHERMTU)
        hlreg |= IXGBE_HLREG0_JUMBOEN;
    else
        hlreg &= ~IXGBE_HLREG0_JUMBOEN;
#ifdef DEV_NETMAP
    /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
    if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
        hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    else
        hlreg |= IXGBE_HLREG0_RXCRCSTRP;
#endif /* DEV_NETMAP */
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

    bufsz = (adapter->rx_mbuf_sz +
        BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    for (int i = 0; i < adapter->num_queues; i++, rxr++) {
        u64 rdba = rxr->rxdma.dma_paddr;
        int j = rxr->me;

        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
            adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

        /* Set up the SRRCTL register */
        srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        srrctl |= bufsz;
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
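
        /*
         * Sizing note (commentary only): BSIZEPKT is expressed in
         * 1 KB units, assuming the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT
         * of 10. The bufsz computed above rounds the mbuf size up,
         * so a 2048-byte rx_mbuf_sz becomes (2048 + 1023) >> 10 = 2,
         * i.e. a 2 KB hardware buffer per descriptor.
         */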
3133 */ 3134 if (adapter->num_queues > 1 && 3135 adapter->hw.fc.requested_mode == ixgbe_fc_none) { 3136 srrctl |= IXGBE_SRRCTL_DROP_EN; 3137 } else { 3138 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 3139 } 3140 3141 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 3142 3143 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 3144 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 3145 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 3146 3147 /* Set the driver rx tail address */ 3148 rxr->tail = IXGBE_RDT(rxr->me); 3149 } 3150 3151 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 3152 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 3153 IXGBE_PSRTYPE_UDPHDR | 3154 IXGBE_PSRTYPE_IPV4HDR | 3155 IXGBE_PSRTYPE_IPV6HDR; 3156 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 3157 } 3158 3159 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3160 3161 ixgbe_initialise_rss_mapping(adapter); 3162 3163 if (adapter->num_queues > 1) { 3164 /* RSS and RX IPP Checksum are mutually exclusive */ 3165 rxcsum |= IXGBE_RXCSUM_PCSD; 3166 } 3167 3168 if (ifp->if_capenable & IFCAP_RXCSUM) 3169 rxcsum |= IXGBE_RXCSUM_PCSD; 3170 3171 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 3172 rxcsum |= IXGBE_RXCSUM_IPPCSE; 3173 3174 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3175 3176 return; 3177 } 3178 3179 3180 /* 3181 ** This routine is run via an vlan config EVENT, 3182 ** it enables us to use the HW Filter table since 3183 ** we can get the vlan id. This just creates the 3184 ** entry in the soft version of the VFTA, init will 3185 ** repopulate the real table. 3186 */ 3187 static void 3188 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) 3189 { 3190 struct adapter *adapter = ifp->if_softc; 3191 u16 index, bit; 3192 3193 if (ifp->if_softc != arg) /* Not our event */ 3194 return; 3195 3196 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 3197 return; 3198 3199 IXGBE_CORE_LOCK(adapter); 3200 index = (vtag >> 5) & 0x7F; 3201 bit = vtag & 0x1F; 3202 adapter->shadow_vfta[index] |= (1 << bit); 3203 ++adapter->num_vlans; 3204 ixgbe_setup_vlan_hw_support(adapter); 3205 IXGBE_CORE_UNLOCK(adapter); 3206 } 3207 3208 /* 3209 ** This routine is run via an vlan 3210 ** unconfig EVENT, remove our entry 3211 ** in the soft vfta. 3212 */ 3213 static void 3214 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 3215 { 3216 struct adapter *adapter = ifp->if_softc; 3217 u16 index, bit; 3218 3219 if (ifp->if_softc != arg) 3220 return; 3221 3222 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 3223 return; 3224 3225 IXGBE_CORE_LOCK(adapter); 3226 index = (vtag >> 5) & 0x7F; 3227 bit = vtag & 0x1F; 3228 adapter->shadow_vfta[index] &= ~(1 << bit); 3229 --adapter->num_vlans; 3230 /* Re-init to load the changes */ 3231 ixgbe_setup_vlan_hw_support(adapter); 3232 IXGBE_CORE_UNLOCK(adapter); 3233 } 3234 3235 static void 3236 ixgbe_setup_vlan_hw_support(struct adapter *adapter) 3237 { 3238 struct ifnet *ifp = adapter->ifp; 3239 struct ixgbe_hw *hw = &adapter->hw; 3240 struct rx_ring *rxr; 3241 u32 ctrl; 3242 3243 3244 /* 3245 ** We get here thru init_locked, meaning 3246 ** a soft reset, this has already cleared 3247 ** the VFTA and other state, so if there 3248 ** have been no vlan's registered do nothing. 
3249 */ 3250 if (adapter->num_vlans == 0) 3251 return; 3252 3253 /* Setup the queues for vlans */ 3254 for (int i = 0; i < adapter->num_queues; i++) { 3255 rxr = &adapter->rx_rings[i]; 3256 /* On 82599 the VLAN enable is per/queue in RXDCTL */ 3257 if (hw->mac.type != ixgbe_mac_82598EB) { 3258 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3259 ctrl |= IXGBE_RXDCTL_VME; 3260 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 3261 } 3262 rxr->vtag_strip = TRUE; 3263 } 3264 3265 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 3266 return; 3267 /* 3268 ** A soft reset zero's out the VFTA, so 3269 ** we need to repopulate it now. 3270 */ 3271 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) 3272 if (adapter->shadow_vfta[i] != 0) 3273 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 3274 adapter->shadow_vfta[i]); 3275 3276 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3277 /* Enable the Filter Table if enabled */ 3278 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 3279 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 3280 ctrl |= IXGBE_VLNCTRL_VFE; 3281 } 3282 if (hw->mac.type == ixgbe_mac_82598EB) 3283 ctrl |= IXGBE_VLNCTRL_VME; 3284 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 3285 } 3286 3287 static void 3288 ixgbe_enable_intr(struct adapter *adapter) 3289 { 3290 struct ixgbe_hw *hw = &adapter->hw; 3291 struct ix_queue *que = adapter->queues; 3292 u32 mask, fwsm; 3293 3294 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 3295 /* Enable Fan Failure detection */ 3296 if (hw->device_id == IXGBE_DEV_ID_82598AT) 3297 mask |= IXGBE_EIMS_GPI_SDP1; 3298 3299 switch (adapter->hw.mac.type) { 3300 case ixgbe_mac_82599EB: 3301 mask |= IXGBE_EIMS_ECC; 3302 /* Temperature sensor on some adapters */ 3303 mask |= IXGBE_EIMS_GPI_SDP0; 3304 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 3305 mask |= IXGBE_EIMS_GPI_SDP1; 3306 mask |= IXGBE_EIMS_GPI_SDP2; 3307 #ifdef IXGBE_FDIR 3308 mask |= IXGBE_EIMS_FLOW_DIR; 3309 #endif 3310 #ifdef PCI_IOV 3311 mask |= IXGBE_EIMS_MAILBOX; 3312 #endif 3313 break; 3314 case ixgbe_mac_X540: 3315 /* Detect if Thermal Sensor is enabled */ 3316 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 3317 if (fwsm & IXGBE_FWSM_TS_ENABLED) 3318 mask |= IXGBE_EIMS_TS; 3319 mask |= IXGBE_EIMS_ECC; 3320 #ifdef IXGBE_FDIR 3321 mask |= IXGBE_EIMS_FLOW_DIR; 3322 #endif 3323 break; 3324 case ixgbe_mac_X550: 3325 case ixgbe_mac_X550EM_x: 3326 /* MAC thermal sensor is automatically enabled */ 3327 mask |= IXGBE_EIMS_TS; 3328 /* Some devices use SDP0 for important information */ 3329 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3330 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) 3331 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 3332 mask |= IXGBE_EIMS_ECC; 3333 #ifdef IXGBE_FDIR 3334 mask |= IXGBE_EIMS_FLOW_DIR; 3335 #endif 3336 #ifdef PCI_IOV 3337 mask |= IXGBE_EIMS_MAILBOX; 3338 #endif 3339 /* falls through */ 3340 default: 3341 break; 3342 } 3343 3344 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3345 3346 /* With MSI-X we use auto clear */ 3347 if (adapter->msix_mem) { 3348 mask = IXGBE_EIMS_ENABLE_MASK; 3349 /* Don't autoclear Link */ 3350 mask &= ~IXGBE_EIMS_OTHER; 3351 mask &= ~IXGBE_EIMS_LSC; 3352 #ifdef PCI_IOV 3353 mask &= ~IXGBE_EIMS_MAILBOX; 3354 #endif 3355 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 3356 } 3357 3358 /* 3359 ** Now enable all queues, this is done separately to 3360 ** allow for handling the extended (beyond 32) MSIX 3361 ** vectors that can be used by 82599 3362 */ 3363 for (int i = 0; i < adapter->num_queues; i++, que++) 3364 ixgbe_enable_queue(adapter, que->msix); 3365 3366 IXGBE_WRITE_FLUSH(hw); 3367 3368 return; 3369 } 3370 3371 
static void
ixgbe_disable_intr(struct adapter *adapter)
{
    if (adapter->msix_mem)
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
    } else {
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
    }
    IXGBE_WRITE_FLUSH(&adapter->hw);
    return;
}

/*
** Get the width and transaction speed of
** the slot this adapter is plugged into.
*/
static void
ixgbe_get_slot_info(struct ixgbe_hw *hw)
{
    device_t dev = ((struct ixgbe_osdep *)hw->back)->dev;
    struct ixgbe_mac_info *mac = &hw->mac;
    u16 link;
    u32 offset;

    /* For most devices simply call the shared code routine */
    if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
        ixgbe_get_bus_info(hw);
        /* These devices don't use PCI-E */
        switch (hw->mac.type) {
        case ixgbe_mac_X550EM_x:
            return;
        default:
            goto display;
        }
    }

    /*
    ** For the Quad port adapter we need to parse back
    ** up the PCI tree to find the speed of the expansion
    ** slot into which this adapter is plugged. A bit more work.
    */
    dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
    device_printf(dev, "parent pcib = %x,%x,%x\n",
        pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
#endif
    dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
    device_printf(dev, "slot pcib = %x,%x,%x\n",
        pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
#endif
    /* Now get the PCI Express Capabilities offset */
    pci_find_cap(dev, PCIY_EXPRESS, &offset);
    /* ...and read the Link Status Register */
    link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
    switch (link & IXGBE_PCI_LINK_WIDTH) {
    case IXGBE_PCI_LINK_WIDTH_1:
        hw->bus.width = ixgbe_bus_width_pcie_x1;
        break;
    case IXGBE_PCI_LINK_WIDTH_2:
        hw->bus.width = ixgbe_bus_width_pcie_x2;
        break;
    case IXGBE_PCI_LINK_WIDTH_4:
        hw->bus.width = ixgbe_bus_width_pcie_x4;
        break;
    case IXGBE_PCI_LINK_WIDTH_8:
        hw->bus.width = ixgbe_bus_width_pcie_x8;
        break;
    default:
        hw->bus.width = ixgbe_bus_width_unknown;
        break;
    }

    switch (link & IXGBE_PCI_LINK_SPEED) {
    case IXGBE_PCI_LINK_SPEED_2500:
        hw->bus.speed = ixgbe_bus_speed_2500;
        break;
    case IXGBE_PCI_LINK_SPEED_5000:
        hw->bus.speed = ixgbe_bus_speed_5000;
        break;
    case IXGBE_PCI_LINK_SPEED_8000:
        hw->bus.speed = ixgbe_bus_speed_8000;
        break;
    default:
        hw->bus.speed = ixgbe_bus_speed_unknown;
        break;
    }

    mac->ops.set_lan_id(hw);

display:
    device_printf(dev, "PCI Express Bus: Speed %s %s\n",
        ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
         (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
         (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : "Unknown"),
        (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
        (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
        (hw->bus.width == ixgbe_bus_width_pcie_x1) ?
"Width x1" : 3473 ("Unknown")); 3474 3475 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 3476 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 3477 (hw->bus.speed == ixgbe_bus_speed_2500))) { 3478 device_printf(dev, "PCI-Express bandwidth available" 3479 " for this card\n is not sufficient for" 3480 " optimal performance.\n"); 3481 device_printf(dev, "For optimal performance a x8 " 3482 "PCIE, or x4 PCIE Gen2 slot is required.\n"); 3483 } 3484 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 3485 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 3486 (hw->bus.speed < ixgbe_bus_speed_8000))) { 3487 device_printf(dev, "PCI-Express bandwidth available" 3488 " for this card\n is not sufficient for" 3489 " optimal performance.\n"); 3490 device_printf(dev, "For optimal performance a x8 " 3491 "PCIE Gen3 slot is required.\n"); 3492 } 3493 3494 return; 3495 } 3496 3497 3498 /* 3499 ** Setup the correct IVAR register for a particular MSIX interrupt 3500 ** (yes this is all very magic and confusing :) 3501 ** - entry is the register array entry 3502 ** - vector is the MSIX vector for this queue 3503 ** - type is RX/TX/MISC 3504 */ 3505 static void 3506 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 3507 { 3508 struct ixgbe_hw *hw = &adapter->hw; 3509 u32 ivar, index; 3510 3511 vector |= IXGBE_IVAR_ALLOC_VAL; 3512 3513 switch (hw->mac.type) { 3514 3515 case ixgbe_mac_82598EB: 3516 if (type == -1) 3517 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3518 else 3519 entry += (type * 64); 3520 index = (entry >> 2) & 0x1F; 3521 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3522 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3523 ivar |= (vector << (8 * (entry & 0x3))); 3524 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 3525 break; 3526 3527 case ixgbe_mac_82599EB: 3528 case ixgbe_mac_X540: 3529 case ixgbe_mac_X550: 3530 case ixgbe_mac_X550EM_x: 3531 if (type == -1) { /* MISC IVAR */ 3532 index = (entry & 1) * 8; 3533 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3534 ivar &= ~(0xFF << index); 3535 ivar |= (vector << index); 3536 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3537 } else { /* RX/TX IVARS */ 3538 index = (16 * (entry & 1)) + (8 * type); 3539 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3540 ivar &= ~(0xFF << index); 3541 ivar |= (vector << index); 3542 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3543 } 3544 3545 default: 3546 break; 3547 } 3548 } 3549 3550 static void 3551 ixgbe_configure_ivars(struct adapter *adapter) 3552 { 3553 struct ix_queue *que = adapter->queues; 3554 u32 newitr; 3555 3556 if (ixgbe_max_interrupt_rate > 0) 3557 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3558 else { 3559 /* 3560 ** Disable DMA coalescing if interrupt moderation is 3561 ** disabled. 3562 */ 3563 adapter->dmac = 0; 3564 newitr = 0; 3565 } 3566 3567 for (int i = 0; i < adapter->num_queues; i++, que++) { 3568 struct rx_ring *rxr = &adapter->rx_rings[i]; 3569 struct tx_ring *txr = &adapter->tx_rings[i]; 3570 /* First the RX queue entry */ 3571 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0); 3572 /* ... and the TX */ 3573 ixgbe_set_ivar(adapter, txr->me, que->msix, 1); 3574 /* Set an Initial EITR value */ 3575 IXGBE_WRITE_REG(&adapter->hw, 3576 IXGBE_EITR(que->msix), newitr); 3577 } 3578 3579 /* For the Link interrupt */ 3580 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 3581 } 3582 3583 /* 3584 ** ixgbe_sfp_probe - called in the local timer to 3585 ** determine if a port had optics inserted. 
3586 */ 3587 static bool 3588 ixgbe_sfp_probe(struct adapter *adapter) 3589 { 3590 struct ixgbe_hw *hw = &adapter->hw; 3591 device_t dev = adapter->dev; 3592 bool result = FALSE; 3593 3594 if ((hw->phy.type == ixgbe_phy_nl) && 3595 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3596 s32 ret = hw->phy.ops.identify_sfp(hw); 3597 if (ret) 3598 goto out; 3599 ret = hw->phy.ops.reset(hw); 3600 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3601 device_printf(dev,"Unsupported SFP+ module detected!"); 3602 printf(" Reload driver with supported module.\n"); 3603 adapter->sfp_probe = FALSE; 3604 goto out; 3605 } else 3606 device_printf(dev,"SFP+ module detected!\n"); 3607 /* We now have supported optics */ 3608 adapter->sfp_probe = FALSE; 3609 /* Set the optics type so system reports correctly */ 3610 ixgbe_setup_optics(adapter); 3611 result = TRUE; 3612 } 3613 out: 3614 return (result); 3615 } 3616 3617 /* 3618 ** Tasklet handler for MSIX Link interrupts 3619 ** - do outside interrupt since it might sleep 3620 */ 3621 static void 3622 ixgbe_handle_link(void *context, int pending) 3623 { 3624 struct adapter *adapter = context; 3625 3626 ixgbe_check_link(&adapter->hw, 3627 &adapter->link_speed, &adapter->link_up, 0); 3628 ixgbe_update_link_status(adapter); 3629 } 3630 3631 /* 3632 ** Tasklet for handling SFP module interrupts 3633 */ 3634 static void 3635 ixgbe_handle_mod(void *context, int pending) 3636 { 3637 struct adapter *adapter = context; 3638 struct ixgbe_hw *hw = &adapter->hw; 3639 device_t dev = adapter->dev; 3640 u32 err; 3641 3642 err = hw->phy.ops.identify_sfp(hw); 3643 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3644 device_printf(dev, 3645 "Unsupported SFP+ module type was detected.\n"); 3646 return; 3647 } 3648 3649 err = hw->mac.ops.setup_sfp(hw); 3650 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3651 device_printf(dev, 3652 "Setup failure - unsupported SFP+ module type.\n"); 3653 return; 3654 } 3655 taskqueue_enqueue(adapter->tq, &adapter->msf_task); 3656 return; 3657 } 3658 3659 3660 /* 3661 ** Tasklet for handling MSF (multispeed fiber) interrupts 3662 */ 3663 static void 3664 ixgbe_handle_msf(void *context, int pending) 3665 { 3666 struct adapter *adapter = context; 3667 struct ixgbe_hw *hw = &adapter->hw; 3668 u32 autoneg; 3669 bool negotiate; 3670 int err; 3671 3672 err = hw->phy.ops.identify_sfp(hw); 3673 if (!err) { 3674 ixgbe_setup_optics(adapter); 3675 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics); 3676 } 3677 3678 autoneg = hw->phy.autoneg_advertised; 3679 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3680 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3681 if (hw->mac.ops.setup_link) 3682 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3683 3684 ifmedia_removeall(&adapter->media); 3685 ixgbe_add_media_types(adapter); 3686 return; 3687 } 3688 3689 /* 3690 ** Tasklet for handling interrupts from an external PHY 3691 */ 3692 static void 3693 ixgbe_handle_phy(void *context, int pending) 3694 { 3695 struct adapter *adapter = context; 3696 struct ixgbe_hw *hw = &adapter->hw; 3697 int error; 3698 3699 error = hw->phy.ops.handle_lasi(hw); 3700 if (error == IXGBE_ERR_OVERTEMP) 3701 device_printf(adapter->dev, 3702 "CRITICAL: EXTERNAL PHY OVER TEMP!! 
" 3703 " PHY will downshift to lower power state!\n"); 3704 else if (error) 3705 device_printf(adapter->dev, 3706 "Error handling LASI interrupt: %d\n", 3707 error); 3708 return; 3709 } 3710 3711 #ifdef IXGBE_FDIR 3712 /* 3713 ** Tasklet for reinitializing the Flow Director filter table 3714 */ 3715 static void 3716 ixgbe_reinit_fdir(void *context, int pending) 3717 { 3718 struct adapter *adapter = context; 3719 struct ifnet *ifp = adapter->ifp; 3720 3721 if (adapter->fdir_reinit != 1) /* Shouldn't happen */ 3722 return; 3723 ixgbe_reinit_fdir_tables_82599(&adapter->hw); 3724 adapter->fdir_reinit = 0; 3725 /* re-enable flow director interrupts */ 3726 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 3727 /* Restart the interface */ 3728 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3729 return; 3730 } 3731 #endif 3732 3733 /********************************************************************* 3734 * 3735 * Configure DMA Coalescing 3736 * 3737 **********************************************************************/ 3738 static void 3739 ixgbe_config_dmac(struct adapter *adapter) 3740 { 3741 struct ixgbe_hw *hw = &adapter->hw; 3742 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 3743 3744 if (hw->mac.type < ixgbe_mac_X550 || 3745 !hw->mac.ops.dmac_config) 3746 return; 3747 3748 if (dcfg->watchdog_timer ^ adapter->dmac || 3749 dcfg->link_speed ^ adapter->link_speed) { 3750 dcfg->watchdog_timer = adapter->dmac; 3751 dcfg->fcoe_en = false; 3752 dcfg->link_speed = adapter->link_speed; 3753 dcfg->num_tcs = 1; 3754 3755 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 3756 dcfg->watchdog_timer, dcfg->link_speed); 3757 3758 hw->mac.ops.dmac_config(hw); 3759 } 3760 } 3761 3762 /* 3763 * Checks whether the adapter supports Energy Efficient Ethernet 3764 * or not, based on device ID. 3765 */ 3766 static void 3767 ixgbe_check_eee_support(struct adapter *adapter) 3768 { 3769 struct ixgbe_hw *hw = &adapter->hw; 3770 3771 adapter->eee_enabled = !!(hw->mac.ops.setup_eee); 3772 } 3773 3774 /* 3775 * Checks whether the adapter's ports are capable of 3776 * Wake On LAN by reading the adapter's NVM. 3777 * 3778 * Sets each port's hw->wol_enabled value depending 3779 * on the value read here. 3780 */ 3781 static void 3782 ixgbe_check_wol_support(struct adapter *adapter) 3783 { 3784 struct ixgbe_hw *hw = &adapter->hw; 3785 u16 dev_caps = 0; 3786 3787 /* Find out WoL support for port */ 3788 adapter->wol_support = hw->wol_enabled = 0; 3789 ixgbe_get_device_caps(hw, &dev_caps); 3790 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 3791 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 3792 hw->bus.func == 0)) 3793 adapter->wol_support = hw->wol_enabled = 1; 3794 3795 /* Save initial wake up filter configuration */ 3796 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 3797 3798 return; 3799 } 3800 3801 /* 3802 * Prepare the adapter/port for LPLU and/or WoL 3803 */ 3804 static int 3805 ixgbe_setup_low_power_mode(struct adapter *adapter) 3806 { 3807 struct ixgbe_hw *hw = &adapter->hw; 3808 device_t dev = adapter->dev; 3809 s32 error = 0; 3810 3811 mtx_assert(&adapter->core_mtx, MA_OWNED); 3812 3813 /* Limit power management flow to X550EM baseT */ 3814 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T 3815 && hw->phy.ops.enter_lplu) { 3816 /* Turn off support for APM wakeup. 
(Using ACPI instead) */
        IXGBE_WRITE_REG(hw, IXGBE_GRC,
            IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

        /*
         * Clear Wake Up Status register to prevent any previous wakeup
         * events from waking us up immediately after we suspend.
         */
        IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

        /*
         * Program the Wakeup Filter Control register with user filter
         * settings
         */
        IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

        /* Enable wakeups and power management in Wakeup Control */
        IXGBE_WRITE_REG(hw, IXGBE_WUC,
            IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

        /* X550EM baseT adapters need a special LPLU flow */
        hw->phy.reset_disable = true;
        ixgbe_stop(adapter);
        error = hw->phy.ops.enter_lplu(hw);
        if (error)
            device_printf(dev,
                "Error entering LPLU: %d\n", error);
        hw->phy.reset_disable = false;
    } else {
        /* Just stop for other adapters */
        ixgbe_stop(adapter);
    }

    return (error);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 missed_rx = 0, bprc, lxon, lxoff, total;
    u64 total_missed_rx = 0;

    adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
    adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
    adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
    adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

    /*
     * Tally the per-buffer missed-packet counters; without this,
     * missed_rx and total_missed_rx stay zero and the gprc
     * workaround and iqdrops accounting below are no-ops.
     */
    for (int i = 0; i < 8; i++) {
        u32 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
        missed_rx += mp;
        adapter->stats.pf.mpc[i] += mp;
        total_missed_rx += adapter->stats.pf.mpc[i];
    }

    for (int i = 0; i < 16; i++) {
        adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
        adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
        adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
    }
    adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
    adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
    adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

    /* Hardware workaround, gprc counts missed packets */
    adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
    adapter->stats.pf.gprc -= missed_rx;

    if (hw->mac.type != ixgbe_mac_82598EB) {
        adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
        adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
        adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
        adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
        adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
    } else {
        adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
        adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
        /* 82598 only has a counter in the high register */
        adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
        adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
        adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
    }

    /*
     * Workaround: mprc hardware is incorrectly counting
     * broadcasts, so for now we subtract those. 
3903 */ 3904 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3905 adapter->stats.pf.bprc += bprc; 3906 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3907 if (hw->mac.type == ixgbe_mac_82598EB) 3908 adapter->stats.pf.mprc -= bprc; 3909 3910 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3911 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3912 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3913 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3914 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3915 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3916 3917 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3918 adapter->stats.pf.lxontxc += lxon; 3919 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3920 adapter->stats.pf.lxofftxc += lxoff; 3921 total = lxon + lxoff; 3922 3923 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 3924 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3925 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3926 adapter->stats.pf.gptc -= total; 3927 adapter->stats.pf.mptc -= total; 3928 adapter->stats.pf.ptc64 -= total; 3929 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN; 3930 3931 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3932 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3933 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3934 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3935 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3936 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3937 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3938 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 3939 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3940 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3941 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3942 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3943 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3944 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3945 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3946 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3947 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3948 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3949 /* Only read FCOE on 82599 */ 3950 if (hw->mac.type != ixgbe_mac_82598EB) { 3951 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3952 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3953 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3954 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3955 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3956 } 3957 3958 /* Fill out the OS statistics structure */ 3959 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc); 3960 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc); 3961 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc); 3962 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc); 3963 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc); 3964 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc); 3965 IXGBE_SET_COLLISIONS(adapter, 0); 3966 IXGBE_SET_IQDROPS(adapter, total_missed_rx); 3967 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs 3968 + adapter->stats.pf.rlec); 3969 } 3970 3971 #if __FreeBSD_version >= 1100036 3972 static uint64_t 3973 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt) 3974 { 3975 struct adapter 
*adapter; 3976 struct tx_ring *txr; 3977 uint64_t rv; 3978 3979 adapter = if_getsoftc(ifp); 3980 3981 switch (cnt) { 3982 case IFCOUNTER_IPACKETS: 3983 return (adapter->ipackets); 3984 case IFCOUNTER_OPACKETS: 3985 return (adapter->opackets); 3986 case IFCOUNTER_IBYTES: 3987 return (adapter->ibytes); 3988 case IFCOUNTER_OBYTES: 3989 return (adapter->obytes); 3990 case IFCOUNTER_IMCASTS: 3991 return (adapter->imcasts); 3992 case IFCOUNTER_OMCASTS: 3993 return (adapter->omcasts); 3994 case IFCOUNTER_COLLISIONS: 3995 return (0); 3996 case IFCOUNTER_IQDROPS: 3997 return (adapter->iqdrops); 3998 case IFCOUNTER_OQDROPS: 3999 rv = 0; 4000 txr = adapter->tx_rings; 4001 for (int i = 0; i < adapter->num_queues; i++, txr++) 4002 rv += txr->br->br_drops; 4003 return (rv); 4004 case IFCOUNTER_IERRORS: 4005 return (adapter->ierrors); 4006 default: 4007 return (if_get_counter_default(ifp, cnt)); 4008 } 4009 } 4010 #endif 4011 4012 /** ixgbe_sysctl_tdh_handler - Handler function 4013 * Retrieves the TDH value from the hardware 4014 */ 4015 static int 4016 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 4017 { 4018 int error; 4019 4020 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 4021 if (!txr) return 0; 4022 4023 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); 4024 error = sysctl_handle_int(oidp, &val, 0, req); 4025 if (error || !req->newptr) 4026 return error; 4027 return 0; 4028 } 4029 4030 /** ixgbe_sysctl_tdt_handler - Handler function 4031 * Retrieves the TDT value from the hardware 4032 */ 4033 static int 4034 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 4035 { 4036 int error; 4037 4038 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 4039 if (!txr) return 0; 4040 4041 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); 4042 error = sysctl_handle_int(oidp, &val, 0, req); 4043 if (error || !req->newptr) 4044 return error; 4045 return 0; 4046 } 4047 4048 /** ixgbe_sysctl_rdh_handler - Handler function 4049 * Retrieves the RDH value from the hardware 4050 */ 4051 static int 4052 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 4053 { 4054 int error; 4055 4056 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 4057 if (!rxr) return 0; 4058 4059 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); 4060 error = sysctl_handle_int(oidp, &val, 0, req); 4061 if (error || !req->newptr) 4062 return error; 4063 return 0; 4064 } 4065 4066 /** ixgbe_sysctl_rdt_handler - Handler function 4067 * Retrieves the RDT value from the hardware 4068 */ 4069 static int 4070 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 4071 { 4072 int error; 4073 4074 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 4075 if (!rxr) return 0; 4076 4077 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); 4078 error = sysctl_handle_int(oidp, &val, 0, req); 4079 if (error || !req->newptr) 4080 return error; 4081 return 0; 4082 } 4083 4084 static int 4085 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 4086 { 4087 int error; 4088 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1); 4089 unsigned int reg, usec, rate; 4090 4091 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); 4092 usec = ((reg & 0x0FF8) >> 3); 4093 if (usec > 0) 4094 rate = 500000 / usec; 4095 else 4096 rate = 0; 4097 error = sysctl_handle_int(oidp, &rate, 0, req); 4098 if (error || !req->newptr) 4099 return error; 4100 reg &= ~0xfff; /* default, no limitation */ 4101 ixgbe_max_interrupt_rate = 0; 4102 if (rate > 0 && rate < 500000) { 4103 if 
(rate < 1000) 4104 rate = 1000; 4105 ixgbe_max_interrupt_rate = rate; 4106 reg |= ((4000000/rate) & 0xff8 ); 4107 } 4108 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); 4109 return 0; 4110 } 4111 4112 static void 4113 ixgbe_add_device_sysctls(struct adapter *adapter) 4114 { 4115 device_t dev = adapter->dev; 4116 struct ixgbe_hw *hw = &adapter->hw; 4117 struct sysctl_oid_list *child; 4118 struct sysctl_ctx_list *ctx; 4119 4120 ctx = device_get_sysctl_ctx(dev); 4121 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 4122 4123 /* Sysctls for all devices */ 4124 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", 4125 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4126 ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC); 4127 4128 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", 4129 CTLFLAG_RW, 4130 &ixgbe_enable_aim, 1, "Interrupt Moderation"); 4131 4132 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed", 4133 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4134 ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED); 4135 4136 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test", 4137 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4138 ixgbe_sysctl_thermal_test, "I", "Thermal Test"); 4139 4140 /* for X550 devices */ 4141 if (hw->mac.type >= ixgbe_mac_X550) 4142 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac", 4143 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4144 ixgbe_sysctl_dmac, "I", "DMA Coalesce"); 4145 4146 /* for X550T and X550EM backplane devices */ 4147 if (hw->mac.ops.setup_eee) { 4148 struct sysctl_oid *eee_node; 4149 struct sysctl_oid_list *eee_list; 4150 4151 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee", 4152 CTLFLAG_RD, NULL, 4153 "Energy Efficient Ethernet sysctls"); 4154 eee_list = SYSCTL_CHILDREN(eee_node); 4155 4156 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable", 4157 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4158 ixgbe_sysctl_eee_enable, "I", 4159 "Enable or Disable EEE"); 4160 4161 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated", 4162 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4163 ixgbe_sysctl_eee_negotiated, "I", 4164 "EEE negotiated on link"); 4165 4166 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status", 4167 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4168 ixgbe_sysctl_eee_tx_lpi_status, "I", 4169 "Whether or not TX link is in LPI state"); 4170 4171 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status", 4172 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4173 ixgbe_sysctl_eee_rx_lpi_status, "I", 4174 "Whether or not RX link is in LPI state"); 4175 } 4176 4177 /* for certain 10GBaseT devices */ 4178 if (hw->device_id == IXGBE_DEV_ID_X550T || 4179 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 4180 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable", 4181 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4182 ixgbe_sysctl_wol_enable, "I", 4183 "Enable/Disable Wake on LAN"); 4184 4185 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc", 4186 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4187 ixgbe_sysctl_wufc, "I", 4188 "Enable/Disable Wake Up Filters"); 4189 } 4190 4191 /* for X550EM 10GBaseT devices */ 4192 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 4193 struct sysctl_oid *phy_node; 4194 struct sysctl_oid_list *phy_list; 4195 4196 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy", 4197 CTLFLAG_RD, NULL, 4198 "External PHY sysctls"); 4199 phy_list = SYSCTL_CHILDREN(phy_node); 4200 4201 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp", 4202 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4203 ixgbe_sysctl_phy_temp, "I", 4204 "Current External PHY Temperature (Celsius)"); 4205 4206 SYSCTL_ADD_PROC(ctx, phy_list, 
OID_AUTO, "overtemp_occurred",
            CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
            ixgbe_sysctl_phy_overtemp_occurred, "I",
            "External PHY High Temperature Event Occurred");
    }
}

/*
 * Add sysctl variables, one per statistic, to the system.
 */
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
    device_t dev = adapter->dev;

    struct tx_ring *txr = adapter->tx_rings;
    struct rx_ring *rxr = adapter->rx_rings;

    struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
    struct sysctl_oid *tree = device_get_sysctl_tree(dev);
    struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
    struct ixgbe_hw_stats *stats = &adapter->stats.pf;

    struct sysctl_oid *stat_node, *queue_node;
    struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
    char namebuf[QUEUE_NAME_LEN];

    /* Driver Statistics */
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
        CTLFLAG_RD, &adapter->dropped_pkts,
        "Driver dropped packets");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
        CTLFLAG_RD, &adapter->mbuf_defrag_failed,
        "m_defrag() failed");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
        CTLFLAG_RD, &adapter->watchdog_events,
        "Watchdog timeouts");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
        CTLFLAG_RD, &adapter->link_irq,
        "Link MSIX IRQ Handled");

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
            CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
            sizeof(&adapter->queues[i]),
            ixgbe_sysctl_interrupt_rate_handler, "IU",
            "Interrupt Rate");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
            CTLFLAG_RD, &(adapter->queues[i].irqs),
            "irqs on this queue");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
            CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
            ixgbe_sysctl_tdh_handler, "IU",
            "Transmit Descriptor Head");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
            CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
            ixgbe_sysctl_tdt_handler, "IU",
            "Transmit Descriptor Tail");
        SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
            CTLFLAG_RD, &txr->tso_tx,
            "TSO");
        SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
            CTLFLAG_RD, &txr->no_tx_dma_setup,
            "Driver tx dma failure in xmit");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
            CTLFLAG_RD, &txr->no_desc_avail,
            "Queue No Descriptor Available");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
            CTLFLAG_RD, &txr->total_packets,
            "Queue Packets Transmitted");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
            CTLFLAG_RD, &txr->br->br_drops,
            "Packets dropped in buf_ring");
    }
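    /*
     * Reader's note on the "interrupt_rate" node above (added for
     * clarity, not from the original sources): the handler exposes
     * EITR in interrupts/second.  The interval field is
     * (reg & 0x0FF8) >> 3 in 2us units, so rate = 500000 / usec;
     * writing a rate r programs (4000000 / r) & 0x0FF8.  For example
     * r = 8000 stores 0x1F0, i.e. 62 * 2us = 124us between
     * interrupts.  Tunable per queue, e.g.:
     *     sysctl dev.ix.0.queue0.interrupt_rate=8000
     * (the dev.ix.0 prefix assumes PF unit 0).
     */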
    for (int i = 0; i < adapter->num_queues; i++, rxr++) {
        struct lro_ctrl *lro = &rxr->lro;

        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
            CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
            ixgbe_sysctl_rdh_handler, "IU",
            "Receive Descriptor Head");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
            CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
            ixgbe_sysctl_rdt_handler, "IU",
            "Receive Descriptor Tail");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
            CTLFLAG_RD, &rxr->rx_packets,
            "Queue Packets Received");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
            CTLFLAG_RD, &rxr->rx_bytes,
            "Queue Bytes Received");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
            CTLFLAG_RD, &rxr->rx_copies,
            "Copied RX Frames");
        SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
            CTLFLAG_RD, &lro->lro_queued, 0,
            "LRO Queued");
        SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
            CTLFLAG_RD, &lro->lro_flushed, 0,
            "LRO Flushed");
    }

    /* MAC stats get their own sub node */

    stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
        CTLFLAG_RD, NULL, "MAC Statistics");
    stat_list = SYSCTL_CHILDREN(stat_node);

    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
        CTLFLAG_RD, &stats->crcerrs,
        "CRC Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
        CTLFLAG_RD, &stats->illerrc,
        "Illegal Byte Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
        CTLFLAG_RD, &stats->errbc,
        "Byte Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
        CTLFLAG_RD, &stats->mspdc,
        "MAC Short Packets Discarded");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
        CTLFLAG_RD, &stats->mlfc,
        "MAC Local Faults");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
        CTLFLAG_RD, &stats->mrfc,
        "MAC Remote Faults");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
        CTLFLAG_RD, &stats->rlec,
        "Receive Length Errors");

    /* Flow Control stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
        CTLFLAG_RD, &stats->lxontxc,
        "Link XON Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
        CTLFLAG_RD, &stats->lxonrxc,
        "Link XON Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
        CTLFLAG_RD, &stats->lxofftxc,
        "Link XOFF Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
        CTLFLAG_RD, &stats->lxoffrxc,
        "Link XOFF Received");

    /* Packet Reception Stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
        CTLFLAG_RD, &stats->tor,
        "Total Octets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
        CTLFLAG_RD, &stats->gorc,
        "Good Octets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
        CTLFLAG_RD, &stats->tpr,
        "Total Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
        CTLFLAG_RD, &stats->gprc,
        "Good Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
        CTLFLAG_RD, &stats->mprc,
        "Multicast Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
        CTLFLAG_RD, &stats->bprc,
        "Broadcast Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
        CTLFLAG_RD, &stats->prc64,
        "64 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
        CTLFLAG_RD, &stats->prc127,
        "65-127 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
        CTLFLAG_RD, &stats->prc255,
        "128-255 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
        CTLFLAG_RD, &stats->prc511,
        "256-511 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
        CTLFLAG_RD, &stats->prc1023,
        "512-1023 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
        CTLFLAG_RD, &stats->prc1522,
        "1024-1522 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
        CTLFLAG_RD, &stats->ruc,
        "Receive Undersized");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
        CTLFLAG_RD, &stats->rfc,
        "Fragmented Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
        CTLFLAG_RD, &stats->roc,
        "Oversized Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
        CTLFLAG_RD, &stats->rjc,
        "Received Jabber");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
        CTLFLAG_RD, &stats->mngprc,
        "Management Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
        CTLFLAG_RD, &stats->mngpdc,
        "Management Packets Dropped");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
        CTLFLAG_RD, &stats->xec,
        "Checksum Errors");

    /* Packet Transmission Stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
        CTLFLAG_RD, &stats->gotc,
        "Good Octets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
        CTLFLAG_RD, &stats->tpt,
        "Total Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
        CTLFLAG_RD, &stats->gptc,
        "Good Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
        CTLFLAG_RD, &stats->bptc,
        "Broadcast Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
        CTLFLAG_RD, &stats->mptc,
        "Multicast Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
        CTLFLAG_RD, &stats->mngptc,
        "Management Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
        CTLFLAG_RD, &stats->ptc64,
        "64 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
        CTLFLAG_RD, &stats->ptc127,
        "65-127 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
        CTLFLAG_RD, &stats->ptc255,
        "128-255 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
        CTLFLAG_RD, &stats->ptc511,
        "256-511 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
        CTLFLAG_RD, &stats->ptc1023,
        "512-1023 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
        CTLFLAG_RD, &stats->ptc1522,
        "1024-1522 byte frames transmitted");
}

static void
ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
    const char *description, int *limit, int
value) 4469 { 4470 *limit = value; 4471 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 4472 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 4473 OID_AUTO, name, CTLFLAG_RW, limit, value, description); 4474 } 4475 4476 /* 4477 ** Set flow control using sysctl: 4478 ** Flow control values: 4479 ** 0 - off 4480 ** 1 - rx pause 4481 ** 2 - tx pause 4482 ** 3 - full 4483 */ 4484 static int 4485 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS) 4486 { 4487 int error, last; 4488 struct adapter *adapter = (struct adapter *) arg1; 4489 4490 last = adapter->fc; 4491 error = sysctl_handle_int(oidp, &adapter->fc, 0, req); 4492 if ((error) || (req->newptr == NULL)) 4493 return (error); 4494 4495 /* Don't bother if it's not changed */ 4496 if (adapter->fc == last) 4497 return (0); 4498 4499 switch (adapter->fc) { 4500 case ixgbe_fc_rx_pause: 4501 case ixgbe_fc_tx_pause: 4502 case ixgbe_fc_full: 4503 adapter->hw.fc.requested_mode = adapter->fc; 4504 if (adapter->num_queues > 1) 4505 ixgbe_disable_rx_drop(adapter); 4506 break; 4507 case ixgbe_fc_none: 4508 adapter->hw.fc.requested_mode = ixgbe_fc_none; 4509 if (adapter->num_queues > 1) 4510 ixgbe_enable_rx_drop(adapter); 4511 break; 4512 default: 4513 adapter->fc = last; 4514 return (EINVAL); 4515 } 4516 /* Don't autoneg if forcing a value */ 4517 adapter->hw.fc.disable_fc_autoneg = TRUE; 4518 ixgbe_fc_enable(&adapter->hw); 4519 return error; 4520 } 4521 4522 /* 4523 ** Control advertised link speed: 4524 ** Flags: 4525 ** 0x1 - advertise 100 Mb 4526 ** 0x2 - advertise 1G 4527 ** 0x4 - advertise 10G 4528 */ 4529 static int 4530 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS) 4531 { 4532 int error = 0, requested; 4533 struct adapter *adapter; 4534 device_t dev; 4535 struct ixgbe_hw *hw; 4536 ixgbe_link_speed speed = 0; 4537 4538 adapter = (struct adapter *) arg1; 4539 dev = adapter->dev; 4540 hw = &adapter->hw; 4541 4542 requested = adapter->advertise; 4543 error = sysctl_handle_int(oidp, &requested, 0, req); 4544 if ((error) || (req->newptr == NULL)) 4545 return (error); 4546 4547 /* Checks to validate new value */ 4548 if (adapter->advertise == requested) /* no change */ 4549 return (0); 4550 4551 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 4552 (hw->phy.multispeed_fiber))) { 4553 device_printf(dev, 4554 "Advertised speed can only be set on copper or " 4555 "multispeed fiber media types.\n"); 4556 return (EINVAL); 4557 } 4558 4559 if (requested < 0x1 || requested > 0x7) { 4560 device_printf(dev, 4561 "Invalid advertised speed; valid modes are 0x1 through 0x7\n"); 4562 return (EINVAL); 4563 } 4564 4565 if ((requested & 0x1) 4566 && (hw->mac.type != ixgbe_mac_X540) 4567 && (hw->mac.type != ixgbe_mac_X550)) { 4568 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n"); 4569 return (EINVAL); 4570 } 4571 4572 /* Set new value and report new advertised mode */ 4573 if (requested & 0x1) 4574 speed |= IXGBE_LINK_SPEED_100_FULL; 4575 if (requested & 0x2) 4576 speed |= IXGBE_LINK_SPEED_1GB_FULL; 4577 if (requested & 0x4) 4578 speed |= IXGBE_LINK_SPEED_10GB_FULL; 4579 4580 hw->mac.autotry_restart = TRUE; 4581 hw->mac.ops.setup_link(hw, speed, TRUE); 4582 adapter->advertise = requested; 4583 4584 return (error); 4585 } 4586 4587 /* 4588 * The following two sysctls are for X550 BaseT devices; 4589 * they deal with the external PHY used in them. 
4590 */ 4591 static int 4592 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) 4593 { 4594 struct adapter *adapter = (struct adapter *) arg1; 4595 struct ixgbe_hw *hw = &adapter->hw; 4596 u16 reg; 4597 4598 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4599 device_printf(adapter->dev, 4600 "Device has no supported external thermal sensor.\n"); 4601 return (ENODEV); 4602 } 4603 4604 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 4605 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, 4606 ®)) { 4607 device_printf(adapter->dev, 4608 "Error reading from PHY's current temperature register\n"); 4609 return (EAGAIN); 4610 } 4611 4612 /* Shift temp for output */ 4613 reg = reg >> 8; 4614 4615 return (sysctl_handle_int(oidp, NULL, reg, req)); 4616 } 4617 4618 /* 4619 * Reports whether the current PHY temperature is over 4620 * the overtemp threshold. 4621 * - This is reported directly from the PHY 4622 */ 4623 static int 4624 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) 4625 { 4626 struct adapter *adapter = (struct adapter *) arg1; 4627 struct ixgbe_hw *hw = &adapter->hw; 4628 u16 reg; 4629 4630 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4631 device_printf(adapter->dev, 4632 "Device has no supported external thermal sensor.\n"); 4633 return (ENODEV); 4634 } 4635 4636 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 4637 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, 4638 ®)) { 4639 device_printf(adapter->dev, 4640 "Error reading from PHY's temperature status register\n"); 4641 return (EAGAIN); 4642 } 4643 4644 /* Get occurrence bit */ 4645 reg = !!(reg & 0x4000); 4646 return (sysctl_handle_int(oidp, 0, reg, req)); 4647 } 4648 4649 /* 4650 ** Thermal Shutdown Trigger (internal MAC) 4651 ** - Set this to 1 to cause an overtemp event to occur 4652 */ 4653 static int 4654 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS) 4655 { 4656 struct adapter *adapter = (struct adapter *) arg1; 4657 struct ixgbe_hw *hw = &adapter->hw; 4658 int error, fire = 0; 4659 4660 error = sysctl_handle_int(oidp, &fire, 0, req); 4661 if ((error) || (req->newptr == NULL)) 4662 return (error); 4663 4664 if (fire) { 4665 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS); 4666 reg |= IXGBE_EICR_TS; 4667 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg); 4668 } 4669 4670 return (0); 4671 } 4672 4673 /* 4674 ** Manage DMA Coalescing. 4675 ** Control values: 4676 ** 0/1 - off / on (use default value of 1000) 4677 ** 4678 ** Legal timer values are: 4679 ** 50,100,250,500,1000,2000,5000,10000 4680 ** 4681 ** Turning off interrupt moderation will also turn this off. 
4682 */ 4683 static int 4684 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4685 { 4686 struct adapter *adapter = (struct adapter *) arg1; 4687 struct ixgbe_hw *hw = &adapter->hw; 4688 struct ifnet *ifp = adapter->ifp; 4689 int error; 4690 u16 oldval; 4691 4692 oldval = adapter->dmac; 4693 error = sysctl_handle_int(oidp, &adapter->dmac, 0, req); 4694 if ((error) || (req->newptr == NULL)) 4695 return (error); 4696 4697 switch (hw->mac.type) { 4698 case ixgbe_mac_X550: 4699 case ixgbe_mac_X550EM_x: 4700 break; 4701 default: 4702 device_printf(adapter->dev, 4703 "DMA Coalescing is only supported on X550 devices\n"); 4704 return (ENODEV); 4705 } 4706 4707 switch (adapter->dmac) { 4708 case 0: 4709 /* Disabled */ 4710 break; 4711 case 1: /* Enable and use default */ 4712 adapter->dmac = 1000; 4713 break; 4714 case 50: 4715 case 100: 4716 case 250: 4717 case 500: 4718 case 1000: 4719 case 2000: 4720 case 5000: 4721 case 10000: 4722 /* Legal values - allow */ 4723 break; 4724 default: 4725 /* Do nothing, illegal value */ 4726 adapter->dmac = oldval; 4727 return (EINVAL); 4728 } 4729 4730 /* Re-initialize hardware if it's already running */ 4731 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4732 ixgbe_init(adapter); 4733 4734 return (0); 4735 } 4736 4737 /* 4738 * Sysctl to enable/disable the WoL capability, if supported by the adapter. 4739 * Values: 4740 * 0 - disabled 4741 * 1 - enabled 4742 */ 4743 static int 4744 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) 4745 { 4746 struct adapter *adapter = (struct adapter *) arg1; 4747 struct ixgbe_hw *hw = &adapter->hw; 4748 int new_wol_enabled; 4749 int error = 0; 4750 4751 new_wol_enabled = hw->wol_enabled; 4752 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); 4753 if ((error) || (req->newptr == NULL)) 4754 return (error); 4755 if (new_wol_enabled == hw->wol_enabled) 4756 return (0); 4757 4758 if (new_wol_enabled > 0 && !adapter->wol_support) 4759 return (ENODEV); 4760 else 4761 hw->wol_enabled = !!(new_wol_enabled); 4762 4763 return (0); 4764 } 4765 4766 /* 4767 * Sysctl to enable/disable the Energy Efficient Ethernet capability, 4768 * if supported by the adapter. 4769 * Values: 4770 * 0 - disabled 4771 * 1 - enabled 4772 */ 4773 static int 4774 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4775 { 4776 struct adapter *adapter = (struct adapter *) arg1; 4777 struct ixgbe_hw *hw = &adapter->hw; 4778 struct ifnet *ifp = adapter->ifp; 4779 int new_eee_enabled, error = 0; 4780 4781 new_eee_enabled = adapter->eee_enabled; 4782 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req); 4783 if ((error) || (req->newptr == NULL)) 4784 return (error); 4785 if (new_eee_enabled == adapter->eee_enabled) 4786 return (0); 4787 4788 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee) 4789 return (ENODEV); 4790 else 4791 adapter->eee_enabled = !!(new_eee_enabled); 4792 4793 /* Re-initialize hardware if it's already running */ 4794 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4795 ixgbe_init(adapter); 4796 4797 return (0); 4798 } 4799 4800 /* 4801 * Read-only sysctl indicating whether EEE support was negotiated 4802 * on the link. 4803 */ 4804 static int 4805 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS) 4806 { 4807 struct adapter *adapter = (struct adapter *) arg1; 4808 struct ixgbe_hw *hw = &adapter->hw; 4809 bool status; 4810 4811 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG); 4812 4813 return (sysctl_handle_int(oidp, 0, status, req)); 4814 } 4815 4816 /* 4817 * Read-only sysctl indicating whether RX Link is in LPI state. 
*/
static int
ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
{
    struct adapter *adapter = (struct adapter *) arg1;
    struct ixgbe_hw *hw = &adapter->hw;
    bool status;

    status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
        IXGBE_EEE_RX_LPI_STATUS);

    return (sysctl_handle_int(oidp, 0, status, req));
}

/*
 * Read-only sysctl indicating whether TX Link is in LPI state.
 */
static int
ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
{
    struct adapter *adapter = (struct adapter *) arg1;
    struct ixgbe_hw *hw = &adapter->hw;
    bool status;

    status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
        IXGBE_EEE_TX_LPI_STATUS);

    return (sysctl_handle_int(oidp, 0, status, req));
}

/*
 * Sysctl to control which types of received packets will
 * wake the adapter up.
 * WUFC - Wake Up Filter Control
 * Flags:
 *	0x1  - Link Status Change
 *	0x2  - Magic Packet
 *	0x4  - Direct Exact
 *	0x8  - Directed Multicast
 *	0x10 - Broadcast
 *	0x20 - ARP/IPv4 Request Packet
 *	0x40 - Direct IPv4 Packet
 *	0x80 - Direct IPv6 Packet
 *
 * Setting any other flag will cause the sysctl to return an
 * error.
 */
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
    struct adapter *adapter = (struct adapter *) arg1;
    int error = 0;
    u32 new_wufc;

    new_wufc = adapter->wufc;

    error = sysctl_handle_int(oidp, &new_wufc, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);
    if (new_wufc == adapter->wufc)
        return (0);

    if (new_wufc & 0xffffff00)
        return (EINVAL);
    else {
        new_wufc &= 0xff;
        new_wufc |= (0xffffff & adapter->wufc);
        adapter->wufc = new_wufc;
    }

    return (0);
}
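/*
 * Worked example for the WUFC flags above (added for clarity, not from
 * the original sources): to wake on magic packets and broadcast frames,
 * combine bits 0x2 and 0x10:
 */
#if 0	/* illustrative only -- not compiled */
static const u32 example_wufc = 0x2 /* magic packet */ | 0x10 /* broadcast */;
/* i.e. sysctl dev.ix.0.wufc=0x12, assuming PF unit 0 */
#endif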
/*
** Enable the hardware to drop packets when the buffer is
** full.  This is useful when multiqueue is enabled, so that
** no single full queue stalls the entire RX engine.  We only
** enable this when Multiqueue is enabled AND when Flow
** Control is disabled.
*/
static void
ixgbe_enable_rx_drop(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    for (int i = 0; i < adapter->num_queues; i++) {
        struct rx_ring *rxr = &adapter->rx_rings[i];
        u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
        srrctl |= IXGBE_SRRCTL_DROP_EN;
        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
    }
#ifdef PCI_IOV
    /* enable drop for each vf */
    for (int i = 0; i < adapter->num_vfs; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_QDE,
            (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
            IXGBE_QDE_ENABLE));
    }
#endif
}

static void
ixgbe_disable_rx_drop(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    for (int i = 0; i < adapter->num_queues; i++) {
        struct rx_ring *rxr = &adapter->rx_rings[i];
        u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
        srrctl &= ~IXGBE_SRRCTL_DROP_EN;
        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
    }
#ifdef PCI_IOV
    /* disable drop for each vf */
    for (int i = 0; i < adapter->num_vfs; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_QDE,
            (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
    }
#endif
}

static void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
    u32 mask;

    switch (adapter->hw.mac.type) {
    case ixgbe_mac_82598EB:
        mask = (IXGBE_EIMS_RTX_QUEUE & queues);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
        break;
    case ixgbe_mac_82599EB:
    case ixgbe_mac_X540:
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
        mask = (queues & 0xFFFFFFFF);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
        mask = (queues >> 32);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
        break;
    default:
        break;
    }
}

#ifdef PCI_IOV

/*
** Support functions for SRIOV/VF management
*/

static void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
    struct ixgbe_vf *vf;

    for (int i = 0; i < adapter->num_vfs; i++) {
        vf = &adapter->vfs[i];
        if (vf->flags & IXGBE_VF_ACTIVE)
            ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
    }
}


static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
    struct ixgbe_hw *hw;
    uint32_t vmolr, vmvir;

    hw = &adapter->hw;

    vf->vlan_tag = tag;

    vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

    /* Do not receive packets that pass inexact filters. */
    vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

    /* Disable Multicast Promiscuous Mode. */
    vmolr &= ~IXGBE_VMOLR_MPE;

    /* Accept broadcasts. */
    vmolr |= IXGBE_VMOLR_BAM;

    if (tag == 0) {
        /* Accept non-vlan tagged traffic. */
        //vmolr |= IXGBE_VMOLR_AUPE;

        /* Allow VM to tag outgoing traffic; no default tag. */
        vmvir = 0;
    } else {
        /* Require vlan-tagged traffic. */
        vmolr &= ~IXGBE_VMOLR_AUPE;

        /* Tag all traffic with provided vlan tag. 
*/ 5015 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT); 5016 } 5017 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr); 5018 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir); 5019 } 5020 5021 5022 static boolean_t 5023 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf) 5024 { 5025 5026 /* 5027 * Frame size compatibility between PF and VF is only a problem on 5028 * 82599-based cards. X540 and later support any combination of jumbo 5029 * frames on PFs and VFs. 5030 */ 5031 if (adapter->hw.mac.type != ixgbe_mac_82599EB) 5032 return (TRUE); 5033 5034 switch (vf->api_ver) { 5035 case IXGBE_API_VER_1_0: 5036 case IXGBE_API_VER_UNKNOWN: 5037 /* 5038 * On legacy (1.0 and older) VF versions, we don't support jumbo 5039 * frames on either the PF or the VF. 5040 */ 5041 if (adapter->max_frame_size > ETHER_MAX_LEN || 5042 vf->max_frame_size > ETHER_MAX_LEN) 5043 return (FALSE); 5044 5045 return (TRUE); 5046 5047 break; 5048 case IXGBE_API_VER_1_1: 5049 default: 5050 /* 5051 * 1.1 or later VF versions always work if they aren't using 5052 * jumbo frames. 5053 */ 5054 if (vf->max_frame_size <= ETHER_MAX_LEN) 5055 return (TRUE); 5056 5057 /* 5058 * Jumbo frames only work with VFs if the PF is also using jumbo 5059 * frames. 5060 */ 5061 if (adapter->max_frame_size <= ETHER_MAX_LEN) 5062 return (TRUE); 5063 5064 return (FALSE); 5065 5066 } 5067 } 5068 5069 5070 static void 5071 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf) 5072 { 5073 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan); 5074 5075 // XXX clear multicast addresses 5076 5077 ixgbe_clear_rar(&adapter->hw, vf->rar_index); 5078 5079 vf->api_ver = IXGBE_API_VER_UNKNOWN; 5080 } 5081 5082 5083 static void 5084 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf) 5085 { 5086 struct ixgbe_hw *hw; 5087 uint32_t vf_index, vfte; 5088 5089 hw = &adapter->hw; 5090 5091 vf_index = IXGBE_VF_INDEX(vf->pool); 5092 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index)); 5093 vfte |= IXGBE_VF_BIT(vf->pool); 5094 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte); 5095 } 5096 5097 5098 static void 5099 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf) 5100 { 5101 struct ixgbe_hw *hw; 5102 uint32_t vf_index, vfre; 5103 5104 hw = &adapter->hw; 5105 5106 vf_index = IXGBE_VF_INDEX(vf->pool); 5107 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index)); 5108 if (ixgbe_vf_frame_size_compatible(adapter, vf)) 5109 vfre |= IXGBE_VF_BIT(vf->pool); 5110 else 5111 vfre &= ~IXGBE_VF_BIT(vf->pool); 5112 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre); 5113 } 5114 5115 5116 static void 5117 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) 5118 { 5119 struct ixgbe_hw *hw; 5120 uint32_t ack; 5121 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN]; 5122 5123 hw = &adapter->hw; 5124 5125 ixgbe_process_vf_reset(adapter, vf); 5126 5127 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { 5128 ixgbe_set_rar(&adapter->hw, vf->rar_index, 5129 vf->ether_addr, vf->pool, TRUE); 5130 ack = IXGBE_VT_MSGTYPE_ACK; 5131 } else 5132 ack = IXGBE_VT_MSGTYPE_NACK; 5133 5134 ixgbe_vf_enable_transmit(adapter, vf); 5135 ixgbe_vf_enable_receive(adapter, vf); 5136 5137 vf->flags |= IXGBE_VF_CTS; 5138 5139 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS; 5140 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); 5141 resp[3] = hw->mac.mc_filter_type; 5142 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool); 5143 } 5144 5145 5146 static void 5147 ixgbe_vf_set_mac(struct adapter *adapter, struct 
ixgbe_vf *vf, uint32_t *msg)
{
    uint8_t *mac;

    mac = (uint8_t*)&msg[1];

    /* Check that the VF has permission to change the MAC address. */
    if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
        ixgbe_send_vf_nack(adapter, vf, msg[0]);
        return;
    }

    if (ixgbe_validate_mac_addr(mac) != 0) {
        ixgbe_send_vf_nack(adapter, vf, msg[0]);
        return;
    }

    bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

    ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
        vf->pool, TRUE);

    ixgbe_send_vf_ack(adapter, vf, msg[0]);
}


/*
** VF multicast addresses are hashed into the MTA: each hash value
** selects one bit out of 128 32-bit registers (4096 bits in all).
*/
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
    u16 *list = (u16*)&msg[1];
    int entries;
    u32 vmolr, vec_bit, vec_reg, mta_reg;

    entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
    entries = min(entries, IXGBE_MAX_VF_MC);

    vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

    vf->num_mc_hashes = entries;

    /* Set the appropriate MTA bit */
    for (int i = 0; i < entries; i++) {
        vf->mc_hash[i] = list[i];
        vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
        vec_bit = vf->mc_hash[i] & 0x1F;
        mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
        mta_reg |= (1 << vec_bit);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
    }

    vmolr |= IXGBE_VMOLR_ROMPE;
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
    ixgbe_send_vf_ack(adapter, vf, msg[0]);
    return;
}


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
    struct ixgbe_hw *hw;
    int enable;
    uint16_t tag;

    hw = &adapter->hw;
    enable = IXGBE_VT_MSGINFO(msg[0]);
    tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

    if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
        ixgbe_send_vf_nack(adapter, vf, msg[0]);
        return;
    }

    /* It is illegal to enable vlan tag 0. */
    if (tag == 0 && enable != 0) {
        ixgbe_send_vf_nack(adapter, vf, msg[0]);
        return;
    }

    ixgbe_set_vfta(hw, tag, vf->pool, enable);
    ixgbe_send_vf_ack(adapter, vf, msg[0]);
}
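/*
 * Worked example for the MTA indexing in ixgbe_vf_set_mc_addr() above
 * (added for clarity, not from the original sources): for a hash value
 * of 0xB31, vec_reg = (0xB31 >> 5) & 0x7F = 89 and vec_bit =
 * 0xB31 & 0x1F = 17, so the PF sets bit 17 of MTA[89].  Note this path
 * only ever sets bits; it never clears them.
 */
#if 0	/* illustrative only -- not compiled */
static void
example_mta_locate(u16 hash, u32 *vec_reg, u32 *vec_bit)
{
	*vec_reg = (hash >> 5) & 0x7F;	/* which 32-bit MTA register */
	*vec_bit = hash & 0x1F;		/* which bit within it */
}
#endif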
5264 */ 5265 ixgbe_vf_enable_receive(adapter, vf); 5266 5267 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 5268 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; 5269 5270 if (pf_max_size < adapter->max_frame_size) { 5271 mhadd &= ~IXGBE_MHADD_MFS_MASK; 5272 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 5273 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 5274 } 5275 5276 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5277 } 5278 5279 5280 static void 5281 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf, 5282 uint32_t *msg) 5283 { 5284 //XXX implement this 5285 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5286 } 5287 5288 5289 static void 5290 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf, 5291 uint32_t *msg) 5292 { 5293 5294 switch (msg[1]) { 5295 case IXGBE_API_VER_1_0: 5296 case IXGBE_API_VER_1_1: 5297 vf->api_ver = msg[1]; 5298 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5299 break; 5300 default: 5301 vf->api_ver = IXGBE_API_VER_UNKNOWN; 5302 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5303 break; 5304 } 5305 } 5306 5307 5308 static void 5309 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, 5310 uint32_t *msg) 5311 { 5312 struct ixgbe_hw *hw; 5313 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN]; 5314 int num_queues; 5315 5316 hw = &adapter->hw; 5317 5318 /* GET_QUEUES is not supported on pre-1.1 APIs. */ 5319 switch (msg[0]) { 5320 case IXGBE_API_VER_1_0: 5321 case IXGBE_API_VER_UNKNOWN: 5322 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5323 return; 5324 } 5325 5326 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK | 5327 IXGBE_VT_MSGTYPE_CTS; 5328 5329 num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter)); 5330 resp[IXGBE_VF_TX_QUEUES] = num_queues; 5331 resp[IXGBE_VF_RX_QUEUES] = num_queues; 5332 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0); 5333 resp[IXGBE_VF_DEF_QUEUE] = 0; 5334 5335 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool); 5336 } 5337 5338 5339 static void 5340 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf) 5341 { 5342 struct ixgbe_hw *hw; 5343 uint32_t msg[IXGBE_VFMAILBOX_SIZE]; 5344 int error; 5345 5346 hw = &adapter->hw; 5347 5348 error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool); 5349 5350 if (error != 0) 5351 return; 5352 5353 CTR3(KTR_MALLOC, "%s: received msg %x from %d", 5354 adapter->ifp->if_xname, msg[0], vf->pool); 5355 if (msg[0] == IXGBE_VF_RESET) { 5356 ixgbe_vf_reset_msg(adapter, vf, msg); 5357 return; 5358 } 5359 5360 if (!(vf->flags & IXGBE_VF_CTS)) { 5361 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5362 return; 5363 } 5364 5365 switch (msg[0] & IXGBE_VT_MSG_MASK) { 5366 case IXGBE_VF_SET_MAC_ADDR: 5367 ixgbe_vf_set_mac(adapter, vf, msg); 5368 break; 5369 case IXGBE_VF_SET_MULTICAST: 5370 ixgbe_vf_set_mc_addr(adapter, vf, msg); 5371 break; 5372 case IXGBE_VF_SET_VLAN: 5373 ixgbe_vf_set_vlan(adapter, vf, msg); 5374 break; 5375 case IXGBE_VF_SET_LPE: 5376 ixgbe_vf_set_lpe(adapter, vf, msg); 5377 break; 5378 case IXGBE_VF_SET_MACVLAN: 5379 ixgbe_vf_set_macvlan(adapter, vf, msg); 5380 break; 5381 case IXGBE_VF_API_NEGOTIATE: 5382 ixgbe_vf_api_negotiate(adapter, vf, msg); 5383 break; 5384 case IXGBE_VF_GET_QUEUES: 5385 ixgbe_vf_get_queues(adapter, vf, msg); 5386 break; 5387 default: 5388 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5389 } 5390 } 5391 5392 5393 /* 5394 * Tasklet for handling VF -> PF mailbox messages. 
5395 */ 5396 static void 5397 ixgbe_handle_mbx(void *context, int pending) 5398 { 5399 struct adapter *adapter; 5400 struct ixgbe_hw *hw; 5401 struct ixgbe_vf *vf; 5402 int i; 5403 5404 adapter = context; 5405 hw = &adapter->hw; 5406 5407 IXGBE_CORE_LOCK(adapter); 5408 for (i = 0; i < adapter->num_vfs; i++) { 5409 vf = &adapter->vfs[i]; 5410 5411 if (vf->flags & IXGBE_VF_ACTIVE) { 5412 if (ixgbe_check_for_rst(hw, vf->pool) == 0) 5413 ixgbe_process_vf_reset(adapter, vf); 5414 5415 if (ixgbe_check_for_msg(hw, vf->pool) == 0) 5416 ixgbe_process_vf_msg(adapter, vf); 5417 5418 if (ixgbe_check_for_ack(hw, vf->pool) == 0) 5419 ixgbe_process_vf_ack(adapter, vf); 5420 } 5421 } 5422 IXGBE_CORE_UNLOCK(adapter); 5423 } 5424 5425 5426 static int 5427 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config) 5428 { 5429 struct adapter *adapter; 5430 enum ixgbe_iov_mode mode; 5431 5432 adapter = device_get_softc(dev); 5433 adapter->num_vfs = num_vfs; 5434 mode = ixgbe_get_iov_mode(adapter); 5435 5436 if (num_vfs > ixgbe_max_vfs(mode)) { 5437 adapter->num_vfs = 0; 5438 return (ENOSPC); 5439 } 5440 5441 IXGBE_CORE_LOCK(adapter); 5442 5443 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE, 5444 M_NOWAIT | M_ZERO); 5445 5446 if (adapter->vfs == NULL) { 5447 adapter->num_vfs = 0; 5448 IXGBE_CORE_UNLOCK(adapter); 5449 return (ENOMEM); 5450 } 5451 5452 ixgbe_init_locked(adapter); 5453 5454 IXGBE_CORE_UNLOCK(adapter); 5455 5456 return (0); 5457 } 5458 5459 5460 static void 5461 ixgbe_uninit_iov(device_t dev) 5462 { 5463 struct ixgbe_hw *hw; 5464 struct adapter *adapter; 5465 uint32_t pf_reg, vf_reg; 5466 5467 adapter = device_get_softc(dev); 5468 hw = &adapter->hw; 5469 5470 IXGBE_CORE_LOCK(adapter); 5471 5472 /* Enable rx/tx for the PF and disable it for all VFs. 
*/
    pf_reg = IXGBE_VF_INDEX(adapter->pool);
    IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
        IXGBE_VF_BIT(adapter->pool));
    IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
        IXGBE_VF_BIT(adapter->pool));

    if (pf_reg == 0)
        vf_reg = 1;
    else
        vf_reg = 0;
    IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
    IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

    IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

    free(adapter->vfs, M_IXGBE);
    adapter->vfs = NULL;
    adapter->num_vfs = 0;

    IXGBE_CORE_UNLOCK(adapter);
}


static void
ixgbe_initialize_iov(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
    enum ixgbe_iov_mode mode;
    int i;

    mode = ixgbe_get_iov_mode(adapter);
    if (mode == IXGBE_NO_VM)
        return;

    IXGBE_CORE_LOCK_ASSERT(adapter);

    mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
    mrqc &= ~IXGBE_MRQC_MRQE_MASK;

    switch (mode) {
    case IXGBE_64_VM:
        mrqc |= IXGBE_MRQC_VMDQRSS64EN;
        break;
    case IXGBE_32_VM:
        mrqc |= IXGBE_MRQC_VMDQRSS32EN;
        break;
    default:
        panic("Unexpected SR-IOV mode %d", mode);
    }
    IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

    mtqc = IXGBE_MTQC_VT_ENA;
    switch (mode) {
    case IXGBE_64_VM:
        mtqc |= IXGBE_MTQC_64VF;
        break;
    case IXGBE_32_VM:
        mtqc |= IXGBE_MTQC_32VF;
        break;
    default:
        panic("Unexpected SR-IOV mode %d", mode);
    }
    IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);


    gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
    gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
    gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
    switch (mode) {
    case IXGBE_64_VM:
        gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
        break;
    case IXGBE_32_VM:
        gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
        break;
    default:
        panic("Unexpected SR-IOV mode %d", mode);
    }
    IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);


    gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
    gpie &= ~IXGBE_GPIE_VTMODE_MASK;
    switch (mode) {
    case IXGBE_64_VM:
        gpie |= IXGBE_GPIE_VTMODE_64;
        break;
    case IXGBE_32_VM:
        gpie |= IXGBE_GPIE_VTMODE_32;
        break;
    default:
        panic("Unexpected SR-IOV mode %d", mode);
    }
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /* Enable rx/tx for the PF. */
    vf_reg = IXGBE_VF_INDEX(adapter->pool);
    IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
        IXGBE_VF_BIT(adapter->pool));
    IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
        IXGBE_VF_BIT(adapter->pool));
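    /*
     * Reader's note (added for clarity): VFRE/VFTE are each split
     * across two 32-bit registers covering pools 0-31 and 32-63, so
     * IXGBE_VF_INDEX() selects the register and IXGBE_VF_BIT() the
     * bit within it; e.g. pool 40 -> register index 1, bit 8.  The
     * same indexing is used by ixgbe_vf_enable_transmit() and
     * ixgbe_vf_enable_receive() above.
     */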
    /* Allow VM-to-VM communication. */
    IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

    vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
    vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
    IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

    for (i = 0; i < adapter->num_vfs; i++)
        ixgbe_init_vf(adapter, &adapter->vfs[i]);
}


/*
** Check the max frame setting of all active VF's
*/
static void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
    struct ixgbe_vf *vf;

    IXGBE_CORE_LOCK_ASSERT(adapter);

    for (int i = 0; i < adapter->num_vfs; i++) {
        vf = &adapter->vfs[i];
        if (vf->flags & IXGBE_VF_ACTIVE)
            ixgbe_update_max_frame(adapter, vf->max_frame_size);
    }
}


static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
    struct ixgbe_hw *hw;
    uint32_t vf_index, pfmbimr;

    IXGBE_CORE_LOCK_ASSERT(adapter);

    hw = &adapter->hw;

    if (!(vf->flags & IXGBE_VF_ACTIVE))
        return;

    vf_index = IXGBE_VF_INDEX(vf->pool);
    pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
    pfmbimr |= IXGBE_VF_BIT(vf->pool);
    IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

    ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

    // XXX multicast addresses

    if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
        ixgbe_set_rar(&adapter->hw, vf->rar_index,
            vf->ether_addr, vf->pool, TRUE);
    }

    ixgbe_vf_enable_transmit(adapter, vf);
    ixgbe_vf_enable_receive(adapter, vf);

    ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
}

static int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
    struct adapter *adapter;
    struct ixgbe_vf *vf;
    const void *mac;

    adapter = device_get_softc(dev);

    KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
        vfnum, adapter->num_vfs));

    IXGBE_CORE_LOCK(adapter);
    vf = &adapter->vfs[vfnum];
    vf->pool = vfnum;

    /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
    vf->rar_index = vfnum + 1;
    vf->default_vlan = 0;
    vf->max_frame_size = ETHER_MAX_LEN;
    ixgbe_update_max_frame(adapter, vf->max_frame_size);

    if (nvlist_exists_binary(config, "mac-addr")) {
        mac = nvlist_get_binary(config, "mac-addr", NULL);
        bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
        if (nvlist_get_bool(config, "allow-set-mac"))
            vf->flags |= IXGBE_VF_CAP_MAC;
    } else
        /*
         * If the administrator has not specified a MAC address then
         * we must allow the VF to choose one.
         */
        vf->flags |= IXGBE_VF_CAP_MAC;

    /* OR in ACTIVE so the capability flags set above are preserved. */
    vf->flags |= IXGBE_VF_ACTIVE;

    ixgbe_init_vf(adapter, vf);
    IXGBE_CORE_UNLOCK(adapter);

    return (0);
}
#endif /* PCI_IOV */
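/*
 * Illustrative sketch (not part of the original driver): ixgbe_add_vf()
 * above consumes a per-VF nvlist prepared by the SR-IOV configuration
 * machinery (iovctl(8)).  The keys it reads are "mac-addr" (6 binary
 * bytes, optional) and "allow-set-mac" (bool).  A minimal libnv
 * construction, using a made-up locally administered MAC:
 */
#if 0	/* example only -- not compiled with the driver */
#include <stdbool.h>
#include <stdint.h>
#include <sys/nv.h>

static nvlist_t *
example_vf_config(void)
{
	static const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	nvlist_t *nvl = nvlist_create(0);

	/* Fixed MAC for the VF; omit "mac-addr" to let the VF pick one. */
	nvlist_add_binary(nvl, "mac-addr", mac, sizeof(mac));
	/* Permit the VF to change its MAC later (IXGBE_VF_CAP_MAC). */
	nvlist_add_bool(nvl, "allow-set-mac", true);
	return (nvl);
}
#endif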