/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "3.1.13-k";


/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixgbe_probe(device_t);
static int	ixgbe_attach(device_t);
static int	ixgbe_detach(device_t);
static int	ixgbe_shutdown(device_t);
static int	ixgbe_suspend(device_t);
static int	ixgbe_resume(device_t);
static int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixgbe_init(void *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
static uint64_t	ixgbe_get_counter(struct ifnet *, ift_counter);
#endif
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static void	ixgbe_identify_hardware(struct adapter *);
static int	ixgbe_allocate_pci_resources(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *);
static int	ixgbe_allocate_legacy(struct adapter *);
static int	ixgbe_setup_msix(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_promisc(struct adapter *);
static void	ixgbe_set_multi(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int	ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);
static void	ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void	ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void	ixgbe_msix_que(void *);
static void	ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void	ixgbe_handle_que(void *, int);
static void	ixgbe_handle_link(void *, int);
static void	ixgbe_handle_msf(void *, int);
static void	ixgbe_handle_mod(void *, int);
static void	ixgbe_handle_phy(void *, int);

#ifdef IXGBE_FDIR
static void	ixgbe_reinit_fdir(void *, int);
#endif

#ifdef PCI_IOV
static void	ixgbe_ping_all_vfs(struct adapter *);
static void	ixgbe_handle_mbx(void *, int);
static int	ixgbe_init_iov(device_t, u16, const nvlist_t *);
static void	ixgbe_uninit_iov(device_t);
static int	ixgbe_add_vf(device_t, u16, const nvlist_t *);
static void	ixgbe_initialize_iov(struct adapter *);
static void	ixgbe_recalculate_max_frame(struct adapter *);
static void	ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
#endif /* PCI_IOV */


/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
    "IXGBE driver parameters");

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, "
    "-1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, "
    "-1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
** Smart speed setting, default to on.
** This only works as a compile option
** right now, as it is applied during
** attach; set this to
** 'ixgbe_smart_speed_off' to disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");
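
/*
 * Usage note (sketch, not from the original sources): the CTLFLAG_RDTUN
 * knobs above and below are boot-time tunables, so they may be set from
 * /boot/loader.conf, e.g.:
 *
 *   hw.ix.enable_msix=1
 *   hw.ix.num_queues=4
 *
 * The values shown are illustrative, not recommendations.
 */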
/*
** Number of Queues, can be set to 0,
** it then autoconfigures based on the
** number of cpus with a max of 8. This
** can be overridden manually here.
*/
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
** Defining this on will allow the use
** of unsupported SFP+ modules, note that
** doing so you are on your own :)
*/
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool, this variable controls
** how much it uses:
**   0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

#ifdef DEV_NETMAP
/*
 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
 * be a reference on how to implement netmap support in a driver.
 * Additional comments are in ixgbe_netmap.h .
 *
 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
 * that extend the standard driver.
 */
#include <dev/netmap/ixgbe_netmap.h>
#endif /* DEV_NETMAP */

static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	INIT_DEBUGOUT("ixgbe_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixgbe_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixgbe_strings[ent->index],
			    ixgbe_driver_version);
			device_set_desc_copy(dev, adapter_name);
			++ixgbe_total_ports;
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int		error = 0;
	u16		csum;
	u32		ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;
#endif

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
	ixgbe_set_flowcntl(adapter, ixgbe_flow_control);
	adapter->enable_aim = ixgbe_enable_aim;

	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/* Set an initial default flow control & dmac value */
	adapter->fc = ixgbe_fc_full;
	adapter->dmac = 0;
	adapter->eee_enabled = 0;

#ifdef PCI_IOV
	if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
		nvlist_t *pf_schema, *vf_schema;

		hw->mbx.ops.init_params(hw);
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (error != 0) {
			device_printf(dev,
			    "Error %d setting up SR-IOV\n", error);
		}
	}
#endif /* PCI_IOV */

	/* Check for certain supported features */
	ixgbe_check_wol_support(adapter);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	if (pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}
#endif /* PCI_IOV */

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
#ifdef PCI_IOV
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
#endif
		taskqueue_drain(adapter->tq, &adapter->phy_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

/**
 * Methods for going from:
 * D0 -> D3: ixgbe_suspend
 * D3 -> D0: ixgbe_resume
 */
static int
ixgbe_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	IXGBE_CORE_LOCK(adapter);

	error = ixgbe_setup_low_power_mode(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

static int
ixgbe_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 wus;

	INIT_DEBUGOUT("ixgbe_resume: begin");

	IXGBE_CORE_LOCK(adapter);

	/* Read & clear WUS register */
	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
	if (wus)
		device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
	/* And clear WUFC until next low-power transition */
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

	/*
	 * Required after D3->D0 transition;
	 * will re-advertise all previous advertised speeds
	 */
	if (ifp->if_flags & IFF_UP)
		ixgbe_init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (0);
}


/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	int		error = 0;
	bool		avoid_reset = FALSE;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + IXGBE_MTU_HDR;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_init_locked(adapter);
#ifdef PCI_IOV
			ixgbe_recalculate_max_frame(adapter);
#endif
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (!mask)
			break;

		/* HW cannot turn these on/off separately */
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		}
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
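	/*
	 * Background note (not from the original sources): in the SIOCGI2C
	 * case below, 0xA0 and 0xA2 are the standard SFP+ I2C addresses,
	 * i.e. the SFF-8472 serial ID EEPROM and diagnostic monitoring
	 * pages respectively.
	 */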
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp->if_hwassist = 0;
#if __FreeBSD_version >= 1000000
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_IP_SCTP;
	}
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
		ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_IP6_SCTP;
	}
#else
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
	}
#endif
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	int		err = 0;
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	adapter->pool = ixgbe_max_vfs(mode);
	/* Queue indices may change with IOV mode */
	for (int i = 0; i < adapter->num_queues; i++) {
		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
	}
#endif
	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
#ifdef PCI_IOV
	ixgbe_initialize_iov(adapter);
#endif
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSIX interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Configure Energy Efficient Ethernet for supported devices */
	if (hw->mac.ops.setup_eee) {
		err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
		if (err)
			device_printf(dev, "Error setting up EEE: %d\n", err);
	}
	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

#ifdef PCI_IOV
	/* Enable the use of the MBX by the VF's */
	{
		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		reg |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
	}
#endif

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}

static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}

static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Fan Failure Interrupt */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		gpie |= IXGBE_SDP1_GPIEN;

	/*
	 * Module detection (SDP2)
	 * Media ready (SDP1)
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN;
		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
			gpie |= IXGBE_SDP1_GPIEN;
	}

	/*
	 * Thermal Failure Detection (X540)
	 * Link Detection (X552 SFP+, X552/X557-AT)
	 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	return;
}
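
/*
 * Note (added commentary): the high/low water marks computed in the
 * routine below are expressed in KB, derived from the Rx packet buffer
 * size (RXPBSIZE) minus the worst-case delay value for one maximum-sized
 * frame, via the IXGBE_DV and IXGBE_LOW_DV macro families.
 */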
/*
 * Requires adapter->max_frame_size to be set.
 */
static void
ixgbe_config_delay_values(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.requested_mode = adapter->fc;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
}

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
}
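
/*
 * Note (added commentary): on MACs newer than 82598 the queue interrupt
 * mask is 64 bits wide, so the two helpers above split it across the
 * EIMS_EX/EIMC_EX register pairs, writing the low and high 32 bits
 * separately and skipping a write when its half of the mask is zero.
 */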
static void
ixgbe_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more;
	u32		reg_eicr;


	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;


	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
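	/*
	 * Added note: the calculation below uses the average packet size
	 * (bytes/packets) seen since the last interrupt as a proxy for
	 * the traffic profile. Bulk transfers (large averages) tolerate
	 * a longer EITR interval and thus fewer interrupts, while
	 * small-packet traffic keeps the interval short for latency.
	 */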
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_EITR(que->msix), que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;
	else
		newitr |= IXGBE_EITR_CNT_WDIS;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_queue(adapter, que->msix);
	return;
}


static void
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg_eicr, mod_mask;

	++adapter->link_irq;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		taskqueue_enqueue(adapter->tq, &adapter->link_task);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		} else
#endif
		if (reg_eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}
" 1689 "PHY IS SHUT DOWN!!\n"); 1690 device_printf(adapter->dev, "System shutdown required!\n"); 1691 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 1692 } 1693 #ifdef PCI_IOV 1694 if (reg_eicr & IXGBE_EICR_MAILBOX) 1695 taskqueue_enqueue(adapter->tq, &adapter->mbx_task); 1696 #endif 1697 } 1698 1699 /* Pluggable optics-related interrupt */ 1700 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) 1701 mod_mask = IXGBE_EICR_GPI_SDP0_X540; 1702 else 1703 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 1704 1705 if (ixgbe_is_sfp(hw)) { 1706 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 1707 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 1708 taskqueue_enqueue(adapter->tq, &adapter->msf_task); 1709 } else if (reg_eicr & mod_mask) { 1710 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask); 1711 taskqueue_enqueue(adapter->tq, &adapter->mod_task); 1712 } 1713 } 1714 1715 /* Check for fan failure */ 1716 if ((hw->device_id == IXGBE_DEV_ID_82598AT) && 1717 (reg_eicr & IXGBE_EICR_GPI_SDP1)) { 1718 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1719 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! " 1720 "REPLACE IMMEDIATELY!!\n"); 1721 } 1722 1723 /* External PHY interrupt */ 1724 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 1725 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) { 1726 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 1727 taskqueue_enqueue(adapter->tq, &adapter->phy_task); 1728 } 1729 1730 /* Re-enable other interrupts */ 1731 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1732 return; 1733 } 1734 1735 /********************************************************************* 1736 * 1737 * Media Ioctl callback 1738 * 1739 * This routine is called whenever the user queries the status of 1740 * the interface using ifconfig. 
static void
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	** XXX: These need to use the proper media types once
	** they're added.
	*/
1823 */ 1824 #ifndef IFM_ETH_XTYPE 1825 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 1826 switch (adapter->link_speed) { 1827 case IXGBE_LINK_SPEED_10GB_FULL: 1828 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 1829 break; 1830 case IXGBE_LINK_SPEED_2_5GB_FULL: 1831 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 1832 break; 1833 case IXGBE_LINK_SPEED_1GB_FULL: 1834 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 1835 break; 1836 } 1837 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 1838 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 1839 switch (adapter->link_speed) { 1840 case IXGBE_LINK_SPEED_10GB_FULL: 1841 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 1842 break; 1843 case IXGBE_LINK_SPEED_2_5GB_FULL: 1844 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 1845 break; 1846 case IXGBE_LINK_SPEED_1GB_FULL: 1847 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 1848 break; 1849 } 1850 #else 1851 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 1852 switch (adapter->link_speed) { 1853 case IXGBE_LINK_SPEED_10GB_FULL: 1854 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 1855 break; 1856 case IXGBE_LINK_SPEED_2_5GB_FULL: 1857 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 1858 break; 1859 case IXGBE_LINK_SPEED_1GB_FULL: 1860 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 1861 break; 1862 } 1863 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 1864 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 1865 switch (adapter->link_speed) { 1866 case IXGBE_LINK_SPEED_10GB_FULL: 1867 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 1868 break; 1869 case IXGBE_LINK_SPEED_2_5GB_FULL: 1870 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 1871 break; 1872 case IXGBE_LINK_SPEED_1GB_FULL: 1873 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 1874 break; 1875 } 1876 #endif 1877 1878 /* If nothing is recognized... */ 1879 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 1880 ifmr->ifm_active |= IFM_UNKNOWN; 1881 1882 #if __FreeBSD_version >= 900025 1883 /* Display current flow control setting used on link */ 1884 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 1885 hw->fc.current_mode == ixgbe_fc_full) 1886 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 1887 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 1888 hw->fc.current_mode == ixgbe_fc_full) 1889 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 1890 #endif 1891 1892 IXGBE_CORE_UNLOCK(adapter); 1893 1894 return; 1895 } 1896 1897 /********************************************************************* 1898 * 1899 * Media Ioctl callback 1900 * 1901 * This routine is called when the user changes speed/duplex using 1902 * media/mediaopt option with ifconfig. 1903 * 1904 **********************************************************************/ 1905 static int 1906 ixgbe_media_change(struct ifnet * ifp) 1907 { 1908 struct adapter *adapter = ifp->if_softc; 1909 struct ifmedia *ifm = &adapter->media; 1910 struct ixgbe_hw *hw = &adapter->hw; 1911 ixgbe_link_speed speed = 0; 1912 1913 INIT_DEBUGOUT("ixgbe_media_change: begin"); 1914 1915 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1916 return (EINVAL); 1917 1918 if (hw->phy.media_type == ixgbe_media_type_backplane) 1919 return (ENODEV); 1920 1921 /* 1922 ** We don't actually need to check against the supported 1923 ** media types of the adapter; ifmedia will take care of 1924 ** that for us.
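** Note the deliberate case fall-through in the switch statements below: selecting e.g. IFM_10G_T accumulates the 100Mb and 1Gb speed bits before the 10Gb bit, so link setup may negotiate any of the accumulated speeds.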
1925 */ 1926 #ifndef IFM_ETH_XTYPE 1927 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1928 case IFM_AUTO: 1929 case IFM_10G_T: 1930 speed |= IXGBE_LINK_SPEED_100_FULL; 1931 case IFM_10G_LRM: 1932 case IFM_10G_SR: /* KR, too */ 1933 case IFM_10G_LR: 1934 case IFM_10G_CX4: /* KX4 */ 1935 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1936 case IFM_10G_TWINAX: 1937 speed |= IXGBE_LINK_SPEED_10GB_FULL; 1938 break; 1939 case IFM_1000_T: 1940 speed |= IXGBE_LINK_SPEED_100_FULL; 1941 case IFM_1000_LX: 1942 case IFM_1000_SX: 1943 case IFM_1000_CX: /* KX */ 1944 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1945 break; 1946 case IFM_100_TX: 1947 speed |= IXGBE_LINK_SPEED_100_FULL; 1948 break; 1949 default: 1950 goto invalid; 1951 } 1952 #else 1953 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1954 case IFM_AUTO: 1955 case IFM_10G_T: 1956 speed |= IXGBE_LINK_SPEED_100_FULL; 1957 case IFM_10G_LRM: 1958 case IFM_10G_KR: 1959 case IFM_10G_LR: 1960 case IFM_10G_KX4: 1961 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1962 case IFM_10G_TWINAX: 1963 speed |= IXGBE_LINK_SPEED_10GB_FULL; 1964 break; 1965 case IFM_1000_T: 1966 speed |= IXGBE_LINK_SPEED_100_FULL; 1967 case IFM_1000_LX: 1968 case IFM_1000_SX: 1969 case IFM_1000_KX: 1970 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1971 break; 1972 case IFM_100_TX: 1973 speed |= IXGBE_LINK_SPEED_100_FULL; 1974 break; 1975 default: 1976 goto invalid; 1977 } 1978 #endif 1979 1980 hw->mac.autotry_restart = TRUE; 1981 hw->mac.ops.setup_link(hw, speed, TRUE); 1982 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1983 adapter->advertise = 0; 1984 } else { 1985 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0) 1986 adapter->advertise |= 1 << 2; 1987 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0) 1988 adapter->advertise |= 1 << 1; 1989 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0) 1990 adapter->advertise |= 1 << 0; 1991 } 1992 1993 return (0); 1994 1995 invalid: 1996 device_printf(adapter->dev, "Invalid media type!\n"); 1997 return (EINVAL); 1998 } 1999 2000 static void 2001 ixgbe_set_promisc(struct adapter *adapter) 2002 { 2003 u_int32_t reg_rctl; 2004 struct ifnet *ifp = adapter->ifp; 2005 int mcnt = 0; 2006 2007 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2008 reg_rctl &= (~IXGBE_FCTRL_UPE); 2009 if (ifp->if_flags & IFF_ALLMULTI) 2010 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2011 else { 2012 struct ifmultiaddr *ifma; 2013 #if __FreeBSD_version < 800000 2014 IF_ADDR_LOCK(ifp); 2015 #else 2016 if_maddr_rlock(ifp); 2017 #endif 2018 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2019 if (ifma->ifma_addr->sa_family != AF_LINK) 2020 continue; 2021 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 2022 break; 2023 mcnt++; 2024 } 2025 #if __FreeBSD_version < 800000 2026 IF_ADDR_UNLOCK(ifp); 2027 #else 2028 if_maddr_runlock(ifp); 2029 #endif 2030 } 2031 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2032 reg_rctl &= (~IXGBE_FCTRL_MPE); 2033 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); 2034 2035 if (ifp->if_flags & IFF_PROMISC) { 2036 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2037 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); 2038 } else if (ifp->if_flags & IFF_ALLMULTI) { 2039 reg_rctl |= IXGBE_FCTRL_MPE; 2040 reg_rctl &= ~IXGBE_FCTRL_UPE; 2041 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); 2042 } 2043 return; 2044 } 2045 2046 2047 /********************************************************************* 2048 * Multicast Update 2049 * 2050 * This routine is called whenever multicast address list is updated. 
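* The list is gathered into the adapter's mta shadow array below; if it would overflow * MAX_NUM_MULTICAST_ADDRESSES the hardware is put into multicast-promiscuous (MPE) mode instead.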
2051 * 2052 **********************************************************************/ 2053 #define IXGBE_RAR_ENTRIES 16 2054 2055 static void 2056 ixgbe_set_multi(struct adapter *adapter) 2057 { 2058 u32 fctrl; 2059 u8 *update_ptr; 2060 struct ifmultiaddr *ifma; 2061 struct ixgbe_mc_addr *mta; 2062 int mcnt = 0; 2063 struct ifnet *ifp = adapter->ifp; 2064 2065 IOCTL_DEBUGOUT("ixgbe_set_multi: begin"); 2066 2067 mta = adapter->mta; 2068 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 2069 2070 #if __FreeBSD_version < 800000 2071 IF_ADDR_LOCK(ifp); 2072 #else 2073 if_maddr_rlock(ifp); 2074 #endif 2075 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2076 if (ifma->ifma_addr->sa_family != AF_LINK) 2077 continue; 2078 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 2079 break; 2080 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 2081 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 2082 mta[mcnt].vmdq = adapter->pool; 2083 mcnt++; 2084 } 2085 #if __FreeBSD_version < 800000 2086 IF_ADDR_UNLOCK(ifp); 2087 #else 2088 if_maddr_runlock(ifp); 2089 #endif 2090 2091 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2092 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2093 if (ifp->if_flags & IFF_PROMISC) 2094 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2095 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 2096 ifp->if_flags & IFF_ALLMULTI) { 2097 fctrl |= IXGBE_FCTRL_MPE; 2098 fctrl &= ~IXGBE_FCTRL_UPE; 2099 } else 2100 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2101 2102 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 2103 2104 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { 2105 update_ptr = (u8 *)mta; 2106 ixgbe_update_mc_addr_list(&adapter->hw, 2107 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE); 2108 } 2109 2110 return; 2111 } 2112 2113 /* 2114 * This is an iterator function now needed by the multicast 2115 * shared code. It simply feeds the shared code routine the 2116 * addresses in the array of ixgbe_set_multi() one by one. 2117 */ 2118 static u8 * 2119 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 2120 { 2121 struct ixgbe_mc_addr *mta; 2122 2123 mta = (struct ixgbe_mc_addr *)*update_ptr; 2124 *vmdq = mta->vmdq; 2125 2126 *update_ptr = (u8*)(mta + 1); 2127 return (mta->addr); 2128 } 2129 2130 2131 /********************************************************************* 2132 * Timer routine 2133 * 2134 * This routine checks for link status, updates statistics, 2135 * and runs the watchdog check. 2136 * 2137 **********************************************************************/ 2138 2139 static void 2140 ixgbe_local_timer(void *arg) 2141 { 2142 struct adapter *adapter = arg; 2143 device_t dev = adapter->dev; 2144 struct ix_queue *que = adapter->queues; 2145 u64 queues = 0; 2146 int hung = 0; 2147 2148 mtx_assert(&adapter->core_mtx, MA_OWNED); 2149 2150 /* Check for pluggable optics */ 2151 if (adapter->sfp_probe) 2152 if (!ixgbe_sfp_probe(adapter)) 2153 goto out; /* Nothing to do */ 2154 2155 ixgbe_update_link_status(adapter); 2156 ixgbe_update_stats_counters(adapter); 2157 2158 /* 2159 ** Check the TX queues status 2160 ** - mark hung queues so we don't schedule on them 2161 ** - watchdog only if all queues show hung 2162 */ 2163 for (int i = 0; i < adapter->num_queues; i++, que++) { 2164 /* Keep track of queues with work for soft irq */ 2165 if (que->txr->busy) 2166 queues |= ((u64)1 << que->me); 2167 /* 2168 ** Each time txeof runs without cleaning while there 2169 ** are uncleaned descriptors, busy is incremented. Once 2170 ** it reaches the MAX we declare the queue hung.
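** A queue flagged IXGBE_QUEUE_HUNG is removed from active_queues until it makes progress again; only when every queue reports hung does the watchdog below reset the interface.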
2171 */ 2172 if (que->busy == IXGBE_QUEUE_HUNG) { 2173 ++hung; 2174 /* Mark the queue as inactive */ 2175 adapter->active_queues &= ~((u64)1 << que->me); 2176 continue; 2177 } else { 2178 /* Check if we've come back from hung */ 2179 if ((adapter->active_queues & ((u64)1 << que->me)) == 0) 2180 adapter->active_queues |= ((u64)1 << que->me); 2181 } 2182 if (que->busy >= IXGBE_MAX_TX_BUSY) { 2183 device_printf(dev,"Warning queue %d " 2184 "appears to be hung!\n", i); 2185 que->txr->busy = IXGBE_QUEUE_HUNG; 2186 ++hung; 2187 } 2188 2189 } 2190 2191 /* Only truly watchdog if all queues show hung */ 2192 if (hung == adapter->num_queues) 2193 goto watchdog; 2194 else if (queues != 0) { /* Force an IRQ on queues with work */ 2195 ixgbe_rearm_queues(adapter, queues); 2196 } 2197 2198 out: 2199 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); 2200 return; 2201 2202 watchdog: 2203 device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); 2204 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2205 adapter->watchdog_events++; 2206 ixgbe_init_locked(adapter); 2207 } 2208 2209 2210 /* 2211 ** Note: this routine updates the OS on the link state 2212 ** the real check of the hardware only happens with 2213 ** a link interrupt. 2214 */ 2215 static void 2216 ixgbe_update_link_status(struct adapter *adapter) 2217 { 2218 struct ifnet *ifp = adapter->ifp; 2219 device_t dev = adapter->dev; 2220 2221 if (adapter->link_up){ 2222 if (adapter->link_active == FALSE) { 2223 if (bootverbose) 2224 device_printf(dev,"Link is up %d Gbps %s \n", 2225 ((adapter->link_speed == 128)? 10:1), 2226 "Full Duplex"); 2227 adapter->link_active = TRUE; 2228 /* Update any Flow Control changes */ 2229 ixgbe_fc_enable(&adapter->hw); 2230 /* Update DMA coalescing config */ 2231 ixgbe_config_dmac(adapter); 2232 if_link_state_change(ifp, LINK_STATE_UP); 2233 #ifdef PCI_IOV 2234 ixgbe_ping_all_vfs(adapter); 2235 #endif 2236 } 2237 } else { /* Link down */ 2238 if (adapter->link_active == TRUE) { 2239 if (bootverbose) 2240 device_printf(dev,"Link is Down\n"); 2241 if_link_state_change(ifp, LINK_STATE_DOWN); 2242 adapter->link_active = FALSE; 2243 #ifdef PCI_IOV 2244 ixgbe_ping_all_vfs(adapter); 2245 #endif 2246 } 2247 } 2248 2249 return; 2250 } 2251 2252 2253 /********************************************************************* 2254 * 2255 * This routine disables all traffic on the adapter by issuing a 2256 * global reset on the MAC and deallocates TX/RX buffers. 2257 * 2258 **********************************************************************/ 2259 2260 static void 2261 ixgbe_stop(void *arg) 2262 { 2263 struct ifnet *ifp; 2264 struct adapter *adapter = arg; 2265 struct ixgbe_hw *hw = &adapter->hw; 2266 ifp = adapter->ifp; 2267 2268 mtx_assert(&adapter->core_mtx, MA_OWNED); 2269 2270 INIT_DEBUGOUT("ixgbe_stop: begin\n"); 2271 ixgbe_disable_intr(adapter); 2272 callout_stop(&adapter->timer); 2273 2274 /* Let the stack know...*/ 2275 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2276 2277 ixgbe_reset_hw(hw); 2278 hw->adapter_stopped = FALSE; 2279 ixgbe_stop_adapter(hw); 2280 if (hw->mac.type == ixgbe_mac_82599EB) 2281 ixgbe_stop_mac_link_on_d3_82599(hw); 2282 /* Turn off the laser - noop with no optics */ 2283 ixgbe_disable_tx_laser(hw); 2284 2285 /* Update the stack */ 2286 adapter->link_up = FALSE; 2287 ixgbe_update_link_status(adapter); 2288 2289 /* reprogram the RAR[0] in case user changed it. 
*/ 2290 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 2291 2292 return; 2293 } 2294 2295 2296 /********************************************************************* 2297 * 2298 * Determine hardware revision. 2299 * 2300 **********************************************************************/ 2301 static void 2302 ixgbe_identify_hardware(struct adapter *adapter) 2303 { 2304 device_t dev = adapter->dev; 2305 struct ixgbe_hw *hw = &adapter->hw; 2306 2307 /* Save off the information about this board */ 2308 hw->vendor_id = pci_get_vendor(dev); 2309 hw->device_id = pci_get_device(dev); 2310 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); 2311 hw->subsystem_vendor_id = 2312 pci_read_config(dev, PCIR_SUBVEND_0, 2); 2313 hw->subsystem_device_id = 2314 pci_read_config(dev, PCIR_SUBDEV_0, 2); 2315 2316 /* 2317 ** Make sure BUSMASTER is set 2318 */ 2319 pci_enable_busmaster(dev); 2320 2321 /* We need this here to set the num_segs below */ 2322 ixgbe_set_mac_type(hw); 2323 2324 /* Pick up the 82599 settings */ 2325 if (hw->mac.type != ixgbe_mac_82598EB) { 2326 hw->phy.smart_speed = ixgbe_smart_speed; 2327 adapter->num_segs = IXGBE_82599_SCATTER; 2328 } else 2329 adapter->num_segs = IXGBE_82598_SCATTER; 2330 2331 return; 2332 } 2333 2334 /********************************************************************* 2335 * 2336 * Determine optic type 2337 * 2338 **********************************************************************/ 2339 static void 2340 ixgbe_setup_optics(struct adapter *adapter) 2341 { 2342 struct ixgbe_hw *hw = &adapter->hw; 2343 int layer; 2344 2345 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 2346 2347 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { 2348 adapter->optics = IFM_10G_T; 2349 return; 2350 } 2351 2352 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { 2353 adapter->optics = IFM_1000_T; 2354 return; 2355 } 2356 2357 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { 2358 adapter->optics = IFM_1000_SX; 2359 return; 2360 } 2361 2362 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR | 2363 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) { 2364 adapter->optics = IFM_10G_LR; 2365 return; 2366 } 2367 2368 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 2369 adapter->optics = IFM_10G_SR; 2370 return; 2371 } 2372 2373 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) { 2374 adapter->optics = IFM_10G_TWINAX; 2375 return; 2376 } 2377 2378 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | 2379 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) { 2380 adapter->optics = IFM_10G_CX4; 2381 return; 2382 } 2383 2384 /* If we get here just set the default */ 2385 adapter->optics = IFM_ETHER | IFM_AUTO; 2386 return; 2387 } 2388 2389 /********************************************************************* 2390 * 2391 * Setup the Legacy or MSI Interrupt handler 2392 * 2393 **********************************************************************/ 2394 static int 2395 ixgbe_allocate_legacy(struct adapter *adapter) 2396 { 2397 device_t dev = adapter->dev; 2398 struct ix_queue *que = adapter->queues; 2399 #ifndef IXGBE_LEGACY_TX 2400 struct tx_ring *txr = adapter->tx_rings; 2401 #endif 2402 int error, rid = 0; 2403 2404 /* MSI RID at 1 */ 2405 if (adapter->msix == 1) 2406 rid = 1; 2407 2408 /* We allocate a single interrupt resource */ 2409 adapter->res = bus_alloc_resource_any(dev, 2410 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 2411 if (adapter->res == NULL) { 2412 device_printf(dev, "Unable to allocate bus resource: " 2413 "interrupt\n"); 2414 return (ENXIO); 2415 } 2416 2417 /* 2418 * Try 
allocating a fast interrupt and the associated deferred 2419 * processing contexts. 2420 */ 2421 #ifndef IXGBE_LEGACY_TX 2422 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); 2423 #endif 2424 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); 2425 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, 2426 taskqueue_thread_enqueue, &que->tq); 2427 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq", 2428 device_get_nameunit(adapter->dev)); 2429 2430 /* Tasklets for Link, SFP and Multispeed Fiber */ 2431 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); 2432 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); 2433 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); 2434 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter); 2435 #ifdef IXGBE_FDIR 2436 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); 2437 #endif 2438 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT, 2439 taskqueue_thread_enqueue, &adapter->tq); 2440 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq", 2441 device_get_nameunit(adapter->dev)); 2442 2443 if ((error = bus_setup_intr(dev, adapter->res, 2444 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, 2445 que, &adapter->tag)) != 0) { 2446 device_printf(dev, "Failed to register fast interrupt " 2447 "handler: %d\n", error); 2448 taskqueue_free(que->tq); 2449 taskqueue_free(adapter->tq); 2450 que->tq = NULL; 2451 adapter->tq = NULL; 2452 return (error); 2453 } 2454 /* For simplicity in the handlers */ 2455 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK; 2456 2457 return (0); 2458 } 2459 2460 2461 /********************************************************************* 2462 * 2463 * Setup MSIX Interrupt resources and handlers 2464 * 2465 **********************************************************************/ 2466 static int 2467 ixgbe_allocate_msix(struct adapter *adapter) 2468 { 2469 device_t dev = adapter->dev; 2470 struct ix_queue *que = adapter->queues; 2471 struct tx_ring *txr = adapter->tx_rings; 2472 int error, rid, vector = 0; 2473 int cpu_id = 0; 2474 #ifdef RSS 2475 cpuset_t cpu_mask; 2476 #endif 2477 2478 #ifdef RSS 2479 /* 2480 * If we're doing RSS, the number of queues needs to 2481 * match the number of RSS buckets that are configured. 2482 * 2483 * + If there's more queues than RSS buckets, we'll end 2484 * up with queues that get no traffic. 2485 * 2486 * + If there's more RSS buckets than queues, we'll end 2487 * up having multiple RSS buckets map to the same queue, 2488 * so there'll be some contention. 
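* For example, 8 queues with 4 RSS buckets leaves queues 4-7 without RSS traffic, * while 4 queues with 8 buckets maps two buckets onto each queue.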
2489 */ 2490 if (adapter->num_queues != rss_getnumbuckets()) { 2491 device_printf(dev, 2492 "%s: number of queues (%d) != number of RSS buckets (%d)" 2493 "; performance will be impacted.\n", 2494 __func__, 2495 adapter->num_queues, 2496 rss_getnumbuckets()); 2497 } 2498 #endif 2499 2500 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { 2501 rid = vector + 1; 2502 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2503 RF_SHAREABLE | RF_ACTIVE); 2504 if (que->res == NULL) { 2505 device_printf(dev,"Unable to allocate" 2506 " bus resource: que interrupt [%d]\n", vector); 2507 return (ENXIO); 2508 } 2509 /* Set the handler function */ 2510 error = bus_setup_intr(dev, que->res, 2511 INTR_TYPE_NET | INTR_MPSAFE, NULL, 2512 ixgbe_msix_que, que, &que->tag); 2513 if (error) { 2514 que->res = NULL; 2515 device_printf(dev, "Failed to register QUE handler"); 2516 return (error); 2517 } 2518 #if __FreeBSD_version >= 800504 2519 bus_describe_intr(dev, que->res, que->tag, "q%d", i); 2520 #endif 2521 que->msix = vector; 2522 adapter->active_queues |= (u64)(1 << que->msix); 2523 #ifdef RSS 2524 /* 2525 * The queue ID is used as the RSS layer bucket ID. 2526 * We look up the queue ID -> RSS CPU ID and select 2527 * that. 2528 */ 2529 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2530 #else 2531 /* 2532 * Bind the msix vector, and thus the 2533 * rings to the corresponding cpu. 2534 * 2535 * This just happens to match the default RSS round-robin 2536 * bucket -> queue -> CPU allocation. 2537 */ 2538 if (adapter->num_queues > 1) 2539 cpu_id = i; 2540 #endif 2541 if (adapter->num_queues > 1) 2542 bus_bind_intr(dev, que->res, cpu_id); 2543 #ifdef IXGBE_DEBUG 2544 #ifdef RSS 2545 device_printf(dev, 2546 "Bound RSS bucket %d to CPU %d\n", 2547 i, cpu_id); 2548 #else 2549 device_printf(dev, 2550 "Bound queue %d to cpu %d\n", 2551 i, cpu_id); 2552 #endif 2553 #endif /* IXGBE_DEBUG */ 2554 2555 2556 #ifndef IXGBE_LEGACY_TX 2557 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); 2558 #endif 2559 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); 2560 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, 2561 taskqueue_thread_enqueue, &que->tq); 2562 #ifdef RSS 2563 CPU_SETOF(cpu_id, &cpu_mask); 2564 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, 2565 &cpu_mask, 2566 "%s (bucket %d)", 2567 device_get_nameunit(adapter->dev), 2568 cpu_id); 2569 #else 2570 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d", 2571 device_get_nameunit(adapter->dev), i); 2572 #endif 2573 } 2574 2575 /* and Link */ 2576 rid = vector + 1; 2577 adapter->res = bus_alloc_resource_any(dev, 2578 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 2579 if (!adapter->res) { 2580 device_printf(dev,"Unable to allocate" 2581 " bus resource: Link interrupt [%d]\n", rid); 2582 return (ENXIO); 2583 } 2584 /* Set the link handler function */ 2585 error = bus_setup_intr(dev, adapter->res, 2586 INTR_TYPE_NET | INTR_MPSAFE, NULL, 2587 ixgbe_msix_link, adapter, &adapter->tag); 2588 if (error) { 2589 adapter->res = NULL; 2590 device_printf(dev, "Failed to register LINK handler"); 2591 return (error); 2592 } 2593 #if __FreeBSD_version >= 800504 2594 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); 2595 #endif 2596 adapter->vector = vector; 2597 /* Tasklets for Link, SFP and Multispeed Fiber */ 2598 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); 2599 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); 2600 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); 2601 #ifdef PCI_IOV 
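/* Mailbox task: services VF-to-PF messages outside of interrupt context */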
2602 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter); 2603 #endif 2604 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter); 2605 #ifdef IXGBE_FDIR 2606 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); 2607 #endif 2608 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT, 2609 taskqueue_thread_enqueue, &adapter->tq); 2610 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq", 2611 device_get_nameunit(adapter->dev)); 2612 2613 return (0); 2614 } 2615 2616 /* 2617 * Setup Either MSI/X or MSI 2618 */ 2619 static int 2620 ixgbe_setup_msix(struct adapter *adapter) 2621 { 2622 device_t dev = adapter->dev; 2623 int rid, want, queues, msgs; 2624 2625 /* Override by tuneable */ 2626 if (ixgbe_enable_msix == 0) 2627 goto msi; 2628 2629 /* First try MSI/X */ 2630 msgs = pci_msix_count(dev); 2631 if (msgs == 0) 2632 goto msi; 2633 rid = PCIR_BAR(MSIX_82598_BAR); 2634 adapter->msix_mem = bus_alloc_resource_any(dev, 2635 SYS_RES_MEMORY, &rid, RF_ACTIVE); 2636 if (adapter->msix_mem == NULL) { 2637 rid += 4; /* 82599 maps in higher BAR */ 2638 adapter->msix_mem = bus_alloc_resource_any(dev, 2639 SYS_RES_MEMORY, &rid, RF_ACTIVE); 2640 } 2641 if (adapter->msix_mem == NULL) { 2642 /* May not be enabled */ 2643 device_printf(adapter->dev, 2644 "Unable to map MSIX table \n"); 2645 goto msi; 2646 } 2647 2648 /* Figure out a reasonable auto config value */ 2649 queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus; 2650 2651 #ifdef RSS 2652 /* If we're doing RSS, clamp at the number of RSS buckets */ 2653 if (queues > rss_getnumbuckets()) 2654 queues = rss_getnumbuckets(); 2655 #endif 2656 2657 if (ixgbe_num_queues != 0) 2658 queues = ixgbe_num_queues; 2659 /* Set max queues to 8 when autoconfiguring */ 2660 else if ((ixgbe_num_queues == 0) && (queues > 8)) 2661 queues = 8; 2662 2663 /* reflect correct sysctl value */ 2664 ixgbe_num_queues = queues; 2665 2666 /* 2667 ** Want one vector (RX/TX pair) per queue 2668 ** plus an additional for Link. 
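** e.g. 8 queues want 9 vectors; if the device offers fewer MSIX messages than that we fall back to MSI below.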
2669 */ 2670 want = queues + 1; 2671 if (msgs >= want) 2672 msgs = want; 2673 else { 2674 device_printf(adapter->dev, 2675 "MSIX Configuration Problem, " 2676 "%d vectors but %d queues wanted!\n", 2677 msgs, want); 2678 goto msi; 2679 } 2680 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) { 2681 device_printf(adapter->dev, 2682 "Using MSIX interrupts with %d vectors\n", msgs); 2683 adapter->num_queues = queues; 2684 return (msgs); 2685 } 2686 /* 2687 ** If MSIX alloc failed or provided us with 2688 ** less than needed, free and fall through to MSI 2689 */ 2690 pci_release_msi(dev); 2691 2692 msi: 2693 if (adapter->msix_mem != NULL) { 2694 bus_release_resource(dev, SYS_RES_MEMORY, 2695 rid, adapter->msix_mem); 2696 adapter->msix_mem = NULL; 2697 } 2698 msgs = 1; 2699 if (pci_alloc_msi(dev, &msgs) == 0) { 2700 device_printf(adapter->dev, "Using an MSI interrupt\n"); 2701 return (msgs); 2702 } 2703 device_printf(adapter->dev, "Using a Legacy interrupt\n"); 2704 return (0); 2705 } 2706 2707 2708 static int 2709 ixgbe_allocate_pci_resources(struct adapter *adapter) 2710 { 2711 int rid; 2712 device_t dev = adapter->dev; 2713 2714 rid = PCIR_BAR(0); 2715 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2716 &rid, RF_ACTIVE); 2717 2718 if (!(adapter->pci_mem)) { 2719 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2720 return (ENXIO); 2721 } 2722 2723 /* Save bus_space values for READ/WRITE_REG macros */ 2724 adapter->osdep.mem_bus_space_tag = 2725 rman_get_bustag(adapter->pci_mem); 2726 adapter->osdep.mem_bus_space_handle = 2727 rman_get_bushandle(adapter->pci_mem); 2728 /* Set hw values for shared code */ 2729 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; 2730 adapter->hw.back = adapter; 2731 2732 /* Default to 1 queue if MSI-X setup fails */ 2733 adapter->num_queues = 1; 2734 2735 /* 2736 ** Now setup MSI or MSI-X, should 2737 ** return us the number of supported 2738 ** vectors. (Will be 1 for MSI) 2739 */ 2740 adapter->msix = ixgbe_setup_msix(adapter); 2741 return (0); 2742 } 2743 2744 static void 2745 ixgbe_free_pci_resources(struct adapter * adapter) 2746 { 2747 struct ix_queue *que = adapter->queues; 2748 device_t dev = adapter->dev; 2749 int rid, memrid; 2750 2751 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 2752 memrid = PCIR_BAR(MSIX_82598_BAR); 2753 else 2754 memrid = PCIR_BAR(MSIX_82599_BAR); 2755 2756 /* 2757 ** There is a slight possibility of a failure mode 2758 ** in attach that will result in entering this function 2759 ** before interrupt resources have been initialized, and 2760 ** in that case we do not want to execute the loops below 2761 ** We can detect this reliably by the state of the adapter 2762 ** res pointer. 2763 */ 2764 if (adapter->res == NULL) 2765 goto mem; 2766 2767 /* 2768 ** Release all msix queue resources: 2769 */ 2770 for (int i = 0; i < adapter->num_queues; i++, que++) { 2771 rid = que->msix + 1; 2772 if (que->tag != NULL) { 2773 bus_teardown_intr(dev, que->res, que->tag); 2774 que->tag = NULL; 2775 } 2776 if (que->res != NULL) 2777 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); 2778 } 2779 2780 2781 /* Clean the Legacy or Link interrupt last */ 2782 if (adapter->vector) /* we are doing MSIX */ 2783 rid = adapter->vector + 1; 2784 else 2785 (adapter->msix != 0) ? 
(rid = 1):(rid = 0); 2786 2787 if (adapter->tag != NULL) { 2788 bus_teardown_intr(dev, adapter->res, adapter->tag); 2789 adapter->tag = NULL; 2790 } 2791 if (adapter->res != NULL) 2792 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); 2793 2794 mem: 2795 if (adapter->msix) 2796 pci_release_msi(dev); 2797 2798 if (adapter->msix_mem != NULL) 2799 bus_release_resource(dev, SYS_RES_MEMORY, 2800 memrid, adapter->msix_mem); 2801 2802 if (adapter->pci_mem != NULL) 2803 bus_release_resource(dev, SYS_RES_MEMORY, 2804 PCIR_BAR(0), adapter->pci_mem); 2805 2806 return; 2807 } 2808 2809 /********************************************************************* 2810 * 2811 * Setup networking device structure and register an interface. 2812 * 2813 **********************************************************************/ 2814 static int 2815 ixgbe_setup_interface(device_t dev, struct adapter *adapter) 2816 { 2817 struct ifnet *ifp; 2818 2819 INIT_DEBUGOUT("ixgbe_setup_interface: begin"); 2820 2821 ifp = adapter->ifp = if_alloc(IFT_ETHER); 2822 if (ifp == NULL) { 2823 device_printf(dev, "can not allocate ifnet structure\n"); 2824 return (-1); 2825 } 2826 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2827 ifp->if_baudrate = IF_Gbps(10); 2828 ifp->if_init = ixgbe_init; 2829 ifp->if_softc = adapter; 2830 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2831 ifp->if_ioctl = ixgbe_ioctl; 2832 #if __FreeBSD_version >= 1100036 2833 if_setgetcounterfn(ifp, ixgbe_get_counter); 2834 #endif 2835 #if __FreeBSD_version >= 1100045 2836 /* TSO parameters */ 2837 ifp->if_hw_tsomax = 65518; 2838 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; 2839 ifp->if_hw_tsomaxsegsize = 2048; 2840 #endif 2841 #ifndef IXGBE_LEGACY_TX 2842 ifp->if_transmit = ixgbe_mq_start; 2843 ifp->if_qflush = ixgbe_qflush; 2844 #else 2845 ifp->if_start = ixgbe_start; 2846 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); 2847 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2; 2848 IFQ_SET_READY(&ifp->if_snd); 2849 #endif 2850 2851 ether_ifattach(ifp, adapter->hw.mac.addr); 2852 2853 adapter->max_frame_size = 2854 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2855 2856 /* 2857 * Tell the upper layer(s) we support long frames. 2858 */ 2859 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 2860 2861 /* Set capability flags */ 2862 ifp->if_capabilities |= IFCAP_RXCSUM 2863 | IFCAP_TXCSUM 2864 | IFCAP_RXCSUM_IPV6 2865 | IFCAP_TXCSUM_IPV6 2866 | IFCAP_TSO4 2867 | IFCAP_TSO6 2868 | IFCAP_LRO 2869 | IFCAP_VLAN_HWTAGGING 2870 | IFCAP_VLAN_HWTSO 2871 | IFCAP_VLAN_HWCSUM 2872 | IFCAP_JUMBO_MTU 2873 | IFCAP_VLAN_MTU 2874 | IFCAP_HWSTATS; 2875 2876 /* Enable the above capabilities by default */ 2877 ifp->if_capenable = ifp->if_capabilities; 2878 2879 /* 2880 ** Don't turn this on by default, if vlans are 2881 ** created on another pseudo device (eg. lagg) 2882 ** then vlan events are not passed thru, breaking 2883 ** operation, but with HW FILTER off it works. If 2884 ** using vlans directly on the ixgbe driver you can 2885 ** enable this and get full hardware tag filtering. 
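** (The capability can still be toggled at runtime, e.g. with ifconfig's ** vlanhwfilter/-vlanhwfilter flags.)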
2886 */ 2887 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2888 2889 /* 2890 * Specify the media types supported by this adapter and register 2891 * callbacks to update media and link information 2892 */ 2893 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, 2894 ixgbe_media_status); 2895 2896 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); 2897 ixgbe_add_media_types(adapter); 2898 2899 /* Set autoselect media by default */ 2900 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2901 2902 return (0); 2903 } 2904 2905 static void 2906 ixgbe_add_media_types(struct adapter *adapter) 2907 { 2908 struct ixgbe_hw *hw = &adapter->hw; 2909 device_t dev = adapter->dev; 2910 int layer; 2911 2912 layer = adapter->phy_layer; 2913 2914 /* Media types with matching FreeBSD media defines */ 2915 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) 2916 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL); 2917 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) 2918 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); 2919 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) 2920 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2921 2922 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2923 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2924 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); 2925 2926 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 2927 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL); 2928 if (hw->phy.multispeed_fiber) 2929 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL); 2930 } 2931 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 2932 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 2933 if (hw->phy.multispeed_fiber) 2934 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 2935 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2936 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 2937 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2938 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 2939 2940 #ifdef IFM_ETH_XTYPE 2941 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2942 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL); 2943 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) 2944 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); 2945 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2946 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL); 2947 #else 2948 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 2949 device_printf(dev, "Media supported: 10GbaseKR\n"); 2950 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); 2951 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 2952 } 2953 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 2954 device_printf(dev, "Media supported: 10GbaseKX4\n"); 2955 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); 2956 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 2957 } 2958 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 2959 device_printf(dev, "Media supported: 1000baseKX\n"); 2960 device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); 2961 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL); 2962 } 2963 #endif 2964 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 2965 device_printf(dev, "Media supported: 1000baseBX\n"); 2966 2967 if (hw->device_id == IXGBE_DEV_ID_82598AT) { 2968 ifmedia_add(&adapter->media, 2969 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2970 ifmedia_add(&adapter->media, 2971 IFM_ETHER | IFM_1000_T, 0, NULL); 2972 } 2973 2974 
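/* Autoselect is always offered as the fallback entry */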
ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2975 } 2976 2977 static void 2978 ixgbe_config_link(struct adapter *adapter) 2979 { 2980 struct ixgbe_hw *hw = &adapter->hw; 2981 u32 autoneg, err = 0; 2982 bool sfp, negotiate; 2983 2984 sfp = ixgbe_is_sfp(hw); 2985 2986 if (sfp) { 2987 taskqueue_enqueue(adapter->tq, &adapter->mod_task); 2988 } else { 2989 if (hw->mac.ops.check_link) 2990 err = ixgbe_check_link(hw, &adapter->link_speed, 2991 &adapter->link_up, FALSE); 2992 if (err) 2993 goto out; 2994 autoneg = hw->phy.autoneg_advertised; 2995 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 2996 err = hw->mac.ops.get_link_capabilities(hw, 2997 &autoneg, &negotiate); 2998 if (err) 2999 goto out; 3000 if (hw->mac.ops.setup_link) 3001 err = hw->mac.ops.setup_link(hw, 3002 autoneg, adapter->link_up); 3003 } 3004 out: 3005 return; 3006 } 3007 3008 3009 /********************************************************************* 3010 * 3011 * Enable transmit units. 3012 * 3013 **********************************************************************/ 3014 static void 3015 ixgbe_initialize_transmit_units(struct adapter *adapter) 3016 { 3017 struct tx_ring *txr = adapter->tx_rings; 3018 struct ixgbe_hw *hw = &adapter->hw; 3019 3020 /* Setup the Base and Length of the Tx Descriptor Ring */ 3021 for (int i = 0; i < adapter->num_queues; i++, txr++) { 3022 u64 tdba = txr->txdma.dma_paddr; 3023 u32 txctrl = 0; 3024 int j = txr->me; 3025 3026 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 3027 (tdba & 0x00000000ffffffffULL)); 3028 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 3029 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 3030 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); 3031 3032 /* Setup the HW Tx Head and Tail descriptor pointers */ 3033 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 3034 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 3035 3036 /* Cache the tail address */ 3037 txr->tail = IXGBE_TDT(j); 3038 3039 /* Disable Head Writeback */ 3040 /* 3041 * Note: for X550 series devices, these registers are actually 3042 * prefixed with TPH_ instead of DCA_, but the addresses and 3043 * fields remain the same.
3044 */ 3045 switch (hw->mac.type) { 3046 case ixgbe_mac_82598EB: 3047 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 3048 break; 3049 default: 3050 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 3051 break; 3052 } 3053 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 3054 switch (hw->mac.type) { 3055 case ixgbe_mac_82598EB: 3056 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 3057 break; 3058 default: 3059 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 3060 break; 3061 } 3062 3063 } 3064 3065 if (hw->mac.type != ixgbe_mac_82598EB) { 3066 u32 dmatxctl, rttdcs; 3067 #ifdef PCI_IOV 3068 enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter); 3069 #endif 3070 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 3071 dmatxctl |= IXGBE_DMATXCTL_TE; 3072 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 3073 /* Disable arbiter to set MTQC */ 3074 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 3075 rttdcs |= IXGBE_RTTDCS_ARBDIS; 3076 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 3077 #ifdef PCI_IOV 3078 IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode)); 3079 #else 3080 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 3081 #endif 3082 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 3083 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 3084 } 3085 3086 return; 3087 } 3088 3089 static void 3090 ixgbe_initialize_rss_mapping(struct adapter *adapter) 3091 { 3092 struct ixgbe_hw *hw = &adapter->hw; 3093 u32 reta = 0, mrqc, rss_key[10]; 3094 int queue_id, table_size, index_mult; 3095 #ifdef RSS 3096 u32 rss_hash_config; 3097 #endif 3098 #ifdef PCI_IOV 3099 enum ixgbe_iov_mode mode; 3100 #endif 3101 3102 #ifdef RSS 3103 /* Fetch the configured RSS key */ 3104 rss_getkey((uint8_t *) &rss_key); 3105 #else 3106 /* set up random bits */ 3107 arc4rand(&rss_key, sizeof(rss_key), 0); 3108 #endif 3109 3110 /* Set multiplier for RETA setup and table size based on MAC */ 3111 index_mult = 0x1; 3112 table_size = 128; 3113 switch (adapter->hw.mac.type) { 3114 case ixgbe_mac_82598EB: 3115 index_mult = 0x11; 3116 break; 3117 case ixgbe_mac_X550: 3118 case ixgbe_mac_X550EM_x: 3119 table_size = 512; 3120 break; 3121 default: 3122 break; 3123 } 3124 3125 /* Set up the redirection table */ 3126 for (int i = 0, j = 0; i < table_size; i++, j++) { 3127 if (j == adapter->num_queues) j = 0; 3128 #ifdef RSS 3129 /* 3130 * Fetch the RSS bucket id for the given indirection entry. 3131 * Cap it at the number of configured buckets (which is 3132 * num_queues.) 3133 */ 3134 queue_id = rss_get_indirection_to_bucket(i); 3135 queue_id = queue_id % adapter->num_queues; 3136 #else 3137 queue_id = (j * index_mult); 3138 #endif 3139 /* 3140 * The low 8 bits are for hash value (n+0); 3141 * The next 8 bits are for hash value (n+1), etc. 
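* e.g. with four queues, entries 0-3 accumulate to 0x03020100 and are written * out to RETA(0) on the fourth pass, when (i & 3) == 3.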
3142 */ 3143 reta = reta >> 8; 3144 reta = reta | ( ((uint32_t) queue_id) << 24); 3145 if ((i & 3) == 3) { 3146 if (i < 128) 3147 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 3148 else 3149 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta); 3150 reta = 0; 3151 } 3152 } 3153 3154 /* Now fill our hash function seeds */ 3155 for (int i = 0; i < 10; i++) 3156 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 3157 3158 /* Perform hash on these packet types */ 3159 #ifdef RSS 3160 mrqc = IXGBE_MRQC_RSSEN; 3161 rss_hash_config = rss_gethashconfig(); 3162 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 3163 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 3164 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 3165 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 3166 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 3167 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 3168 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 3169 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 3170 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 3171 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 3172 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 3173 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 3174 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 3175 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 3176 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX) 3177 device_printf(adapter->dev, 3178 "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, " 3179 "but not supported\n", __func__); 3180 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 3181 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 3182 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 3183 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 3184 #else 3185 /* 3186 * Disable UDP - IP fragments aren't currently being handled 3187 * and so we end up with a mix of 2-tuple and 4-tuple 3188 * traffic. 3189 */ 3190 mrqc = IXGBE_MRQC_RSSEN 3191 | IXGBE_MRQC_RSS_FIELD_IPV4 3192 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 3193 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 3194 | IXGBE_MRQC_RSS_FIELD_IPV6_EX 3195 | IXGBE_MRQC_RSS_FIELD_IPV6 3196 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 3197 ; 3198 #endif /* RSS */ 3199 #ifdef PCI_IOV 3200 mode = ixgbe_get_iov_mode(adapter); 3201 mrqc |= ixgbe_get_mrqc(mode); 3202 #endif 3203 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 3204 } 3205 3206 3207 /********************************************************************* 3208 * 3209 * Setup receive registers and features. 3210 * 3211 **********************************************************************/ 3212 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 3213 3214 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 3215 3216 static void 3217 ixgbe_initialize_receive_units(struct adapter *adapter) 3218 { 3219 struct rx_ring *rxr = adapter->rx_rings; 3220 struct ixgbe_hw *hw = &adapter->hw; 3221 struct ifnet *ifp = adapter->ifp; 3222 u32 bufsz, fctrl, srrctl, rxcsum; 3223 u32 hlreg; 3224 3225 /* 3226 * Make sure receives are disabled while 3227 * setting up the descriptor ring 3228 */ 3229 ixgbe_disable_rx(hw); 3230 3231 /* Enable broadcasts */ 3232 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3233 fctrl |= IXGBE_FCTRL_BAM; 3234 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3235 fctrl |= IXGBE_FCTRL_DPF; 3236 fctrl |= IXGBE_FCTRL_PMCF; 3237 } 3238 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3239 3240 /* Set for Jumbo Frames? */ 3241 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 3242 if (ifp->if_mtu > ETHERMTU) 3243 hlreg |= IXGBE_HLREG0_JUMBOEN; 3244 else 3245 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 3246 #ifdef DEV_NETMAP 3247 /* crcstrip is conditional in netmap (in RDRXCTL too ?) 
) */ 3248 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip) 3249 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP; 3250 else 3251 hlreg |= IXGBE_HLREG0_RXCRCSTRP; 3252 #endif /* DEV_NETMAP */ 3253 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); 3254 3255 bufsz = (adapter->rx_mbuf_sz + 3256 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 3257 3258 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 3259 u64 rdba = rxr->rxdma.dma_paddr; 3260 int j = rxr->me; 3261 3262 /* Setup the Base and Length of the Rx Descriptor Ring */ 3263 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), 3264 (rdba & 0x00000000ffffffffULL)); 3265 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 3266 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), 3267 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 3268 3269 /* Set up the SRRCTL register */ 3270 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); 3271 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 3272 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 3273 srrctl |= bufsz; 3274 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 3275 3276 /* 3277 * Set DROP_EN iff we have no flow control and >1 queue. 3278 * Note that srrctl was cleared shortly before during reset, 3279 * so we do not need to clear the bit, but do it just in case 3280 * this code is moved elsewhere. 3281 */ 3282 if (adapter->num_queues > 1 && 3283 adapter->hw.fc.requested_mode == ixgbe_fc_none) { 3284 srrctl |= IXGBE_SRRCTL_DROP_EN; 3285 } else { 3286 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 3287 } 3288 3289 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 3290 3291 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 3292 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 3293 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 3294 3295 /* Set the driver rx tail address */ 3296 rxr->tail = IXGBE_RDT(rxr->me); 3297 } 3298 3299 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 3300 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 3301 IXGBE_PSRTYPE_UDPHDR | 3302 IXGBE_PSRTYPE_IPV4HDR | 3303 IXGBE_PSRTYPE_IPV6HDR; 3304 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 3305 } 3306 3307 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3308 3309 ixgbe_initialize_rss_mapping(adapter); 3310 3311 if (adapter->num_queues > 1) { 3312 /* RSS and RX IPP Checksum are mutually exclusive */ 3313 rxcsum |= IXGBE_RXCSUM_PCSD; 3314 } 3315 3316 if (ifp->if_capenable & IFCAP_RXCSUM) 3317 rxcsum |= IXGBE_RXCSUM_PCSD; 3318 3319 /* This is useful for calculating UDP/IP fragment checksums */ 3320 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 3321 rxcsum |= IXGBE_RXCSUM_IPPCSE; 3322 3323 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3324 3325 return; 3326 } 3327 3328 3329 /* 3330 ** This routine is run via a vlan config EVENT; 3331 ** it enables us to use the HW Filter table since 3332 ** we can get the vlan id. This just creates the 3333 ** entry in the soft version of the VFTA; init will 3334 ** repopulate the real table. 3335 */ 3336 static void 3337 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) 3338 { 3339 struct adapter *adapter = ifp->if_softc; 3340 u16 index, bit; 3341 3342 if (ifp->if_softc != arg) /* Not our event */ 3343 return; 3344 3345 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 3346 return; 3347 3348 IXGBE_CORE_LOCK(adapter); 3349 index = (vtag >> 5) & 0x7F; 3350 bit = vtag & 0x1F; 3351 adapter->shadow_vfta[index] |= (1 << bit); 3352 ++adapter->num_vlans; 3353 ixgbe_setup_vlan_hw_support(adapter); 3354 IXGBE_CORE_UNLOCK(adapter); 3355 } 3356 3357 /* 3358 ** This routine is run via a vlan 3359 ** unconfig EVENT, removing our entry 3360 ** in the soft vfta.
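** e.g. vtag 100: index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4, ** so bit 4 of shadow_vfta[3] is cleared below.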
3361 */ 3362 static void 3363 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 3364 { 3365 struct adapter *adapter = ifp->if_softc; 3366 u16 index, bit; 3367 3368 if (ifp->if_softc != arg) 3369 return; 3370 3371 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 3372 return; 3373 3374 IXGBE_CORE_LOCK(adapter); 3375 index = (vtag >> 5) & 0x7F; 3376 bit = vtag & 0x1F; 3377 adapter->shadow_vfta[index] &= ~(1 << bit); 3378 --adapter->num_vlans; 3379 /* Re-init to load the changes */ 3380 ixgbe_setup_vlan_hw_support(adapter); 3381 IXGBE_CORE_UNLOCK(adapter); 3382 } 3383 3384 static void 3385 ixgbe_setup_vlan_hw_support(struct adapter *adapter) 3386 { 3387 struct ifnet *ifp = adapter->ifp; 3388 struct ixgbe_hw *hw = &adapter->hw; 3389 struct rx_ring *rxr; 3390 u32 ctrl; 3391 3392 3393 /* 3394 ** We get here thru init_locked, meaning 3395 ** a soft reset; this has already cleared 3396 ** the VFTA and other state, so if there 3397 ** have been no vlans registered do nothing. 3398 */ 3399 if (adapter->num_vlans == 0) 3400 return; 3401 3402 /* Setup the queues for vlans */ 3403 for (int i = 0; i < adapter->num_queues; i++) { 3404 rxr = &adapter->rx_rings[i]; 3405 /* On 82599 the VLAN enable is per-queue in RXDCTL */ 3406 if (hw->mac.type != ixgbe_mac_82598EB) { 3407 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3408 ctrl |= IXGBE_RXDCTL_VME; 3409 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 3410 } 3411 rxr->vtag_strip = TRUE; 3412 } 3413 3414 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 3415 return; 3416 /* 3417 ** A soft reset zeroes out the VFTA, so 3418 ** we need to repopulate it now. 3419 */ 3420 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) 3421 if (adapter->shadow_vfta[i] != 0) 3422 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 3423 adapter->shadow_vfta[i]); 3424 3425 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3426 /* Enable the Filter Table if enabled */ 3427 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 3428 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 3429 ctrl |= IXGBE_VLNCTRL_VFE; 3430 } 3431 if (hw->mac.type == ixgbe_mac_82598EB) 3432 ctrl |= IXGBE_VLNCTRL_VME; 3433 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 3434 } 3435 3436 static void 3437 ixgbe_enable_intr(struct adapter *adapter) 3438 { 3439 struct ixgbe_hw *hw = &adapter->hw; 3440 struct ix_queue *que = adapter->queues; 3441 u32 mask, fwsm; 3442 3443 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 3444 /* Enable Fan Failure detection */ 3445 if (hw->device_id == IXGBE_DEV_ID_82598AT) 3446 mask |= IXGBE_EIMS_GPI_SDP1; 3447 3448 switch (adapter->hw.mac.type) { 3449 case ixgbe_mac_82599EB: 3450 mask |= IXGBE_EIMS_ECC; 3451 /* Temperature sensor on some adapters */ 3452 mask |= IXGBE_EIMS_GPI_SDP0; 3453 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 3454 mask |= IXGBE_EIMS_GPI_SDP1; 3455 mask |= IXGBE_EIMS_GPI_SDP2; 3456 #ifdef IXGBE_FDIR 3457 mask |= IXGBE_EIMS_FLOW_DIR; 3458 #endif 3459 #ifdef PCI_IOV 3460 mask |= IXGBE_EIMS_MAILBOX; 3461 #endif 3462 break; 3463 case ixgbe_mac_X540: 3464 /* Detect if Thermal Sensor is enabled */ 3465 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 3466 if (fwsm & IXGBE_FWSM_TS_ENABLED) 3467 mask |= IXGBE_EIMS_TS; 3468 mask |= IXGBE_EIMS_ECC; 3469 #ifdef IXGBE_FDIR 3470 mask |= IXGBE_EIMS_FLOW_DIR; 3471 #endif 3472 break; 3473 case ixgbe_mac_X550: 3474 case ixgbe_mac_X550EM_x: 3475 /* MAC thermal sensor is automatically enabled */ 3476 mask |= IXGBE_EIMS_TS; 3477 /* Some devices use SDP0 for important information */ 3478 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3479 hw->device_id ==
IXGBE_DEV_ID_X550EM_X_10G_T) 3480 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 3481 mask |= IXGBE_EIMS_ECC; 3482 #ifdef IXGBE_FDIR 3483 mask |= IXGBE_EIMS_FLOW_DIR; 3484 #endif 3485 #ifdef PCI_IOV 3486 mask |= IXGBE_EIMS_MAILBOX; 3487 #endif 3488 /* falls through */ 3489 default: 3490 break; 3491 } 3492 3493 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3494 3495 /* With MSI-X we use auto clear */ 3496 if (adapter->msix_mem) { 3497 mask = IXGBE_EIMS_ENABLE_MASK; 3498 /* Don't autoclear Link */ 3499 mask &= ~IXGBE_EIMS_OTHER; 3500 mask &= ~IXGBE_EIMS_LSC; 3501 #ifdef PCI_IOV 3502 mask &= ~IXGBE_EIMS_MAILBOX; 3503 #endif 3504 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 3505 } 3506 3507 /* 3508 ** Now enable all queues, this is done separately to 3509 ** allow for handling the extended (beyond 32) MSIX 3510 ** vectors that can be used by 82599 3511 */ 3512 for (int i = 0; i < adapter->num_queues; i++, que++) 3513 ixgbe_enable_queue(adapter, que->msix); 3514 3515 IXGBE_WRITE_FLUSH(hw); 3516 3517 return; 3518 } 3519 3520 static void 3521 ixgbe_disable_intr(struct adapter *adapter) 3522 { 3523 if (adapter->msix_mem) 3524 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); 3525 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3526 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3527 } else { 3528 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3529 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3530 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3531 } 3532 IXGBE_WRITE_FLUSH(&adapter->hw); 3533 return; 3534 } 3535 3536 /* 3537 ** Get the width and transaction speed of 3538 ** the slot this adapter is plugged into. 3539 */ 3540 static void 3541 ixgbe_get_slot_info(struct adapter *adapter) 3542 { 3543 device_t dev = adapter->dev; 3544 struct ixgbe_hw *hw = &adapter->hw; 3545 struct ixgbe_mac_info *mac = &hw->mac; 3546 u16 link; 3547 u32 offset; 3548 3549 /* For most devices simply call the shared code routine */ 3550 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) { 3551 ixgbe_get_bus_info(hw); 3552 /* These devices don't use PCI-E */ 3553 switch (hw->mac.type) { 3554 case ixgbe_mac_X550EM_x: 3555 return; 3556 default: 3557 goto display; 3558 } 3559 } 3560 3561 /* 3562 ** For the Quad port adapter we need to parse back 3563 ** up the PCI tree to find the speed of the expansion 3564 ** slot into which this adapter is plugged. A bit more work. 
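** Each pair of device_get_parent() calls below climbs one level of that tree: ** first to the bridge directly above the adapter, then to the bridge of the ** physical slot.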
3565 */ 3566 dev = device_get_parent(device_get_parent(dev)); 3567 #ifdef IXGBE_DEBUG 3568 device_printf(dev, "parent pcib = %x,%x,%x\n", 3569 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); 3570 #endif 3571 dev = device_get_parent(device_get_parent(dev)); 3572 #ifdef IXGBE_DEBUG 3573 device_printf(dev, "slot pcib = %x,%x,%x\n", 3574 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); 3575 #endif 3576 /* Now get the PCI Express Capabilities offset */ 3577 pci_find_cap(dev, PCIY_EXPRESS, &offset); 3578 /* ...and read the Link Status Register */ 3579 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 3580 switch (link & IXGBE_PCI_LINK_WIDTH) { 3581 case IXGBE_PCI_LINK_WIDTH_1: 3582 hw->bus.width = ixgbe_bus_width_pcie_x1; 3583 break; 3584 case IXGBE_PCI_LINK_WIDTH_2: 3585 hw->bus.width = ixgbe_bus_width_pcie_x2; 3586 break; 3587 case IXGBE_PCI_LINK_WIDTH_4: 3588 hw->bus.width = ixgbe_bus_width_pcie_x4; 3589 break; 3590 case IXGBE_PCI_LINK_WIDTH_8: 3591 hw->bus.width = ixgbe_bus_width_pcie_x8; 3592 break; 3593 default: 3594 hw->bus.width = ixgbe_bus_width_unknown; 3595 break; 3596 } 3597 3598 switch (link & IXGBE_PCI_LINK_SPEED) { 3599 case IXGBE_PCI_LINK_SPEED_2500: 3600 hw->bus.speed = ixgbe_bus_speed_2500; 3601 break; 3602 case IXGBE_PCI_LINK_SPEED_5000: 3603 hw->bus.speed = ixgbe_bus_speed_5000; 3604 break; 3605 case IXGBE_PCI_LINK_SPEED_8000: 3606 hw->bus.speed = ixgbe_bus_speed_8000; 3607 break; 3608 default: 3609 hw->bus.speed = ixgbe_bus_speed_unknown; 3610 break; 3611 } 3612 3613 mac->ops.set_lan_id(hw); 3614 3615 display: 3616 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 3617 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s": 3618 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s": 3619 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"), 3620 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 3621 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 3622 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? 
"Width x1" : 3623 ("Unknown")); 3624 3625 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 3626 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 3627 (hw->bus.speed == ixgbe_bus_speed_2500))) { 3628 device_printf(dev, "PCI-Express bandwidth available" 3629 " for this card\n is not sufficient for" 3630 " optimal performance.\n"); 3631 device_printf(dev, "For optimal performance a x8 " 3632 "PCIE, or x4 PCIE Gen2 slot is required.\n"); 3633 } 3634 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 3635 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 3636 (hw->bus.speed < ixgbe_bus_speed_8000))) { 3637 device_printf(dev, "PCI-Express bandwidth available" 3638 " for this card\n is not sufficient for" 3639 " optimal performance.\n"); 3640 device_printf(dev, "For optimal performance a x8 " 3641 "PCIE Gen3 slot is required.\n"); 3642 } 3643 3644 return; 3645 } 3646 3647 3648 /* 3649 ** Setup the correct IVAR register for a particular MSIX interrupt 3650 ** (yes this is all very magic and confusing :) 3651 ** - entry is the register array entry 3652 ** - vector is the MSIX vector for this queue 3653 ** - type is RX/TX/MISC 3654 */ 3655 static void 3656 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 3657 { 3658 struct ixgbe_hw *hw = &adapter->hw; 3659 u32 ivar, index; 3660 3661 vector |= IXGBE_IVAR_ALLOC_VAL; 3662 3663 switch (hw->mac.type) { 3664 3665 case ixgbe_mac_82598EB: 3666 if (type == -1) 3667 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3668 else 3669 entry += (type * 64); 3670 index = (entry >> 2) & 0x1F; 3671 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3672 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3673 ivar |= (vector << (8 * (entry & 0x3))); 3674 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 3675 break; 3676 3677 case ixgbe_mac_82599EB: 3678 case ixgbe_mac_X540: 3679 case ixgbe_mac_X550: 3680 case ixgbe_mac_X550EM_x: 3681 if (type == -1) { /* MISC IVAR */ 3682 index = (entry & 1) * 8; 3683 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3684 ivar &= ~(0xFF << index); 3685 ivar |= (vector << index); 3686 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3687 } else { /* RX/TX IVARS */ 3688 index = (16 * (entry & 1)) + (8 * type); 3689 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3690 ivar &= ~(0xFF << index); 3691 ivar |= (vector << index); 3692 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3693 } 3694 3695 default: 3696 break; 3697 } 3698 } 3699 3700 static void 3701 ixgbe_configure_ivars(struct adapter *adapter) 3702 { 3703 struct ix_queue *que = adapter->queues; 3704 u32 newitr; 3705 3706 if (ixgbe_max_interrupt_rate > 0) 3707 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3708 else { 3709 /* 3710 ** Disable DMA coalescing if interrupt moderation is 3711 ** disabled. 3712 */ 3713 adapter->dmac = 0; 3714 newitr = 0; 3715 } 3716 3717 for (int i = 0; i < adapter->num_queues; i++, que++) { 3718 struct rx_ring *rxr = &adapter->rx_rings[i]; 3719 struct tx_ring *txr = &adapter->tx_rings[i]; 3720 /* First the RX queue entry */ 3721 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0); 3722 /* ... and the TX */ 3723 ixgbe_set_ivar(adapter, txr->me, que->msix, 1); 3724 /* Set an Initial EITR value */ 3725 IXGBE_WRITE_REG(&adapter->hw, 3726 IXGBE_EITR(que->msix), newitr); 3727 } 3728 3729 /* For the Link interrupt */ 3730 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 3731 } 3732 3733 /* 3734 ** ixgbe_sfp_probe - called in the local timer to 3735 ** determine if a port had optics inserted. 
3733 /*
3734 ** ixgbe_sfp_probe - called in the local timer to
3735 ** determine if a port had optics inserted.
3736 */
3737 static bool
3738 ixgbe_sfp_probe(struct adapter *adapter)
3739 {
3740 	struct ixgbe_hw *hw = &adapter->hw;
3741 	device_t dev = adapter->dev;
3742 	bool result = FALSE;
3743
3744 	if ((hw->phy.type == ixgbe_phy_nl) &&
3745 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3746 		s32 ret = hw->phy.ops.identify_sfp(hw);
3747 		if (ret)
3748 			goto out;
3749 		ret = hw->phy.ops.reset(hw);
3750 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3751 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3752 			device_printf(dev, "Reload driver with supported module.\n");
3753 			adapter->sfp_probe = FALSE;
3754 			goto out;
3755 		} else
3756 			device_printf(dev, "SFP+ module detected!\n");
3757 		/* We now have supported optics */
3758 		adapter->sfp_probe = FALSE;
3759 		/* Set the optics type so system reports correctly */
3760 		ixgbe_setup_optics(adapter);
3761 		result = TRUE;
3762 	}
3763 out:
3764 	return (result);
3765 }
3766
3767 /*
3768 ** Tasklet handler for MSIX Link interrupts
3769 **  - do outside interrupt since it might sleep
3770 */
3771 static void
3772 ixgbe_handle_link(void *context, int pending)
3773 {
3774 	struct adapter *adapter = context;
3775 	struct ixgbe_hw *hw = &adapter->hw;
3776
3777 	ixgbe_check_link(hw,
3778 	    &adapter->link_speed, &adapter->link_up, 0);
3779 	ixgbe_update_link_status(adapter);
3780
3781 	/* Re-enable link interrupts */
3782 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3783 }
3784
3785 /*
3786 ** Tasklet for handling SFP module interrupts
3787 */
3788 static void
3789 ixgbe_handle_mod(void *context, int pending)
3790 {
3791 	struct adapter *adapter = context;
3792 	struct ixgbe_hw *hw = &adapter->hw;
3793 	enum ixgbe_phy_type orig_type = hw->phy.type;
3794 	device_t dev = adapter->dev;
3795 	u32 err;
3796
3797 	IXGBE_CORE_LOCK(adapter);
3798
3799 	/* Check to see if the PHY type changed */
3800 	if (hw->phy.ops.identify) {
3801 		hw->phy.type = ixgbe_phy_unknown;
3802 		hw->phy.ops.identify(hw);
3803 	}
3804
3805 	if (hw->phy.type != orig_type) {
3806 		device_printf(dev, "Detected phy_type %d\n", hw->phy.type);
3807
3808 		if (hw->phy.type == ixgbe_phy_none) {
3809 			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
3810 			goto out;
3811 		}
3812
3813 		/* Try to do the initialization that was skipped before */
3814 		if (hw->phy.ops.init)
3815 			hw->phy.ops.init(hw);
3816 		if (hw->phy.ops.reset)
3817 			hw->phy.ops.reset(hw);
3818 	}
3819
3820 	err = hw->phy.ops.identify_sfp(hw);
3821 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3822 		device_printf(dev,
3823 		    "Unsupported SFP+ module type was detected.\n");
3824 		goto out;
3825 	}
3826
3827 	err = hw->mac.ops.setup_sfp(hw);
3828 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3829 		device_printf(dev,
3830 		    "Setup failure - unsupported SFP+ module type.\n");
3831 		goto out;
3832 	}
3833 	if (hw->phy.multispeed_fiber)
3834 		taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3835 out:
3836 	/* Update media type */
3837 	switch (hw->mac.ops.get_media_type(hw)) {
3838 	case ixgbe_media_type_fiber:
3839 		adapter->optics = IFM_10G_SR;
3840 		break;
3841 	case ixgbe_media_type_copper:
3842 		adapter->optics = IFM_10G_TWINAX;
3843 		break;
3844 	case ixgbe_media_type_cx4:
3845 		adapter->optics = IFM_10G_CX4;
3846 		break;
3847 	default:
3848 		adapter->optics = 0;
3849 		break;
3850 	}
3851
3852 	IXGBE_CORE_UNLOCK(adapter);
3853 	return;
3854 }
3855
3856
3857 /*
3858 ** Tasklet for handling MSF (multispeed fiber) interrupts
3859 */
3860 static void
3861 ixgbe_handle_msf(void *context, int pending)
3862 {
3863 	struct adapter *adapter = context;
3864 	struct ixgbe_hw *hw = &adapter->hw;
3865 	u32 autoneg;
3866 	bool
negotiate; 3867 3868 IXGBE_CORE_LOCK(adapter); 3869 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 3870 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 3871 3872 autoneg = hw->phy.autoneg_advertised; 3873 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3874 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3875 if (hw->mac.ops.setup_link) 3876 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3877 3878 /* Adjust media types shown in ifconfig */ 3879 ifmedia_removeall(&adapter->media); 3880 ixgbe_add_media_types(adapter); 3881 IXGBE_CORE_UNLOCK(adapter); 3882 return; 3883 } 3884 3885 /* 3886 ** Tasklet for handling interrupts from an external PHY 3887 */ 3888 static void 3889 ixgbe_handle_phy(void *context, int pending) 3890 { 3891 struct adapter *adapter = context; 3892 struct ixgbe_hw *hw = &adapter->hw; 3893 int error; 3894 3895 error = hw->phy.ops.handle_lasi(hw); 3896 if (error == IXGBE_ERR_OVERTEMP) 3897 device_printf(adapter->dev, 3898 "CRITICAL: EXTERNAL PHY OVER TEMP!! " 3899 " PHY will downshift to lower power state!\n"); 3900 else if (error) 3901 device_printf(adapter->dev, 3902 "Error handling LASI interrupt: %d\n", 3903 error); 3904 return; 3905 } 3906 3907 #ifdef IXGBE_FDIR 3908 /* 3909 ** Tasklet for reinitializing the Flow Director filter table 3910 */ 3911 static void 3912 ixgbe_reinit_fdir(void *context, int pending) 3913 { 3914 struct adapter *adapter = context; 3915 struct ifnet *ifp = adapter->ifp; 3916 3917 if (adapter->fdir_reinit != 1) /* Shouldn't happen */ 3918 return; 3919 ixgbe_reinit_fdir_tables_82599(&adapter->hw); 3920 adapter->fdir_reinit = 0; 3921 /* re-enable flow director interrupts */ 3922 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 3923 /* Restart the interface */ 3924 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3925 return; 3926 } 3927 #endif 3928 3929 /********************************************************************* 3930 * 3931 * Configure DMA Coalescing 3932 * 3933 **********************************************************************/ 3934 static void 3935 ixgbe_config_dmac(struct adapter *adapter) 3936 { 3937 struct ixgbe_hw *hw = &adapter->hw; 3938 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 3939 3940 if (hw->mac.type < ixgbe_mac_X550 || 3941 !hw->mac.ops.dmac_config) 3942 return; 3943 3944 if (dcfg->watchdog_timer ^ adapter->dmac || 3945 dcfg->link_speed ^ adapter->link_speed) { 3946 dcfg->watchdog_timer = adapter->dmac; 3947 dcfg->fcoe_en = false; 3948 dcfg->link_speed = adapter->link_speed; 3949 dcfg->num_tcs = 1; 3950 3951 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 3952 dcfg->watchdog_timer, dcfg->link_speed); 3953 3954 hw->mac.ops.dmac_config(hw); 3955 } 3956 } 3957 3958 /* 3959 * Checks whether the adapter's ports are capable of 3960 * Wake On LAN by reading the adapter's NVM. 3961 * 3962 * Sets each port's hw->wol_enabled value depending 3963 * on the value read here. 
3964 */ 3965 static void 3966 ixgbe_check_wol_support(struct adapter *adapter) 3967 { 3968 struct ixgbe_hw *hw = &adapter->hw; 3969 u16 dev_caps = 0; 3970 3971 /* Find out WoL support for port */ 3972 adapter->wol_support = hw->wol_enabled = 0; 3973 ixgbe_get_device_caps(hw, &dev_caps); 3974 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 3975 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 3976 hw->bus.func == 0)) 3977 adapter->wol_support = hw->wol_enabled = 1; 3978 3979 /* Save initial wake up filter configuration */ 3980 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 3981 3982 return; 3983 } 3984 3985 /* 3986 * Prepare the adapter/port for LPLU and/or WoL 3987 */ 3988 static int 3989 ixgbe_setup_low_power_mode(struct adapter *adapter) 3990 { 3991 struct ixgbe_hw *hw = &adapter->hw; 3992 device_t dev = adapter->dev; 3993 s32 error = 0; 3994 3995 mtx_assert(&adapter->core_mtx, MA_OWNED); 3996 3997 if (!hw->wol_enabled) 3998 ixgbe_set_phy_power(hw, FALSE); 3999 4000 /* Limit power management flow to X550EM baseT */ 4001 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T 4002 && hw->phy.ops.enter_lplu) { 4003 /* Turn off support for APM wakeup. (Using ACPI instead) */ 4004 IXGBE_WRITE_REG(hw, IXGBE_GRC, 4005 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 4006 4007 /* 4008 * Clear Wake Up Status register to prevent any previous wakeup 4009 * events from waking us up immediately after we suspend. 4010 */ 4011 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 4012 4013 /* 4014 * Program the Wakeup Filter Control register with user filter 4015 * settings 4016 */ 4017 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 4018 4019 /* Enable wakeups and power management in Wakeup Control */ 4020 IXGBE_WRITE_REG(hw, IXGBE_WUC, 4021 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 4022 4023 /* X550EM baseT adapters need a special LPLU flow */ 4024 hw->phy.reset_disable = true; 4025 ixgbe_stop(adapter); 4026 error = hw->phy.ops.enter_lplu(hw); 4027 if (error) 4028 device_printf(dev, 4029 "Error entering LPLU: %d\n", error); 4030 hw->phy.reset_disable = false; 4031 } else { 4032 /* Just stop for other adapters */ 4033 ixgbe_stop(adapter); 4034 } 4035 4036 return error; 4037 } 4038 4039 /********************************************************************** 4040 * 4041 * Update the board statistics counters. 
4042  *
4043  **********************************************************************/
4044 static void
4045 ixgbe_update_stats_counters(struct adapter *adapter)
4046 {
4047 	struct ixgbe_hw *hw = &adapter->hw;
4048 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
4049 	u64 total_missed_rx = 0;
4050
4051 	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4052 	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4053 	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4054 	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	/*
	 * Tally missed-packet counts (MPC): missed_rx feeds the gprc
	 * workaround below, and total_missed_rx backs IQDROPS.
	 */
	for (int i = 0; i < 8; i++) {
		u32 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mp;
		adapter->stats.pf.mpc[i] += mp;
		total_missed_rx += adapter->stats.pf.mpc[i];
	}
4055
4056 	for (int i = 0; i < 16; i++) {
4057 		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4058 		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4059 		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4060 	}
4061 	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4062 	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4063 	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4064
4065 	/* Hardware workaround, gprc counts missed packets */
4066 	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4067 	adapter->stats.pf.gprc -= missed_rx;
4068
4069 	if (hw->mac.type != ixgbe_mac_82598EB) {
4070 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4071 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4072 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4073 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4074 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4075 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4076 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4077 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4078 	} else {
4079 		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4080 		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4081 		/* 82598 only has a counter in the high register */
4082 		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4083 		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4084 		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4085 	}
4086
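/*
 * Illustrative note (not part of the driver): the octet counters above
 * are split low/high register pairs that clear on read; the 64-bit
 * software total is rebuilt as low + (high << 32).  For example, reading
 * GORCL = 0x0000F000 and GORCH = 0x2 adds 0x20000F000 bytes (~8.6 GB)
 * to the running gorc counter.
 */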
4087 	/*
4088 	 * Workaround: mprc hardware is incorrectly counting
4089 	 * broadcasts, so for now we subtract those.
4090 	 */
4091 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4092 	adapter->stats.pf.bprc += bprc;
4093 	adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4094 	if (hw->mac.type == ixgbe_mac_82598EB)
4095 		adapter->stats.pf.mprc -= bprc;
4096
4097 	adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4098 	adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4099 	adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4100 	adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4101 	adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4102 	adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4103
4104 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4105 	adapter->stats.pf.lxontxc += lxon;
4106 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4107 	adapter->stats.pf.lxofftxc += lxoff;
4108 	total = lxon + lxoff;
4109
4110 	adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4111 	adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4112 	adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	/* XON/XOFF pause frames are multicast 64-byte frames; back them out */
4113 	adapter->stats.pf.gptc -= total;
4114 	adapter->stats.pf.mptc -= total;
4115 	adapter->stats.pf.ptc64 -= total;
4116 	adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
4117
4118 	adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4119 	adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4120 	adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4121 	adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4122 	adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4123 	adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4124 	adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4125 	adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4126 	adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4127 	adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4128 	adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4129 	adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4130 	adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4131 	adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4132 	adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4133 	adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4134 	adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4135 	adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4136 	/* FCoE counters only exist on 82599 and newer */
4137 	if (hw->mac.type != ixgbe_mac_82598EB) {
4138 		adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4139 		adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4140 		adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4141 		adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4142 		adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4143 	}
4144
4145 	/* Fill out the OS statistics structure */
4146 	IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
4147 	IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
4148 	IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
4149 	IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
4150 	IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
4151 	IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
4152 	IXGBE_SET_COLLISIONS(adapter, 0);
4153 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4154 	IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
4155 	    + adapter->stats.pf.rlec);
4156 }
4157
4158 #if __FreeBSD_version >= 1100036
4159 static uint64_t
4160 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
4161 {
4162 	struct adapter
*adapter; 4163 struct tx_ring *txr; 4164 uint64_t rv; 4165 4166 adapter = if_getsoftc(ifp); 4167 4168 switch (cnt) { 4169 case IFCOUNTER_IPACKETS: 4170 return (adapter->ipackets); 4171 case IFCOUNTER_OPACKETS: 4172 return (adapter->opackets); 4173 case IFCOUNTER_IBYTES: 4174 return (adapter->ibytes); 4175 case IFCOUNTER_OBYTES: 4176 return (adapter->obytes); 4177 case IFCOUNTER_IMCASTS: 4178 return (adapter->imcasts); 4179 case IFCOUNTER_OMCASTS: 4180 return (adapter->omcasts); 4181 case IFCOUNTER_COLLISIONS: 4182 return (0); 4183 case IFCOUNTER_IQDROPS: 4184 return (adapter->iqdrops); 4185 case IFCOUNTER_OQDROPS: 4186 rv = 0; 4187 txr = adapter->tx_rings; 4188 for (int i = 0; i < adapter->num_queues; i++, txr++) 4189 rv += txr->br->br_drops; 4190 return (rv); 4191 case IFCOUNTER_IERRORS: 4192 return (adapter->ierrors); 4193 default: 4194 return (if_get_counter_default(ifp, cnt)); 4195 } 4196 } 4197 #endif 4198 4199 /** ixgbe_sysctl_tdh_handler - Handler function 4200 * Retrieves the TDH value from the hardware 4201 */ 4202 static int 4203 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 4204 { 4205 int error; 4206 4207 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 4208 if (!txr) return 0; 4209 4210 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); 4211 error = sysctl_handle_int(oidp, &val, 0, req); 4212 if (error || !req->newptr) 4213 return error; 4214 return 0; 4215 } 4216 4217 /** ixgbe_sysctl_tdt_handler - Handler function 4218 * Retrieves the TDT value from the hardware 4219 */ 4220 static int 4221 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 4222 { 4223 int error; 4224 4225 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 4226 if (!txr) return 0; 4227 4228 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); 4229 error = sysctl_handle_int(oidp, &val, 0, req); 4230 if (error || !req->newptr) 4231 return error; 4232 return 0; 4233 } 4234 4235 /** ixgbe_sysctl_rdh_handler - Handler function 4236 * Retrieves the RDH value from the hardware 4237 */ 4238 static int 4239 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 4240 { 4241 int error; 4242 4243 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 4244 if (!rxr) return 0; 4245 4246 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); 4247 error = sysctl_handle_int(oidp, &val, 0, req); 4248 if (error || !req->newptr) 4249 return error; 4250 return 0; 4251 } 4252 4253 /** ixgbe_sysctl_rdt_handler - Handler function 4254 * Retrieves the RDT value from the hardware 4255 */ 4256 static int 4257 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 4258 { 4259 int error; 4260 4261 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 4262 if (!rxr) return 0; 4263 4264 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); 4265 error = sysctl_handle_int(oidp, &val, 0, req); 4266 if (error || !req->newptr) 4267 return error; 4268 return 0; 4269 } 4270 4271 static int 4272 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 4273 { 4274 int error; 4275 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1); 4276 unsigned int reg, usec, rate; 4277 4278 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); 4279 usec = ((reg & 0x0FF8) >> 3); 4280 if (usec > 0) 4281 rate = 500000 / usec; 4282 else 4283 rate = 0; 4284 error = sysctl_handle_int(oidp, &rate, 0, req); 4285 if (error || !req->newptr) 4286 return error; 4287 reg &= ~0xfff; /* default, no limitation */ 4288 ixgbe_max_interrupt_rate = 0; 4289 if (rate > 0 && rate < 500000) { 4290 if 
(rate < 1000) 4291 rate = 1000; 4292 ixgbe_max_interrupt_rate = rate; 4293 reg |= ((4000000/rate) & 0xff8 ); 4294 } 4295 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); 4296 return 0; 4297 } 4298 4299 static void 4300 ixgbe_add_device_sysctls(struct adapter *adapter) 4301 { 4302 device_t dev = adapter->dev; 4303 struct ixgbe_hw *hw = &adapter->hw; 4304 struct sysctl_oid_list *child; 4305 struct sysctl_ctx_list *ctx; 4306 4307 ctx = device_get_sysctl_ctx(dev); 4308 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 4309 4310 /* Sysctls for all devices */ 4311 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", 4312 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4313 ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC); 4314 4315 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", 4316 CTLFLAG_RW, 4317 &ixgbe_enable_aim, 1, "Interrupt Moderation"); 4318 4319 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed", 4320 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4321 ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED); 4322 4323 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test", 4324 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4325 ixgbe_sysctl_thermal_test, "I", "Thermal Test"); 4326 4327 #ifdef IXGBE_DEBUG 4328 /* testing sysctls (for all devices) */ 4329 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state", 4330 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4331 ixgbe_sysctl_power_state, "I", "PCI Power State"); 4332 4333 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config", 4334 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0, 4335 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 4336 #endif 4337 /* for X550 series devices */ 4338 if (hw->mac.type >= ixgbe_mac_X550) 4339 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac", 4340 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4341 ixgbe_sysctl_dmac, "I", "DMA Coalesce"); 4342 4343 /* for X552 backplane devices */ 4344 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { 4345 struct sysctl_oid *eee_node; 4346 struct sysctl_oid_list *eee_list; 4347 4348 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee", 4349 CTLFLAG_RD, NULL, 4350 "Energy Efficient Ethernet sysctls"); 4351 eee_list = SYSCTL_CHILDREN(eee_node); 4352 4353 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable", 4354 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4355 ixgbe_sysctl_eee_enable, "I", 4356 "Enable or Disable EEE"); 4357 4358 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated", 4359 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4360 ixgbe_sysctl_eee_negotiated, "I", 4361 "EEE negotiated on link"); 4362 4363 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status", 4364 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4365 ixgbe_sysctl_eee_tx_lpi_status, "I", 4366 "Whether or not TX link is in LPI state"); 4367 4368 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status", 4369 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4370 ixgbe_sysctl_eee_rx_lpi_status, "I", 4371 "Whether or not RX link is in LPI state"); 4372 4373 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay", 4374 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4375 ixgbe_sysctl_eee_tx_lpi_delay, "I", 4376 "TX LPI entry delay in microseconds"); 4377 } 4378 4379 /* for WoL-capable devices */ 4380 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 4381 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable", 4382 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4383 ixgbe_sysctl_wol_enable, "I", 4384 "Enable/Disable Wake on LAN"); 4385 4386 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc", 4387 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4388 ixgbe_sysctl_wufc, "I", 4389 "Enable/Disable 
Wake Up Filters"); 4390 } 4391 4392 /* for X552/X557-AT devices */ 4393 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 4394 struct sysctl_oid *phy_node; 4395 struct sysctl_oid_list *phy_list; 4396 4397 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy", 4398 CTLFLAG_RD, NULL, 4399 "External PHY sysctls"); 4400 phy_list = SYSCTL_CHILDREN(phy_node); 4401 4402 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp", 4403 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4404 ixgbe_sysctl_phy_temp, "I", 4405 "Current External PHY Temperature (Celsius)"); 4406 4407 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred", 4408 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4409 ixgbe_sysctl_phy_overtemp_occurred, "I", 4410 "External PHY High Temperature Event Occurred"); 4411 } 4412 } 4413 4414 /* 4415 * Add sysctl variables, one per statistic, to the system. 4416 */ 4417 static void 4418 ixgbe_add_hw_stats(struct adapter *adapter) 4419 { 4420 device_t dev = adapter->dev; 4421 4422 struct tx_ring *txr = adapter->tx_rings; 4423 struct rx_ring *rxr = adapter->rx_rings; 4424 4425 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 4426 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 4427 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 4428 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 4429 4430 struct sysctl_oid *stat_node, *queue_node; 4431 struct sysctl_oid_list *stat_list, *queue_list; 4432 4433 #define QUEUE_NAME_LEN 32 4434 char namebuf[QUEUE_NAME_LEN]; 4435 4436 /* Driver Statistics */ 4437 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 4438 CTLFLAG_RD, &adapter->dropped_pkts, 4439 "Driver dropped packets"); 4440 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed", 4441 CTLFLAG_RD, &adapter->mbuf_defrag_failed, 4442 "m_defrag() failed"); 4443 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 4444 CTLFLAG_RD, &adapter->watchdog_events, 4445 "Watchdog timeouts"); 4446 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 4447 CTLFLAG_RD, &adapter->link_irq, 4448 "Link MSIX IRQ Handled"); 4449 4450 for (int i = 0; i < adapter->num_queues; i++, txr++) { 4451 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 4452 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 4453 CTLFLAG_RD, NULL, "Queue Name"); 4454 queue_list = SYSCTL_CHILDREN(queue_node); 4455 4456 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 4457 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i], 4458 sizeof(&adapter->queues[i]), 4459 ixgbe_sysctl_interrupt_rate_handler, "IU", 4460 "Interrupt Rate"); 4461 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 4462 CTLFLAG_RD, &(adapter->queues[i].irqs), 4463 "irqs on this queue"); 4464 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 4465 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 4466 ixgbe_sysctl_tdh_handler, "IU", 4467 "Transmit Descriptor Head"); 4468 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 4469 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 4470 ixgbe_sysctl_tdt_handler, "IU", 4471 "Transmit Descriptor Tail"); 4472 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx", 4473 CTLFLAG_RD, &txr->tso_tx, 4474 "TSO"); 4475 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup", 4476 CTLFLAG_RD, &txr->no_tx_dma_setup, 4477 "Driver tx dma failure in xmit"); 4478 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", 4479 CTLFLAG_RD, &txr->no_desc_avail, 4480 "Queue No Descriptor Available"); 4481 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 4482 CTLFLAG_RD, &txr->total_packets, 4483 "Queue Packets 
Transmitted"); 4484 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops", 4485 CTLFLAG_RD, &txr->br->br_drops, 4486 "Packets dropped in buf_ring"); 4487 } 4488 4489 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 4490 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 4491 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 4492 CTLFLAG_RD, NULL, "Queue Name"); 4493 queue_list = SYSCTL_CHILDREN(queue_node); 4494 4495 struct lro_ctrl *lro = &rxr->lro; 4496 4497 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 4498 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 4499 CTLFLAG_RD, NULL, "Queue Name"); 4500 queue_list = SYSCTL_CHILDREN(queue_node); 4501 4502 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 4503 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 4504 ixgbe_sysctl_rdh_handler, "IU", 4505 "Receive Descriptor Head"); 4506 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 4507 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 4508 ixgbe_sysctl_rdt_handler, "IU", 4509 "Receive Descriptor Tail"); 4510 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 4511 CTLFLAG_RD, &rxr->rx_packets, 4512 "Queue Packets Received"); 4513 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 4514 CTLFLAG_RD, &rxr->rx_bytes, 4515 "Queue Bytes Received"); 4516 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 4517 CTLFLAG_RD, &rxr->rx_copies, 4518 "Copied RX Frames"); 4519 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued", 4520 CTLFLAG_RD, &lro->lro_queued, 0, 4521 "LRO Queued"); 4522 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed", 4523 CTLFLAG_RD, &lro->lro_flushed, 0, 4524 "LRO Flushed"); 4525 } 4526 4527 /* MAC stats get the own sub node */ 4528 4529 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 4530 CTLFLAG_RD, NULL, "MAC Statistics"); 4531 stat_list = SYSCTL_CHILDREN(stat_node); 4532 4533 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 4534 CTLFLAG_RD, &stats->crcerrs, 4535 "CRC Errors"); 4536 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", 4537 CTLFLAG_RD, &stats->illerrc, 4538 "Illegal Byte Errors"); 4539 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", 4540 CTLFLAG_RD, &stats->errbc, 4541 "Byte Errors"); 4542 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", 4543 CTLFLAG_RD, &stats->mspdc, 4544 "MAC Short Packets Discarded"); 4545 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", 4546 CTLFLAG_RD, &stats->mlfc, 4547 "MAC Local Faults"); 4548 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", 4549 CTLFLAG_RD, &stats->mrfc, 4550 "MAC Remote Faults"); 4551 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", 4552 CTLFLAG_RD, &stats->rlec, 4553 "Receive Length Errors"); 4554 4555 /* Flow Control stats */ 4556 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 4557 CTLFLAG_RD, &stats->lxontxc, 4558 "Link XON Transmitted"); 4559 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 4560 CTLFLAG_RD, &stats->lxonrxc, 4561 "Link XON Received"); 4562 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 4563 CTLFLAG_RD, &stats->lxofftxc, 4564 "Link XOFF Transmitted"); 4565 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 4566 CTLFLAG_RD, &stats->lxoffrxc, 4567 "Link XOFF Received"); 4568 4569 /* Packet Reception Stats */ 4570 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", 4571 CTLFLAG_RD, &stats->tor, 4572 "Total Octets Received"); 4573 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 4574 CTLFLAG_RD, &stats->gorc, 4575 "Good Octets 
Received"); 4576 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", 4577 CTLFLAG_RD, &stats->tpr, 4578 "Total Packets Received"); 4579 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 4580 CTLFLAG_RD, &stats->gprc, 4581 "Good Packets Received"); 4582 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 4583 CTLFLAG_RD, &stats->mprc, 4584 "Multicast Packets Received"); 4585 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", 4586 CTLFLAG_RD, &stats->bprc, 4587 "Broadcast Packets Received"); 4588 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 4589 CTLFLAG_RD, &stats->prc64, 4590 "64 byte frames received "); 4591 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 4592 CTLFLAG_RD, &stats->prc127, 4593 "65-127 byte frames received"); 4594 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 4595 CTLFLAG_RD, &stats->prc255, 4596 "128-255 byte frames received"); 4597 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 4598 CTLFLAG_RD, &stats->prc511, 4599 "256-511 byte frames received"); 4600 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 4601 CTLFLAG_RD, &stats->prc1023, 4602 "512-1023 byte frames received"); 4603 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 4604 CTLFLAG_RD, &stats->prc1522, 4605 "1023-1522 byte frames received"); 4606 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 4607 CTLFLAG_RD, &stats->ruc, 4608 "Receive Undersized"); 4609 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 4610 CTLFLAG_RD, &stats->rfc, 4611 "Fragmented Packets Received "); 4612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 4613 CTLFLAG_RD, &stats->roc, 4614 "Oversized Packets Received"); 4615 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 4616 CTLFLAG_RD, &stats->rjc, 4617 "Received Jabber"); 4618 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 4619 CTLFLAG_RD, &stats->mngprc, 4620 "Management Packets Received"); 4621 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 4622 CTLFLAG_RD, &stats->mngptc, 4623 "Management Packets Dropped"); 4624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 4625 CTLFLAG_RD, &stats->xec, 4626 "Checksum Errors"); 4627 4628 /* Packet Transmission Stats */ 4629 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 4630 CTLFLAG_RD, &stats->gotc, 4631 "Good Octets Transmitted"); 4632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 4633 CTLFLAG_RD, &stats->tpt, 4634 "Total Packets Transmitted"); 4635 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 4636 CTLFLAG_RD, &stats->gptc, 4637 "Good Packets Transmitted"); 4638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 4639 CTLFLAG_RD, &stats->bptc, 4640 "Broadcast Packets Transmitted"); 4641 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 4642 CTLFLAG_RD, &stats->mptc, 4643 "Multicast Packets Transmitted"); 4644 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 4645 CTLFLAG_RD, &stats->mngptc, 4646 "Management Packets Transmitted"); 4647 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 4648 CTLFLAG_RD, &stats->ptc64, 4649 "64 byte frames transmitted "); 4650 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 4651 CTLFLAG_RD, &stats->ptc127, 4652 "65-127 byte frames transmitted"); 4653 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 4654 CTLFLAG_RD, &stats->ptc255, 4655 "128-255 byte frames transmitted"); 
4666
4667 static void
4668 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4669     const char *description, int *limit, int value)
4670 {
4671 	*limit = value;
4672 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4673 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4674 	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4675 }
4676
4677 /*
4678 ** Set flow control using sysctl:
4679 ** Flow control values:
4680 ** 	0 - off
4681 ** 	1 - rx pause
4682 ** 	2 - tx pause
4683 ** 	3 - full
4684 */
4685 static int
4686 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4687 {
4688 	int error, fc;
4689 	struct adapter *adapter;
4690
4691 	adapter = (struct adapter *) arg1;
4692 	fc = adapter->fc;
4693
4694 	error = sysctl_handle_int(oidp, &fc, 0, req);
4695 	if ((error) || (req->newptr == NULL))
4696 		return (error);
4697
4698 	/* Don't bother if it's not changed */
4699 	if (adapter->fc == fc)
4700 		return (0);
4701
4702 	return ixgbe_set_flowcntl(adapter, fc);
4703 }
4704
4705
4706 static int
4707 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4708 {
4709
4710 	switch (fc) {
4711 	case ixgbe_fc_rx_pause:
4712 	case ixgbe_fc_tx_pause:
4713 	case ixgbe_fc_full:
4714 		adapter->hw.fc.requested_mode = fc;
4715 		if (adapter->num_queues > 1)
4716 			ixgbe_disable_rx_drop(adapter);
4717 		break;
4718 	case ixgbe_fc_none:
4719 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
4720 		if (adapter->num_queues > 1)
4721 			ixgbe_enable_rx_drop(adapter);
4722 		break;
4723 	default:
4724 		return (EINVAL);
4725 	}
4726 	adapter->fc = fc;
4727 	/* Don't autoneg if forcing a value */
4728 	adapter->hw.fc.disable_fc_autoneg = TRUE;
4729 	ixgbe_fc_enable(&adapter->hw);
4730 	return (0);
4731 }
4732
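/*
 * Example (illustrative only): from userland,
 *
 *   # sysctl dev.ix.0.fc=3
 *
 * requests full (rx+tx) flow control on unit 0, mapping to
 * ixgbe_fc_full above; 0 disables pause entirely and, with multiple
 * queues, re-enables per-queue RX drops via ixgbe_enable_rx_drop().
 */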
4733 /*
4734 ** Control advertised link speed:
4735 ** 	Flags:
4736 ** 	0x1 - advertise 100 Mb
4737 ** 	0x2 - advertise 1G
4738 ** 	0x4 - advertise 10G
4739 */
4740 static int
4741 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4742 {
4743 	int error, advertise;
4744 	struct adapter *adapter;
4745
4746 	adapter = (struct adapter *) arg1;
4747 	advertise = adapter->advertise;
4748
4749 	error = sysctl_handle_int(oidp, &advertise, 0, req);
4750 	if ((error) || (req->newptr == NULL))
4751 		return (error);
4752
4753 	return ixgbe_set_advertise(adapter, advertise);
4754 }
4755
4756 static int
4757 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4758 {
4759 	device_t dev;
4760 	struct ixgbe_hw *hw;
4761 	ixgbe_link_speed speed;
4762
4763 	/* Checks to validate new value */
4764 	if (adapter->advertise == advertise) /* no change */
4765 		return (0);
4766
4767 	hw = &adapter->hw;
4768 	dev = adapter->dev;
4769
4770 	/* No speed changes for backplane media */
4771 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4772 		return (ENODEV);
4773
4774 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4775 	    (hw->phy.multispeed_fiber))) {
4776 		device_printf(dev,
4777 		    "Advertised speed can only be set on copper or "
4778 		    "multispeed fiber media types.\n");
4779 		return (EINVAL);
4780 	}
4781
4782 	if (advertise < 0x1 || advertise > 0x7) {
4783 		device_printf(dev,
4784 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4785 		return (EINVAL);
4786 	}
4787
4788 	if ((advertise & 0x1)
4789 	    && (hw->mac.type != ixgbe_mac_X540)
4790 	    && (hw->mac.type != ixgbe_mac_X550)) {
4791 		device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4792 		return (EINVAL);
4793 	}
4794
4795 	/* Set new value and report new advertised mode */
4796 	speed = 0;
4797 	if (advertise & 0x1)
4798 		speed |= IXGBE_LINK_SPEED_100_FULL;
4799 	if (advertise & 0x2)
4800 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4801 	if (advertise & 0x4)
4802 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4803 	adapter->advertise = advertise;
4804
4805 	hw->mac.autotry_restart = TRUE;
4806 	hw->mac.ops.setup_link(hw, speed, TRUE);
4807
4808 	return (0);
4809 }
4810
4811 /*
4812  * The following two sysctls are for X552/X557-AT devices;
4813  * they deal with the external PHY used in them.
4814  */
4815 static int
4816 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4817 {
4818 	struct adapter *adapter = (struct adapter *) arg1;
4819 	struct ixgbe_hw *hw = &adapter->hw;
4820 	u16 reg;
4821
4822 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4823 		device_printf(adapter->dev,
4824 		    "Device has no supported external thermal sensor.\n");
4825 		return (ENODEV);
4826 	}
4827
4828 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4829 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4830 	    &reg)) {
4831 		device_printf(adapter->dev,
4832 		    "Error reading from PHY's current temperature register\n");
4833 		return (EAGAIN);
4834 	}
4835
4836 	/* Shift temp for output */
4837 	reg = reg >> 8;
4838
4839 	return (sysctl_handle_int(oidp, NULL, reg, req));
4840 }
4841
4842 /*
4843  * Reports whether the current PHY temperature is over
4844  * the overtemp threshold.
4845  *  - This is reported directly from the PHY
4846  */
4847 static int
4848 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4849 {
4850 	struct adapter *adapter = (struct adapter *) arg1;
4851 	struct ixgbe_hw *hw = &adapter->hw;
4852 	u16 reg;
4853
4854 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4855 		device_printf(adapter->dev,
4856 		    "Device has no supported external thermal sensor.\n");
4857 		return (ENODEV);
4858 	}
4859
4860 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4861 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4862 	    &reg)) {
4863 		device_printf(adapter->dev,
4864 		    "Error reading from PHY's temperature status register\n");
4865 		return (EAGAIN);
4866 	}
4867
4868 	/* Get occurrence bit */
4869 	reg = !!(reg & 0x4000);
4870 	return (sysctl_handle_int(oidp, 0, reg, req));
4871 }
4872
4873 /*
4874 ** Thermal Shutdown Trigger (internal MAC)
4875 **  - Set this to 1 to cause an overtemp event to occur
4876 */
4877 static int
4878 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4879 {
4880 	struct adapter *adapter = (struct adapter *) arg1;
4881 	struct ixgbe_hw *hw = &adapter->hw;
4882 	int error, fire = 0;
4883
4884 	error = sysctl_handle_int(oidp, &fire, 0, req);
4885 	if ((error) || (req->newptr == NULL))
4886 		return (error);
4887
4888 	if (fire) {
4889 		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4890 		reg |= IXGBE_EICR_TS;
4891 		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4892 	}
4893
4894 	return (0);
4895 }
4896
4897 /*
4898 ** Manage DMA Coalescing.
4899 ** Control values:
4900 ** 	0/1 - off / on (use default value of 1000)
4901 **
4902 ** 	Legal timer values are:
4903 ** 	50,100,250,500,1000,2000,5000,10000
4904 **
4905 ** 	Turning off interrupt moderation will also turn this off.
4906 */ 4907 static int 4908 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4909 { 4910 struct adapter *adapter = (struct adapter *) arg1; 4911 struct ifnet *ifp = adapter->ifp; 4912 int error; 4913 u32 newval; 4914 4915 newval = adapter->dmac; 4916 error = sysctl_handle_int(oidp, &newval, 0, req); 4917 if ((error) || (req->newptr == NULL)) 4918 return (error); 4919 4920 switch (newval) { 4921 case 0: 4922 /* Disabled */ 4923 adapter->dmac = 0; 4924 break; 4925 case 1: 4926 /* Enable and use default */ 4927 adapter->dmac = 1000; 4928 break; 4929 case 50: 4930 case 100: 4931 case 250: 4932 case 500: 4933 case 1000: 4934 case 2000: 4935 case 5000: 4936 case 10000: 4937 /* Legal values - allow */ 4938 adapter->dmac = newval; 4939 break; 4940 default: 4941 /* Do nothing, illegal value */ 4942 return (EINVAL); 4943 } 4944 4945 /* Re-initialize hardware if it's already running */ 4946 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4947 ixgbe_init(adapter); 4948 4949 return (0); 4950 } 4951 4952 #ifdef IXGBE_DEBUG 4953 /** 4954 * Sysctl to test power states 4955 * Values: 4956 * 0 - set device to D0 4957 * 3 - set device to D3 4958 * (none) - get current device power state 4959 */ 4960 static int 4961 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS) 4962 { 4963 struct adapter *adapter = (struct adapter *) arg1; 4964 device_t dev = adapter->dev; 4965 int curr_ps, new_ps, error = 0; 4966 4967 curr_ps = new_ps = pci_get_powerstate(dev); 4968 4969 error = sysctl_handle_int(oidp, &new_ps, 0, req); 4970 if ((error) || (req->newptr == NULL)) 4971 return (error); 4972 4973 if (new_ps == curr_ps) 4974 return (0); 4975 4976 if (new_ps == 3 && curr_ps == 0) 4977 error = DEVICE_SUSPEND(dev); 4978 else if (new_ps == 0 && curr_ps == 3) 4979 error = DEVICE_RESUME(dev); 4980 else 4981 return (EINVAL); 4982 4983 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 4984 4985 return (error); 4986 } 4987 #endif 4988 /* 4989 * Sysctl to enable/disable the WoL capability, if supported by the adapter. 4990 * Values: 4991 * 0 - disabled 4992 * 1 - enabled 4993 */ 4994 static int 4995 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) 4996 { 4997 struct adapter *adapter = (struct adapter *) arg1; 4998 struct ixgbe_hw *hw = &adapter->hw; 4999 int new_wol_enabled; 5000 int error = 0; 5001 5002 new_wol_enabled = hw->wol_enabled; 5003 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); 5004 if ((error) || (req->newptr == NULL)) 5005 return (error); 5006 new_wol_enabled = !!(new_wol_enabled); 5007 if (new_wol_enabled == hw->wol_enabled) 5008 return (0); 5009 5010 if (new_wol_enabled > 0 && !adapter->wol_support) 5011 return (ENODEV); 5012 else 5013 hw->wol_enabled = new_wol_enabled; 5014 5015 return (0); 5016 } 5017 5018 /* 5019 * Sysctl to enable/disable the Energy Efficient Ethernet capability, 5020 * if supported by the adapter. 
5021 * Values: 5022 * 0 - disabled 5023 * 1 - enabled 5024 */ 5025 static int 5026 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 5027 { 5028 struct adapter *adapter = (struct adapter *) arg1; 5029 struct ixgbe_hw *hw = &adapter->hw; 5030 struct ifnet *ifp = adapter->ifp; 5031 int new_eee_enabled, error = 0; 5032 5033 new_eee_enabled = adapter->eee_enabled; 5034 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req); 5035 if ((error) || (req->newptr == NULL)) 5036 return (error); 5037 new_eee_enabled = !!(new_eee_enabled); 5038 if (new_eee_enabled == adapter->eee_enabled) 5039 return (0); 5040 5041 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee) 5042 return (ENODEV); 5043 else 5044 adapter->eee_enabled = new_eee_enabled; 5045 5046 /* Re-initialize hardware if it's already running */ 5047 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 5048 ixgbe_init(adapter); 5049 5050 return (0); 5051 } 5052 5053 /* 5054 * Read-only sysctl indicating whether EEE support was negotiated 5055 * on the link. 5056 */ 5057 static int 5058 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS) 5059 { 5060 struct adapter *adapter = (struct adapter *) arg1; 5061 struct ixgbe_hw *hw = &adapter->hw; 5062 bool status; 5063 5064 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG); 5065 5066 return (sysctl_handle_int(oidp, 0, status, req)); 5067 } 5068 5069 /* 5070 * Read-only sysctl indicating whether RX Link is in LPI state. 5071 */ 5072 static int 5073 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS) 5074 { 5075 struct adapter *adapter = (struct adapter *) arg1; 5076 struct ixgbe_hw *hw = &adapter->hw; 5077 bool status; 5078 5079 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & 5080 IXGBE_EEE_RX_LPI_STATUS); 5081 5082 return (sysctl_handle_int(oidp, 0, status, req)); 5083 } 5084 5085 /* 5086 * Read-only sysctl indicating whether TX Link is in LPI state. 5087 */ 5088 static int 5089 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS) 5090 { 5091 struct adapter *adapter = (struct adapter *) arg1; 5092 struct ixgbe_hw *hw = &adapter->hw; 5093 bool status; 5094 5095 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & 5096 IXGBE_EEE_TX_LPI_STATUS); 5097 5098 return (sysctl_handle_int(oidp, 0, status, req)); 5099 } 5100 5101 /* 5102 * Read-only sysctl indicating TX Link LPI delay 5103 */ 5104 static int 5105 ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS) 5106 { 5107 struct adapter *adapter = (struct adapter *) arg1; 5108 struct ixgbe_hw *hw = &adapter->hw; 5109 u32 reg; 5110 5111 reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU); 5112 5113 return (sysctl_handle_int(oidp, 0, reg >> 26, req)); 5114 } 5115 5116 /* 5117 * Sysctl to enable/disable the types of packets that the 5118 * adapter will wake up on upon receipt. 5119 * WUFC - Wake Up Filter Control 5120 * Flags: 5121 * 0x1 - Link Status Change 5122 * 0x2 - Magic Packet 5123 * 0x4 - Direct Exact 5124 * 0x8 - Directed Multicast 5125 * 0x10 - Broadcast 5126 * 0x20 - ARP/IPv4 Request Packet 5127 * 0x40 - Direct IPv4 Packet 5128 * 0x80 - Direct IPv6 Packet 5129 * 5130 * Setting another flag will cause the sysctl to return an 5131 * error. 
5132 */
5133 static int
5134 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5135 {
5136 	struct adapter *adapter = (struct adapter *) arg1;
5137 	int error = 0;
5138 	u32 new_wufc;
5139
5140 	new_wufc = adapter->wufc;
5141
5142 	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5143 	if ((error) || (req->newptr == NULL))
5144 		return (error);
5145 	if (new_wufc == adapter->wufc)
5146 		return (0);
5147
5148 	if (new_wufc & 0xffffff00)
5149 		return (EINVAL);
5150 	else {
5151 		/* Only the low byte is user-settable; keep the rest */
5152 		new_wufc &= 0xff;
5153 		new_wufc |= (0xffffff00 & adapter->wufc);
5154 		adapter->wufc = new_wufc;
5155 	}
5156
5157 	return (0);
5158 }
5159 #ifdef IXGBE_DEBUG
5160 static int
5161 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5162 {
5163 	struct adapter *adapter = (struct adapter *)arg1;
5164 	struct ixgbe_hw *hw = &adapter->hw;
5165 	device_t dev = adapter->dev;
5166 	int error = 0, reta_size;
5167 	struct sbuf *buf;
5168 	u32 reg;
5169
5170 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5171 	if (!buf) {
5172 		device_printf(dev, "Could not allocate sbuf for output.\n");
5173 		return (ENOMEM);
5174 	}
5175
5176 	// TODO: use sbufs to make a string to print out
5177 	/* Set multiplier for RETA setup and table size based on MAC */
5178 	switch (adapter->hw.mac.type) {
5179 	case ixgbe_mac_X550:
5180 	case ixgbe_mac_X550EM_x:
5181 		reta_size = 128;
5182 		break;
5183 	default:
5184 		reta_size = 32;
5185 		break;
5186 	}
5187
5188 	/* Print out the redirection table */
5189 	sbuf_cat(buf, "\n");
5190 	for (int i = 0; i < reta_size; i++) {
5191 		if (i < 32) {
5192 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5193 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5194 		} else {
5195 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5196 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5197 		}
5198 	}
5199
5200 	// TODO: print more config
5201
5202 	error = sbuf_finish(buf);
5203 	if (error)
5204 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5205
5206 	sbuf_delete(buf);
5207 	return (0);
5208 }
5209 #endif /* IXGBE_DEBUG */
5210
5211 /*
5212 ** Enable the hardware to drop packets when the buffer is
5213 ** full. This is useful when multiqueue is in use, so that no
5214 ** single full queue stalls the entire RX engine. We only
5215 ** enable this when Multiqueue is enabled AND Flow Control
5216 ** is disabled.
5217 */
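/*
 * Example (illustrative only): disabling flow control with
 * "sysctl dev.ix.0.fc=0" on a multiqueue adapter lands here; each
 * ring's SRRCTL register gets IXGBE_SRRCTL_DROP_EN set, so a stalled
 * queue drops its own frames instead of backpressuring the shared
 * packet buffer.
 */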
5218 static void
5219 ixgbe_enable_rx_drop(struct adapter *adapter)
5220 {
5221 	struct ixgbe_hw *hw = &adapter->hw;
5222
5223 	for (int i = 0; i < adapter->num_queues; i++) {
5224 		struct rx_ring *rxr = &adapter->rx_rings[i];
5225 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5226 		srrctl |= IXGBE_SRRCTL_DROP_EN;
5227 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5228 	}
5229 #ifdef PCI_IOV
5230 	/* enable drop for each vf */
5231 	for (int i = 0; i < adapter->num_vfs; i++) {
5232 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5233 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5234 		    IXGBE_QDE_ENABLE));
5235 	}
5236 #endif
5237 }
5238
5239 static void
5240 ixgbe_disable_rx_drop(struct adapter *adapter)
5241 {
5242 	struct ixgbe_hw *hw = &adapter->hw;
5243
5244 	for (int i = 0; i < adapter->num_queues; i++) {
5245 		struct rx_ring *rxr = &adapter->rx_rings[i];
5246 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5247 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5248 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5249 	}
5250 #ifdef PCI_IOV
5251 	/* disable drop for each vf */
5252 	for (int i = 0; i < adapter->num_vfs; i++) {
5253 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5254 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5255 	}
5256 #endif
5257 }
5258
5259 static void
5260 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5261 {
5262 	u32 mask;
5263
5264 	switch (adapter->hw.mac.type) {
5265 	case ixgbe_mac_82598EB:
5266 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5267 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5268 		break;
5269 	case ixgbe_mac_82599EB:
5270 	case ixgbe_mac_X540:
5271 	case ixgbe_mac_X550:
5272 	case ixgbe_mac_X550EM_x:
5273 		mask = (queues & 0xFFFFFFFF);
5274 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5275 		mask = (queues >> 32);
5276 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5277 		break;
5278 	default:
5279 		break;
5280 	}
5281 }
5282
5283 #ifdef PCI_IOV
5284
5285 /*
5286 ** Support functions for SRIOV/VF management
5287 */
5288
5289 static void
5290 ixgbe_ping_all_vfs(struct adapter *adapter)
5291 {
5292 	struct ixgbe_vf *vf;
5293
5294 	for (int i = 0; i < adapter->num_vfs; i++) {
5295 		vf = &adapter->vfs[i];
5296 		if (vf->flags & IXGBE_VF_ACTIVE)
5297 			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
5298 	}
5299 }
5300
5301
5302 static void
5303 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
5304     uint16_t tag)
5305 {
5306 	struct ixgbe_hw *hw;
5307 	uint32_t vmolr, vmvir;
5308
5309 	hw = &adapter->hw;
5310
5311 	vf->vlan_tag = tag;
5312
5313 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
5314
5315 	/* Do not receive packets that pass inexact filters. */
5316 	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
5317
5318 	/* Disable Multicast Promiscuous Mode. */
5319 	vmolr &= ~IXGBE_VMOLR_MPE;
5320
5321 	/* Accept broadcasts. */
5322 	vmolr |= IXGBE_VMOLR_BAM;
5323
5324 	if (tag == 0) {
5325 		/* Accept non-vlan tagged traffic. */
5326 		//vmolr |= IXGBE_VMOLR_AUPE;
5327
5328 		/* Allow VM to tag outgoing traffic; no default tag. */
5329 		vmvir = 0;
5330 	} else {
5331 		/* Require vlan-tagged traffic. */
5332 		vmolr &= ~IXGBE_VMOLR_AUPE;
5333
5334 		/* Tag all traffic with provided vlan tag.
*/ 5335 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT); 5336 } 5337 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr); 5338 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir); 5339 } 5340 5341 5342 static boolean_t 5343 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf) 5344 { 5345 5346 /* 5347 * Frame size compatibility between PF and VF is only a problem on 5348 * 82599-based cards. X540 and later support any combination of jumbo 5349 * frames on PFs and VFs. 5350 */ 5351 if (adapter->hw.mac.type != ixgbe_mac_82599EB) 5352 return (TRUE); 5353 5354 switch (vf->api_ver) { 5355 case IXGBE_API_VER_1_0: 5356 case IXGBE_API_VER_UNKNOWN: 5357 /* 5358 * On legacy (1.0 and older) VF versions, we don't support jumbo 5359 * frames on either the PF or the VF. 5360 */ 5361 if (adapter->max_frame_size > ETHER_MAX_LEN || 5362 vf->max_frame_size > ETHER_MAX_LEN) 5363 return (FALSE); 5364 5365 return (TRUE); 5366 5367 break; 5368 case IXGBE_API_VER_1_1: 5369 default: 5370 /* 5371 * 1.1 or later VF versions always work if they aren't using 5372 * jumbo frames. 5373 */ 5374 if (vf->max_frame_size <= ETHER_MAX_LEN) 5375 return (TRUE); 5376 5377 /* 5378 * Jumbo frames only work with VFs if the PF is also using jumbo 5379 * frames. 5380 */ 5381 if (adapter->max_frame_size <= ETHER_MAX_LEN) 5382 return (TRUE); 5383 5384 return (FALSE); 5385 5386 } 5387 } 5388 5389 5390 static void 5391 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf) 5392 { 5393 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan); 5394 5395 // XXX clear multicast addresses 5396 5397 ixgbe_clear_rar(&adapter->hw, vf->rar_index); 5398 5399 vf->api_ver = IXGBE_API_VER_UNKNOWN; 5400 } 5401 5402 5403 static void 5404 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf) 5405 { 5406 struct ixgbe_hw *hw; 5407 uint32_t vf_index, vfte; 5408 5409 hw = &adapter->hw; 5410 5411 vf_index = IXGBE_VF_INDEX(vf->pool); 5412 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index)); 5413 vfte |= IXGBE_VF_BIT(vf->pool); 5414 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte); 5415 } 5416 5417 5418 static void 5419 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf) 5420 { 5421 struct ixgbe_hw *hw; 5422 uint32_t vf_index, vfre; 5423 5424 hw = &adapter->hw; 5425 5426 vf_index = IXGBE_VF_INDEX(vf->pool); 5427 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index)); 5428 if (ixgbe_vf_frame_size_compatible(adapter, vf)) 5429 vfre |= IXGBE_VF_BIT(vf->pool); 5430 else 5431 vfre &= ~IXGBE_VF_BIT(vf->pool); 5432 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre); 5433 } 5434 5435 5436 static void 5437 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) 5438 { 5439 struct ixgbe_hw *hw; 5440 uint32_t ack; 5441 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN]; 5442 5443 hw = &adapter->hw; 5444 5445 ixgbe_process_vf_reset(adapter, vf); 5446 5447 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { 5448 ixgbe_set_rar(&adapter->hw, vf->rar_index, 5449 vf->ether_addr, vf->pool, TRUE); 5450 ack = IXGBE_VT_MSGTYPE_ACK; 5451 } else 5452 ack = IXGBE_VT_MSGTYPE_NACK; 5453 5454 ixgbe_vf_enable_transmit(adapter, vf); 5455 ixgbe_vf_enable_receive(adapter, vf); 5456 5457 vf->flags |= IXGBE_VF_CTS; 5458 5459 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS; 5460 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); 5461 resp[3] = hw->mac.mc_filter_type; 5462 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool); 5463 } 5464 5465 5466 static void 5467 ixgbe_vf_set_mac(struct adapter *adapter, struct 
ixgbe_vf *vf, uint32_t *msg)
5468 {
5469 	uint8_t *mac;
5470
5471 	mac = (uint8_t*)&msg[1];
5472
5473 	/* Check that the VF has permission to change the MAC address. */
5474 	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5475 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5476 		return;
5477 	}
5478
5479 	if (ixgbe_validate_mac_addr(mac) != 0) {
5480 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5481 		return;
5482 	}
5483
5484 	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5485
5486 	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5487 	    vf->pool, TRUE);
5488
5489 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5490 }
5491
5492
5493 /*
5494 ** VF multicast filtering works by setting the appropriate bit in
5495 ** one of the 128 32-bit MTA hash registers (4096 hash buckets in
5496 ** all); e.g. hash value 0x0563 selects bit 3 of MTA[0x2B].
5497 */
5498 static void
5499 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5500 {
5501 	u16 *list = (u16*)&msg[1];
5502 	int entries;
5503 	u32 vmolr, vec_bit, vec_reg, mta_reg;
5504
5505 	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5506 	entries = min(entries, IXGBE_MAX_VF_MC);
5507
5508 	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5509
5510 	vf->num_mc_hashes = entries;
5511
5512 	/* Set the appropriate MTA bit */
5513 	for (int i = 0; i < entries; i++) {
5514 		vf->mc_hash[i] = list[i];
5515 		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5516 		vec_bit = vf->mc_hash[i] & 0x1F;
5517 		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5518 		mta_reg |= (1 << vec_bit);
5519 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5520 	}
5521
5522 	vmolr |= IXGBE_VMOLR_ROMPE;
5523 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5524 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5525 	return;
5526 }
5527
5528
5529 static void
5530 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5531 {
5532 	struct ixgbe_hw *hw;
5533 	int enable;
5534 	uint16_t tag;
5535
5536 	hw = &adapter->hw;
5537 	enable = IXGBE_VT_MSGINFO(msg[0]);
5538 	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5539
5540 	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5541 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5542 		return;
5543 	}
5544
5545 	/* It is illegal to enable vlan tag 0. */
5546 	if (tag == 0 && enable != 0) {
5547 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5548 		return;
5549 	}
5550
5551 	ixgbe_set_vfta(hw, tag, vf->pool, enable);
5552 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5553 }
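/*
 * Example (illustrative only): a VF joining VLAN 100 sends
 * IXGBE_VF_SET_VLAN with a nonzero enable flag in msg[0] and
 * msg[1] = 100; the PF then sets the VLAN filter table (VFTA) entry
 * for tag 100 on that VF's pool.  Tag 0 with enable set is rejected
 * above because VLAN ID 0 denotes priority-tagged frames rather than
 * a real VLAN membership.
 */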
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;

	/* The VF reports its desired maximum frame size in msg[1]. */
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
}


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	// XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
}


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
}


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
}


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    adapter->ifp->if_xname, msg[0], vf->pool);

	/* A reset request is handled regardless of the CTS state. */
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	/* Ignore any other request until the VF has completed a reset. */
	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
}

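/*
 * A note on the mailbox word format decoded above: each 32-bit message
 * word multiplexes several fields (see ixgbe_mbx.h for the exact
 * values).  The high-order bits carry the handshake flags
 * (IXGBE_VT_MSGTYPE_ACK/NACK/CTS), the IXGBE_VT_MSGINFO field holds a
 * small per-command argument (e.g. the multicast address count in
 * IXGBE_VF_SET_MULTICAST), and the bits covered by IXGBE_VT_MSG_MASK
 * select the command itself, which is why the dispatch above masks
 * msg[0] before switching on it.
 */
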
/*
 * Tasklet for handling VF -> PF mailbox messages.
 */
static void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	adapter = context;
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (ixgbe_check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (ixgbe_check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (ixgbe_check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
}


/*
 * Called by the PCI SR-IOV framework when VFs are being created:
 * validates the requested VF count, allocates the VF state array and
 * reinitializes the hardware in virtualization mode.
 */
static int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	enum ixgbe_iov_mode mode;

	adapter = device_get_softc(dev);
	adapter->num_vfs = num_vfs;
	mode = ixgbe_get_iov_mode(adapter);

	if (num_vfs > ixgbe_max_vfs(mode)) {
		adapter->num_vfs = 0;
		return (ENOSPC);
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
	    M_NOWAIT | M_ZERO);
	if (adapter->vfs == NULL) {
		adapter->num_vfs = 0;
		IXGBE_CORE_UNLOCK(adapter);
		return (ENOMEM);
	}

	ixgbe_init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (0);
}

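/*
 * ixgbe_init_iov() and ixgbe_add_vf() below consume the nvlists that
 * the PCI SR-IOV framework builds from iovctl(8) configuration.  A
 * minimal iovctl.conf(5) sketch that exercises the "mac-addr" and
 * "allow-set-mac" keys handled in ixgbe_add_vf() might look like this
 * (the device name "ix0" and the MAC value are illustrative only):
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : true;
 *	}
 *
 * and would be applied with "iovctl -C -f /etc/iovctl.conf".
 */
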
static void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
	    IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
	    IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;

	IXGBE_CORE_UNLOCK(adapter);
}

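/*
 * The VFRE/VFTE manipulation in ixgbe_uninit_iov() above relies on the
 * pool enable bitmap spanning two 32-bit registers.  Assuming the usual
 * definitions in ixgbe.h,
 *
 *	IXGBE_VF_INDEX(pool) == pool / 32
 *	IXGBE_VF_BIT(pool)   == 1 << (pool % 32)
 *
 * pool 37, for example, maps to bit 5 of VFRE[1]/VFTE[1].  That is why
 * the code sets only the PF's bit in its own register and then clears
 * the other register outright: together the two writes disable every VF
 * pool while leaving the PF operational.
 */
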
static void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	enum ixgbe_iov_mode mode;
	int i;

	mode = ixgbe_get_iov_mode(adapter);
	if (mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;

	switch (mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	mtqc = IXGBE_MTQC_VT_ENA;
	switch (mode) {
	case IXGBE_64_VM:
		mtqc |= IXGBE_MTQC_64VF;
		break;
	case IXGBE_32_VM:
		mtqc |= IXGBE_MTQC_32VF;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);

	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	switch (mode) {
	case IXGBE_64_VM:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		break;
	case IXGBE_32_VM:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (mode) {
	case IXGBE_64_VM:
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
	    IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
	    IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
}


/*
** Check the max frame setting of all active VFs
*/
static void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
}


static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
}


static int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/* Set the base flags first so the capability bits below survive. */
	vf->flags = IXGBE_VF_ACTIVE;

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
}
#endif /* PCI_IOV */

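/*
 * Note: ixgbe_init_iov(), ixgbe_uninit_iov() and ixgbe_add_vf() are the
 * driver's half of the FreeBSD PCI SR-IOV interface.  They are expected
 * to be wired into the device method table earlier in this file, along
 * the lines of the following sketch (guarded by PCI_IOV; the exact
 * table contents may differ):
 *
 *	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
 *	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
 *	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
 */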