/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "3.1.13-k";


/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixgbe_probe(device_t);
static int	ixgbe_attach(device_t);
static int	ixgbe_detach(device_t);
static int	ixgbe_shutdown(device_t);
static int	ixgbe_suspend(device_t);
static int	ixgbe_resume(device_t);
static int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixgbe_init(void *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
static uint64_t	ixgbe_get_counter(struct ifnet *, ift_counter);
#endif
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static void	ixgbe_identify_hardware(struct adapter *);
static int	ixgbe_allocate_pci_resources(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *);
static int	ixgbe_allocate_legacy(struct adapter *);
static int	ixgbe_setup_msix(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_promisc(struct adapter *);
static void	ixgbe_set_multi(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int	ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);
static void	ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void	ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void	ixgbe_msix_que(void *);
static void	ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void	ixgbe_handle_que(void *, int);
static void	ixgbe_handle_link(void *, int);
static void	ixgbe_handle_msf(void *, int);
static void	ixgbe_handle_mod(void *, int);
static void	ixgbe_handle_phy(void *, int);

#ifdef IXGBE_FDIR
static void	ixgbe_reinit_fdir(void *, int);
#endif

#ifdef PCI_IOV
static void	ixgbe_ping_all_vfs(struct adapter *);
static void	ixgbe_handle_mbx(void *, int);
static int	ixgbe_init_iov(device_t, u16, const nvlist_t *);
static void	ixgbe_uninit_iov(device_t);
static int	ixgbe_add_vf(device_t, u16, const nvlist_t *);
static void	ixgbe_initialize_iov(struct adapter *);
static void	ixgbe_recalculate_max_frame(struct adapter *);
static void	ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
#endif /* PCI_IOV */


/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
    "IXGBE driver parameters");

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, "
    "-1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, "
    "-1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
** Smart speed setting, default to on;
** this only works as a compile option
** right now as it's set during attach.
** Set this to 'ixgbe_smart_speed_off'
** to disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");
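
/*
 * Note (informational): the hw.ix.* knobs above are exported both as
 * sysctls and as loader tunables (CTLFLAG_RDTUN/RWTUN), so they can be
 * set from /boot/loader.conf, e.g.:
 *
 *	hw.ix.enable_msix="0"	# fall back to MSI/legacy interrupts
 *	hw.ix.enable_aim="0"	# disable adaptive interrupt moderation
 */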

/*
** Number of Queues, can be set to 0,
** it then autoconfigures based on the
** number of cpus with a max of 8. This
** can be overridden manually here.
*/
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
** Defining this on will allow the use
** of unsupported SFP+ modules, note that
** doing so you are on your own :)
*/
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool, this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

#ifdef DEV_NETMAP
/*
 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
 * be a reference on how to implement netmap support in a driver.
 * Additional comments are in ixgbe_netmap.h .
 *
 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
 * that extend the standard driver.
 */
#include <dev/netmap/ixgbe_netmap.h>
#endif /* DEV_NETMAP */

static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  an adapter based on the PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	INIT_DEBUGOUT("ixgbe_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixgbe_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixgbe_strings[ent->index],
			    ixgbe_driver_version);
			device_set_desc_copy(dev, adapter_name);
			++ixgbe_total_ports;
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
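
/*
 * Illustrative note on the table matching in ixgbe_probe() above: a
 * subvendor/subdevice of 0 in ixgbe_vendor_info_array acts as a
 * wildcard, so e.g. an adapter with device id IXGBE_DEV_ID_82599_SFP
 * matches regardless of its subsystem ids, and the device description
 * becomes the branding string plus the driver version:
 * "Intel(R) PRO/10GbE PCI-Express Network Driver, Version - 3.1.13-k".
 */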

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int	error = 0;
	u16	csum;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;
#endif

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
	ixgbe_set_flowcntl(adapter, ixgbe_flow_control);
	adapter->enable_aim = ixgbe_enable_aim;

	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/* Set an initial default flow control & dmac value */
	adapter->fc = ixgbe_fc_full;
	adapter->dmac = 0;
	adapter->eee_enabled = 0;

#ifdef PCI_IOV
	if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
		nvlist_t *pf_schema, *vf_schema;

		hw->mbx.ops.init_params(hw);
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (error != 0) {
			device_printf(dev,
			    "Error %d setting up SR-IOV\n", error);
		}
	}
#endif /* PCI_IOV */

	/* Check for certain supported features */
	ixgbe_check_wol_support(adapter);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	if (pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}
#endif /* PCI_IOV */

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
#ifdef PCI_IOV
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
#endif
		taskqueue_drain(adapter->tq, &adapter->phy_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

/**
 * Methods for going from:
 * D0 -> D3: ixgbe_suspend
 * D3 -> D0: ixgbe_resume
 */
static int
ixgbe_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	IXGBE_CORE_LOCK(adapter);

	error = ixgbe_setup_low_power_mode(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}

static int
ixgbe_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 wus;

	INIT_DEBUGOUT("ixgbe_resume: begin");

	IXGBE_CORE_LOCK(adapter);

	/* Read & clear WUS register */
	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
	if (wus)
		device_printf(dev, "Woken up by (WUS): %#010x\n",
		    IXGBE_READ_REG(hw, IXGBE_WUS));
	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
	/* And clear WUFC until next low-power transition */
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

	/*
	 * Required after D3->D0 transition;
	 * will re-advertise all previous advertised speeds
	 */
	if (ifp->if_flags & IFF_UP)
		ixgbe_init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (0);
}


/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	int	error = 0;
	bool	avoid_reset = FALSE;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + IXGBE_MTU_HDR;
			ixgbe_init_locked(adapter);
#ifdef PCI_IOV
			ixgbe_recalculate_max_frame(adapter);
#endif
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (!mask)
			break;

		/* HW cannot turn these on/off separately */
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		}
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
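	/*
	 * Note (informational): the 0xA0/0xA2 device addresses accepted
	 * above are the standard SFP+ module I2C addresses defined by
	 * SFF-8472: 0xA0 is the identification EEPROM page and 0xA2 the
	 * diagnostic monitoring page; userland (e.g. ifconfig -v) reads
	 * module data through this SIOCGI2C path.
	 */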
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp->if_hwassist = 0;
#if __FreeBSD_version >= 1000000
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_IP_SCTP;
	}
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
		ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_IP6_SCTP;
	}
#else
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
	}
#endif
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	int		err = 0;
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	adapter->pool = ixgbe_max_vfs(mode);
	/* Queue indices may change with IOV mode */
	for (int i = 0; i < adapter->num_queues; i++) {
		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
	}
#endif
	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
#ifdef PCI_IOV
	ixgbe_initialize_iov(adapter);
#endif
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSIX interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Configure Energy Efficient Ethernet for supported devices */
	if (hw->mac.ops.setup_eee) {
		err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
		if (err)
			device_printf(dev, "Error setting up EEE: %d\n", err);
	}

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

#ifdef PCI_IOV
	/* Enable the use of the MBX by the VF's */
	{
		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		reg |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
	}
#endif

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}

static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}

static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Fan Failure Interrupt */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		gpie |= IXGBE_SDP1_GPIEN;

	/*
	 * Module detection (SDP2)
	 * Media ready (SDP1)
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN;
		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
			gpie |= IXGBE_SDP1_GPIEN;
	}

	/*
	 * Thermal Failure Detection (X540)
	 * Link Detection (X552 SFP+, X552/X557-AT)
	 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	return;
}
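
/*
 * Watermark sketch (informational): ixgbe_config_delay_values() below
 * derives the flow-control thresholds from the maximum frame size.
 * The IXGBE_DV*()/IXGBE_LOW_DV*() macros yield a delay allowance that
 * IXGBE_BT2KB() converts to KB, and the RXPBSIZE read is shifted right
 * by 10 to convert the packet-buffer size from bytes to KB; the high
 * watermark is then the buffer size minus that allowance.
 */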

/*
 * Requires adapter->max_frame_size to be set.
 */
static void
ixgbe_config_delay_values(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.requested_mode = adapter->fc;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = ((u64)1 << vector);	/* shift in 64 bits, not int */
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
}

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = ((u64)1 << vector);	/* shift in 64 bits, not int */
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
}
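
/*
 * Example (illustrative) for the queue (un)masking above: on MACs other
 * than 82598 the 64-bit queue mask spans two 32-bit registers, so
 * vector 3 sets bit 3 of EIMS_EX(0)/EIMC_EX(0), while vector 35 sets
 * bit 3 of the upper registers EIMS_EX(1)/EIMC_EX(1).
 */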

static void
ixgbe_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more;
	u32		reg_eicr;


	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}


/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;


	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
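	/*
	 * Worked example (illustrative): with an average packet size of
	 * 1500 bytes, newitr = 1500 + 24 = 1524; that is under the 3000
	 * cap and outside the 300-1200 mid range, so it is halved to 762.
	 * A mid-range average (say 600 bytes -> 624) would instead be
	 * divided by 3, giving 208.
	 */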
1593 */ 1594 if (que->eitr_setting) 1595 IXGBE_WRITE_REG(&adapter->hw, 1596 IXGBE_EITR(que->msix), que->eitr_setting); 1597 1598 que->eitr_setting = 0; 1599 1600 /* Idle, do nothing */ 1601 if ((txr->bytes == 0) && (rxr->bytes == 0)) 1602 goto no_calc; 1603 1604 if ((txr->bytes) && (txr->packets)) 1605 newitr = txr->bytes/txr->packets; 1606 if ((rxr->bytes) && (rxr->packets)) 1607 newitr = max(newitr, 1608 (rxr->bytes / rxr->packets)); 1609 newitr += 24; /* account for hardware frame, crc */ 1610 1611 /* set an upper boundary */ 1612 newitr = min(newitr, 3000); 1613 1614 /* Be nice to the mid range */ 1615 if ((newitr > 300) && (newitr < 1200)) 1616 newitr = (newitr / 3); 1617 else 1618 newitr = (newitr / 2); 1619 1620 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1621 newitr |= newitr << 16; 1622 else 1623 newitr |= IXGBE_EITR_CNT_WDIS; 1624 1625 /* save for next interrupt */ 1626 que->eitr_setting = newitr; 1627 1628 /* Reset state */ 1629 txr->bytes = 0; 1630 txr->packets = 0; 1631 rxr->bytes = 0; 1632 rxr->packets = 0; 1633 1634 no_calc: 1635 if (more) 1636 taskqueue_enqueue(que->tq, &que->que_task); 1637 else 1638 ixgbe_enable_queue(adapter, que->msix); 1639 return; 1640 } 1641 1642 1643 static void 1644 ixgbe_msix_link(void *arg) 1645 { 1646 struct adapter *adapter = arg; 1647 struct ixgbe_hw *hw = &adapter->hw; 1648 u32 reg_eicr, mod_mask; 1649 1650 ++adapter->link_irq; 1651 1652 /* Pause other interrupts */ 1653 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 1654 1655 /* First get the cause */ 1656 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 1657 /* Be sure the queue bits are not cleared */ 1658 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE; 1659 /* Clear interrupt with write */ 1660 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr); 1661 1662 /* Link status change */ 1663 if (reg_eicr & IXGBE_EICR_LSC) { 1664 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 1665 taskqueue_enqueue(adapter->tq, &adapter->link_task); 1666 } 1667 1668 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 1669 #ifdef IXGBE_FDIR 1670 if (reg_eicr & IXGBE_EICR_FLOW_DIR) { 1671 /* This is probably overkill :) */ 1672 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1)) 1673 return; 1674 /* Disable the interrupt */ 1675 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); 1676 taskqueue_enqueue(adapter->tq, &adapter->fdir_task); 1677 } else 1678 #endif 1679 if (reg_eicr & IXGBE_EICR_ECC) { 1680 device_printf(adapter->dev, "CRITICAL: ECC ERROR!! " 1681 "Please Reboot!!\n"); 1682 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 1683 } 1684 1685 /* Check for over temp condition */ 1686 if (reg_eicr & IXGBE_EICR_TS) { 1687 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! 
" 1688 "PHY IS SHUT DOWN!!\n"); 1689 device_printf(adapter->dev, "System shutdown required!\n"); 1690 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 1691 } 1692 #ifdef PCI_IOV 1693 if (reg_eicr & IXGBE_EICR_MAILBOX) 1694 taskqueue_enqueue(adapter->tq, &adapter->mbx_task); 1695 #endif 1696 } 1697 1698 /* Pluggable optics-related interrupt */ 1699 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) 1700 mod_mask = IXGBE_EICR_GPI_SDP0_X540; 1701 else 1702 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 1703 1704 if (ixgbe_is_sfp(hw)) { 1705 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 1706 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 1707 taskqueue_enqueue(adapter->tq, &adapter->msf_task); 1708 } else if (reg_eicr & mod_mask) { 1709 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask); 1710 taskqueue_enqueue(adapter->tq, &adapter->mod_task); 1711 } 1712 } 1713 1714 /* Check for fan failure */ 1715 if ((hw->device_id == IXGBE_DEV_ID_82598AT) && 1716 (reg_eicr & IXGBE_EICR_GPI_SDP1)) { 1717 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1718 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! " 1719 "REPLACE IMMEDIATELY!!\n"); 1720 } 1721 1722 /* External PHY interrupt */ 1723 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 1724 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) { 1725 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 1726 taskqueue_enqueue(adapter->tq, &adapter->phy_task); 1727 } 1728 1729 /* Re-enable other interrupts */ 1730 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1731 return; 1732 } 1733 1734 /********************************************************************* 1735 * 1736 * Media Ioctl callback 1737 * 1738 * This routine is called whenever the user queries the status of 1739 * the interface using ifconfig. 
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	** XXX: These need to use the proper media types once
	** they're added.
	*/
1822 */ 1823 #ifndef IFM_ETH_XTYPE 1824 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 1825 switch (adapter->link_speed) { 1826 case IXGBE_LINK_SPEED_10GB_FULL: 1827 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 1828 break; 1829 case IXGBE_LINK_SPEED_2_5GB_FULL: 1830 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 1831 break; 1832 case IXGBE_LINK_SPEED_1GB_FULL: 1833 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 1834 break; 1835 } 1836 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 1837 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 1838 switch (adapter->link_speed) { 1839 case IXGBE_LINK_SPEED_10GB_FULL: 1840 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 1841 break; 1842 case IXGBE_LINK_SPEED_2_5GB_FULL: 1843 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 1844 break; 1845 case IXGBE_LINK_SPEED_1GB_FULL: 1846 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 1847 break; 1848 } 1849 #else 1850 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 1851 switch (adapter->link_speed) { 1852 case IXGBE_LINK_SPEED_10GB_FULL: 1853 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 1854 break; 1855 case IXGBE_LINK_SPEED_2_5GB_FULL: 1856 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 1857 break; 1858 case IXGBE_LINK_SPEED_1GB_FULL: 1859 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 1860 break; 1861 } 1862 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 1863 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 1864 switch (adapter->link_speed) { 1865 case IXGBE_LINK_SPEED_10GB_FULL: 1866 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 1867 break; 1868 case IXGBE_LINK_SPEED_2_5GB_FULL: 1869 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 1870 break; 1871 case IXGBE_LINK_SPEED_1GB_FULL: 1872 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 1873 break; 1874 } 1875 #endif 1876 1877 /* If nothing is recognized... */ 1878 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 1879 ifmr->ifm_active |= IFM_UNKNOWN; 1880 1881 #if __FreeBSD_version >= 900025 1882 /* Display current flow control setting used on link */ 1883 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 1884 hw->fc.current_mode == ixgbe_fc_full) 1885 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 1886 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 1887 hw->fc.current_mode == ixgbe_fc_full) 1888 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 1889 #endif 1890 1891 IXGBE_CORE_UNLOCK(adapter); 1892 1893 return; 1894 } 1895 1896 /********************************************************************* 1897 * 1898 * Media Ioctl callback 1899 * 1900 * This routine is called when the user changes speed/duplex using 1901 * media/mediopt option with ifconfig. 1902 * 1903 **********************************************************************/ 1904 static int 1905 ixgbe_media_change(struct ifnet * ifp) 1906 { 1907 struct adapter *adapter = ifp->if_softc; 1908 struct ifmedia *ifm = &adapter->media; 1909 struct ixgbe_hw *hw = &adapter->hw; 1910 ixgbe_link_speed speed = 0; 1911 1912 INIT_DEBUGOUT("ixgbe_media_change: begin"); 1913 1914 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1915 return (EINVAL); 1916 1917 if (hw->phy.media_type == ixgbe_media_type_backplane) 1918 return (ENODEV); 1919 1920 /* 1921 ** We don't actually need to check against the supported 1922 ** media types of the adapter; ifmedia will take care of 1923 ** that for us. 
1924 */ 1925 #ifndef IFM_ETH_XTYPE 1926 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1927 case IFM_AUTO: 1928 case IFM_10G_T: 1929 speed |= IXGBE_LINK_SPEED_100_FULL; 1930 case IFM_10G_LRM: 1931 case IFM_10G_SR: /* KR, too */ 1932 case IFM_10G_LR: 1933 case IFM_10G_CX4: /* KX4 */ 1934 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1935 case IFM_10G_TWINAX: 1936 speed |= IXGBE_LINK_SPEED_10GB_FULL; 1937 break; 1938 case IFM_1000_T: 1939 speed |= IXGBE_LINK_SPEED_100_FULL; 1940 case IFM_1000_LX: 1941 case IFM_1000_SX: 1942 case IFM_1000_CX: /* KX */ 1943 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1944 break; 1945 case IFM_100_TX: 1946 speed |= IXGBE_LINK_SPEED_100_FULL; 1947 break; 1948 default: 1949 goto invalid; 1950 } 1951 #else 1952 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1953 case IFM_AUTO: 1954 case IFM_10G_T: 1955 speed |= IXGBE_LINK_SPEED_100_FULL; 1956 case IFM_10G_LRM: 1957 case IFM_10G_KR: 1958 case IFM_10G_LR: 1959 case IFM_10G_KX4: 1960 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1961 case IFM_10G_TWINAX: 1962 speed |= IXGBE_LINK_SPEED_10GB_FULL; 1963 break; 1964 case IFM_1000_T: 1965 speed |= IXGBE_LINK_SPEED_100_FULL; 1966 case IFM_1000_LX: 1967 case IFM_1000_SX: 1968 case IFM_1000_KX: 1969 speed |= IXGBE_LINK_SPEED_1GB_FULL; 1970 break; 1971 case IFM_100_TX: 1972 speed |= IXGBE_LINK_SPEED_100_FULL; 1973 break; 1974 default: 1975 goto invalid; 1976 } 1977 #endif 1978 1979 hw->mac.autotry_restart = TRUE; 1980 hw->mac.ops.setup_link(hw, speed, TRUE); 1981 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1982 adapter->advertise = 0; 1983 } else { 1984 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0) 1985 adapter->advertise |= 1 << 2; 1986 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0) 1987 adapter->advertise |= 1 << 1; 1988 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0) 1989 adapter->advertise |= 1 << 0; 1990 } 1991 1992 return (0); 1993 1994 invalid: 1995 device_printf(adapter->dev, "Invalid media type!\n"); 1996 return (EINVAL); 1997 } 1998 1999 static void 2000 ixgbe_set_promisc(struct adapter *adapter) 2001 { 2002 u_int32_t reg_rctl; 2003 struct ifnet *ifp = adapter->ifp; 2004 int mcnt = 0; 2005 2006 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2007 reg_rctl &= (~IXGBE_FCTRL_UPE); 2008 if (ifp->if_flags & IFF_ALLMULTI) 2009 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2010 else { 2011 struct ifmultiaddr *ifma; 2012 #if __FreeBSD_version < 800000 2013 IF_ADDR_LOCK(ifp); 2014 #else 2015 if_maddr_rlock(ifp); 2016 #endif 2017 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2018 if (ifma->ifma_addr->sa_family != AF_LINK) 2019 continue; 2020 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 2021 break; 2022 mcnt++; 2023 } 2024 #if __FreeBSD_version < 800000 2025 IF_ADDR_UNLOCK(ifp); 2026 #else 2027 if_maddr_runlock(ifp); 2028 #endif 2029 } 2030 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2031 reg_rctl &= (~IXGBE_FCTRL_MPE); 2032 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); 2033 2034 if (ifp->if_flags & IFF_PROMISC) { 2035 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2036 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); 2037 } else if (ifp->if_flags & IFF_ALLMULTI) { 2038 reg_rctl |= IXGBE_FCTRL_MPE; 2039 reg_rctl &= ~IXGBE_FCTRL_UPE; 2040 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); 2041 } 2042 return; 2043 } 2044 2045 2046 /********************************************************************* 2047 * Multicast Update 2048 * 2049 * This routine is called whenever multicast address list is updated. 
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixgbe_set_multi(struct adapter *adapter)
{
	u32 fctrl;
	u8 *update_ptr;
	struct ifmultiaddr *ifma;
	struct ixgbe_mc_addr *mta;
	int mcnt = 0;
	struct ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw,
		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
	}

	return;
}

/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixgbe_set_multi() one by one.
 */
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8*)(mta + 1);
	return (mta->addr);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixgbe_local_timer(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	int hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	** Check the TX queues status
	**	- mark hung queues so we don't schedule on them
	**	- watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, while there
		** are still uncleaned descriptors, it increments busy.
		** If we reach the MAX we declare the queue hung.
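		**
		** e.g. once busy reaches IXGBE_MAX_TX_BUSY the queue is
		** reported hung and flagged IXGBE_QUEUE_HUNG; each such
		** queue then counts toward the all-queues watchdog
		** decision below.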
2170 */ 2171 if (que->busy == IXGBE_QUEUE_HUNG) { 2172 ++hung; 2173 /* Mark the queue as inactive */ 2174 adapter->active_queues &= ~((u64)1 << que->me); 2175 continue; 2176 } else { 2177 /* Check if we've come back from hung */ 2178 if ((adapter->active_queues & ((u64)1 << que->me)) == 0) 2179 adapter->active_queues |= ((u64)1 << que->me); 2180 } 2181 if (que->busy >= IXGBE_MAX_TX_BUSY) { 2182 device_printf(dev,"Warning queue %d " 2183 "appears to be hung!\n", i); 2184 que->txr->busy = IXGBE_QUEUE_HUNG; 2185 ++hung; 2186 } 2187 2188 } 2189 2190 /* Only truly watchdog if all queues show hung */ 2191 if (hung == adapter->num_queues) 2192 goto watchdog; 2193 else if (queues != 0) { /* Force an IRQ on queues with work */ 2194 ixgbe_rearm_queues(adapter, queues); 2195 } 2196 2197 out: 2198 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); 2199 return; 2200 2201 watchdog: 2202 device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); 2203 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2204 adapter->watchdog_events++; 2205 ixgbe_init_locked(adapter); 2206 } 2207 2208 2209 /* 2210 ** Note: this routine updates the OS on the link state 2211 ** the real check of the hardware only happens with 2212 ** a link interrupt. 2213 */ 2214 static void 2215 ixgbe_update_link_status(struct adapter *adapter) 2216 { 2217 struct ifnet *ifp = adapter->ifp; 2218 device_t dev = adapter->dev; 2219 2220 if (adapter->link_up){ 2221 if (adapter->link_active == FALSE) { 2222 if (bootverbose) 2223 device_printf(dev,"Link is up %d Gbps %s \n", 2224 ((adapter->link_speed == 128)? 10:1), 2225 "Full Duplex"); 2226 adapter->link_active = TRUE; 2227 /* Update any Flow Control changes */ 2228 ixgbe_fc_enable(&adapter->hw); 2229 /* Update DMA coalescing config */ 2230 ixgbe_config_dmac(adapter); 2231 if_link_state_change(ifp, LINK_STATE_UP); 2232 #ifdef PCI_IOV 2233 ixgbe_ping_all_vfs(adapter); 2234 #endif 2235 } 2236 } else { /* Link down */ 2237 if (adapter->link_active == TRUE) { 2238 if (bootverbose) 2239 device_printf(dev,"Link is Down\n"); 2240 if_link_state_change(ifp, LINK_STATE_DOWN); 2241 adapter->link_active = FALSE; 2242 #ifdef PCI_IOV 2243 ixgbe_ping_all_vfs(adapter); 2244 #endif 2245 } 2246 } 2247 2248 return; 2249 } 2250 2251 2252 /********************************************************************* 2253 * 2254 * This routine disables all traffic on the adapter by issuing a 2255 * global reset on the MAC and deallocates TX/RX buffers. 2256 * 2257 **********************************************************************/ 2258 2259 static void 2260 ixgbe_stop(void *arg) 2261 { 2262 struct ifnet *ifp; 2263 struct adapter *adapter = arg; 2264 struct ixgbe_hw *hw = &adapter->hw; 2265 ifp = adapter->ifp; 2266 2267 mtx_assert(&adapter->core_mtx, MA_OWNED); 2268 2269 INIT_DEBUGOUT("ixgbe_stop: begin\n"); 2270 ixgbe_disable_intr(adapter); 2271 callout_stop(&adapter->timer); 2272 2273 /* Let the stack know...*/ 2274 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2275 2276 ixgbe_reset_hw(hw); 2277 hw->adapter_stopped = FALSE; 2278 ixgbe_stop_adapter(hw); 2279 if (hw->mac.type == ixgbe_mac_82599EB) 2280 ixgbe_stop_mac_link_on_d3_82599(hw); 2281 /* Turn off the laser - noop with no optics */ 2282 ixgbe_disable_tx_laser(hw); 2283 2284 /* Update the stack */ 2285 adapter->link_up = FALSE; 2286 ixgbe_update_link_status(adapter); 2287 2288 /* reprogram the RAR[0] in case user changed it. 
*/ 2289 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 2290 2291 return; 2292 } 2293 2294 2295 /********************************************************************* 2296 * 2297 * Determine hardware revision. 2298 * 2299 **********************************************************************/ 2300 static void 2301 ixgbe_identify_hardware(struct adapter *adapter) 2302 { 2303 device_t dev = adapter->dev; 2304 struct ixgbe_hw *hw = &adapter->hw; 2305 2306 /* Save off the information about this board */ 2307 hw->vendor_id = pci_get_vendor(dev); 2308 hw->device_id = pci_get_device(dev); 2309 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); 2310 hw->subsystem_vendor_id = 2311 pci_read_config(dev, PCIR_SUBVEND_0, 2); 2312 hw->subsystem_device_id = 2313 pci_read_config(dev, PCIR_SUBDEV_0, 2); 2314 2315 /* 2316 ** Make sure BUSMASTER is set 2317 */ 2318 pci_enable_busmaster(dev); 2319 2320 /* We need this here to set the num_segs below */ 2321 ixgbe_set_mac_type(hw); 2322 2323 /* Pick up the 82599 settings */ 2324 if (hw->mac.type != ixgbe_mac_82598EB) { 2325 hw->phy.smart_speed = ixgbe_smart_speed; 2326 adapter->num_segs = IXGBE_82599_SCATTER; 2327 } else 2328 adapter->num_segs = IXGBE_82598_SCATTER; 2329 2330 return; 2331 } 2332 2333 /********************************************************************* 2334 * 2335 * Determine optic type 2336 * 2337 **********************************************************************/ 2338 static void 2339 ixgbe_setup_optics(struct adapter *adapter) 2340 { 2341 struct ixgbe_hw *hw = &adapter->hw; 2342 int layer; 2343 2344 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 2345 2346 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { 2347 adapter->optics = IFM_10G_T; 2348 return; 2349 } 2350 2351 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { 2352 adapter->optics = IFM_1000_T; 2353 return; 2354 } 2355 2356 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { 2357 adapter->optics = IFM_1000_SX; 2358 return; 2359 } 2360 2361 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR | 2362 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) { 2363 adapter->optics = IFM_10G_LR; 2364 return; 2365 } 2366 2367 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 2368 adapter->optics = IFM_10G_SR; 2369 return; 2370 } 2371 2372 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) { 2373 adapter->optics = IFM_10G_TWINAX; 2374 return; 2375 } 2376 2377 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | 2378 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) { 2379 adapter->optics = IFM_10G_CX4; 2380 return; 2381 } 2382 2383 /* If we get here just set the default */ 2384 adapter->optics = IFM_ETHER | IFM_AUTO; 2385 return; 2386 } 2387 2388 /********************************************************************* 2389 * 2390 * Setup the Legacy or MSI Interrupt handler 2391 * 2392 **********************************************************************/ 2393 static int 2394 ixgbe_allocate_legacy(struct adapter *adapter) 2395 { 2396 device_t dev = adapter->dev; 2397 struct ix_queue *que = adapter->queues; 2398 #ifndef IXGBE_LEGACY_TX 2399 struct tx_ring *txr = adapter->tx_rings; 2400 #endif 2401 int error, rid = 0; 2402 2403 /* MSI RID at 1 */ 2404 if (adapter->msix == 1) 2405 rid = 1; 2406 2407 /* We allocate a single interrupt resource */ 2408 adapter->res = bus_alloc_resource_any(dev, 2409 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 2410 if (adapter->res == NULL) { 2411 device_printf(dev, "Unable to allocate bus resource: " 2412 "interrupt\n"); 2413 return (ENXIO); 2414 } 2415 2416 /* 2417 * Try 
allocating a fast interrupt and the associated deferred 2418 * processing contexts. 2419 */ 2420 #ifndef IXGBE_LEGACY_TX 2421 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); 2422 #endif 2423 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); 2424 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, 2425 taskqueue_thread_enqueue, &que->tq); 2426 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq", 2427 device_get_nameunit(adapter->dev)); 2428 2429 /* Tasklets for Link, SFP and Multispeed Fiber */ 2430 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); 2431 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); 2432 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); 2433 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter); 2434 #ifdef IXGBE_FDIR 2435 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); 2436 #endif 2437 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT, 2438 taskqueue_thread_enqueue, &adapter->tq); 2439 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq", 2440 device_get_nameunit(adapter->dev)); 2441 2442 if ((error = bus_setup_intr(dev, adapter->res, 2443 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, 2444 que, &adapter->tag)) != 0) { 2445 device_printf(dev, "Failed to register fast interrupt " 2446 "handler: %d\n", error); 2447 taskqueue_free(que->tq); 2448 taskqueue_free(adapter->tq); 2449 que->tq = NULL; 2450 adapter->tq = NULL; 2451 return (error); 2452 } 2453 /* For simplicity in the handlers */ 2454 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK; 2455 2456 return (0); 2457 } 2458 2459 2460 /********************************************************************* 2461 * 2462 * Setup MSIX Interrupt resources and handlers 2463 * 2464 **********************************************************************/ 2465 static int 2466 ixgbe_allocate_msix(struct adapter *adapter) 2467 { 2468 device_t dev = adapter->dev; 2469 struct ix_queue *que = adapter->queues; 2470 struct tx_ring *txr = adapter->tx_rings; 2471 int error, rid, vector = 0; 2472 int cpu_id = 0; 2473 #ifdef RSS 2474 cpuset_t cpu_mask; 2475 #endif 2476 2477 #ifdef RSS 2478 /* 2479 * If we're doing RSS, the number of queues needs to 2480 * match the number of RSS buckets that are configured. 2481 * 2482 * + If there's more queues than RSS buckets, we'll end 2483 * up with queues that get no traffic. 2484 * 2485 * + If there's more RSS buckets than queues, we'll end 2486 * up having multiple RSS buckets map to the same queue, 2487 * so there'll be some contention. 
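	 *
	 * For example, 8 buckets on a 4-queue adapter fold buckets
	 * 4-7 back onto queues 0-3 (indirection entries are taken
	 * modulo num_queues when the RETA is programmed), so two
	 * buckets end up contending for each queue.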
2488 */ 2489 if (adapter->num_queues != rss_getnumbuckets()) { 2490 device_printf(dev, 2491 "%s: number of queues (%d) != number of RSS buckets (%d)" 2492 "; performance will be impacted.\n", 2493 __func__, 2494 adapter->num_queues, 2495 rss_getnumbuckets()); 2496 } 2497 #endif 2498 2499 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { 2500 rid = vector + 1; 2501 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2502 RF_SHAREABLE | RF_ACTIVE); 2503 if (que->res == NULL) { 2504 device_printf(dev,"Unable to allocate" 2505 " bus resource: que interrupt [%d]\n", vector); 2506 return (ENXIO); 2507 } 2508 /* Set the handler function */ 2509 error = bus_setup_intr(dev, que->res, 2510 INTR_TYPE_NET | INTR_MPSAFE, NULL, 2511 ixgbe_msix_que, que, &que->tag); 2512 if (error) { 2513 que->res = NULL; 2514 device_printf(dev, "Failed to register QUE handler"); 2515 return (error); 2516 } 2517 #if __FreeBSD_version >= 800504 2518 bus_describe_intr(dev, que->res, que->tag, "q%d", i); 2519 #endif 2520 que->msix = vector; 2521 adapter->active_queues |= (u64)(1 << que->msix); 2522 #ifdef RSS 2523 /* 2524 * The queue ID is used as the RSS layer bucket ID. 2525 * We look up the queue ID -> RSS CPU ID and select 2526 * that. 2527 */ 2528 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2529 #else 2530 /* 2531 * Bind the msix vector, and thus the 2532 * rings to the corresponding cpu. 2533 * 2534 * This just happens to match the default RSS round-robin 2535 * bucket -> queue -> CPU allocation. 2536 */ 2537 if (adapter->num_queues > 1) 2538 cpu_id = i; 2539 #endif 2540 if (adapter->num_queues > 1) 2541 bus_bind_intr(dev, que->res, cpu_id); 2542 #ifdef IXGBE_DEBUG 2543 #ifdef RSS 2544 device_printf(dev, 2545 "Bound RSS bucket %d to CPU %d\n", 2546 i, cpu_id); 2547 #else 2548 device_printf(dev, 2549 "Bound queue %d to cpu %d\n", 2550 i, cpu_id); 2551 #endif 2552 #endif /* IXGBE_DEBUG */ 2553 2554 2555 #ifndef IXGBE_LEGACY_TX 2556 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); 2557 #endif 2558 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); 2559 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, 2560 taskqueue_thread_enqueue, &que->tq); 2561 #ifdef RSS 2562 CPU_SETOF(cpu_id, &cpu_mask); 2563 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, 2564 &cpu_mask, 2565 "%s (bucket %d)", 2566 device_get_nameunit(adapter->dev), 2567 cpu_id); 2568 #else 2569 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d", 2570 device_get_nameunit(adapter->dev), i); 2571 #endif 2572 } 2573 2574 /* and Link */ 2575 rid = vector + 1; 2576 adapter->res = bus_alloc_resource_any(dev, 2577 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 2578 if (!adapter->res) { 2579 device_printf(dev,"Unable to allocate" 2580 " bus resource: Link interrupt [%d]\n", rid); 2581 return (ENXIO); 2582 } 2583 /* Set the link handler function */ 2584 error = bus_setup_intr(dev, adapter->res, 2585 INTR_TYPE_NET | INTR_MPSAFE, NULL, 2586 ixgbe_msix_link, adapter, &adapter->tag); 2587 if (error) { 2588 adapter->res = NULL; 2589 device_printf(dev, "Failed to register LINK handler"); 2590 return (error); 2591 } 2592 #if __FreeBSD_version >= 800504 2593 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); 2594 #endif 2595 adapter->vector = vector; 2596 /* Tasklets for Link, SFP and Multispeed Fiber */ 2597 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); 2598 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); 2599 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); 2600 #ifdef PCI_IOV 
2601 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter); 2602 #endif 2603 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter); 2604 #ifdef IXGBE_FDIR 2605 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); 2606 #endif 2607 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT, 2608 taskqueue_thread_enqueue, &adapter->tq); 2609 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq", 2610 device_get_nameunit(adapter->dev)); 2611 2612 return (0); 2613 } 2614 2615 /* 2616 * Setup Either MSI/X or MSI 2617 */ 2618 static int 2619 ixgbe_setup_msix(struct adapter *adapter) 2620 { 2621 device_t dev = adapter->dev; 2622 int rid, want, queues, msgs; 2623 2624 /* Override by tuneable */ 2625 if (ixgbe_enable_msix == 0) 2626 goto msi; 2627 2628 /* First try MSI/X */ 2629 msgs = pci_msix_count(dev); 2630 if (msgs == 0) 2631 goto msi; 2632 rid = PCIR_BAR(MSIX_82598_BAR); 2633 adapter->msix_mem = bus_alloc_resource_any(dev, 2634 SYS_RES_MEMORY, &rid, RF_ACTIVE); 2635 if (adapter->msix_mem == NULL) { 2636 rid += 4; /* 82599 maps in higher BAR */ 2637 adapter->msix_mem = bus_alloc_resource_any(dev, 2638 SYS_RES_MEMORY, &rid, RF_ACTIVE); 2639 } 2640 if (adapter->msix_mem == NULL) { 2641 /* May not be enabled */ 2642 device_printf(adapter->dev, 2643 "Unable to map MSIX table \n"); 2644 goto msi; 2645 } 2646 2647 /* Figure out a reasonable auto config value */ 2648 queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus; 2649 2650 #ifdef RSS 2651 /* If we're doing RSS, clamp at the number of RSS buckets */ 2652 if (queues > rss_getnumbuckets()) 2653 queues = rss_getnumbuckets(); 2654 #endif 2655 2656 if (ixgbe_num_queues != 0) 2657 queues = ixgbe_num_queues; 2658 /* Set max queues to 8 when autoconfiguring */ 2659 else if ((ixgbe_num_queues == 0) && (queues > 8)) 2660 queues = 8; 2661 2662 /* reflect correct sysctl value */ 2663 ixgbe_num_queues = queues; 2664 2665 /* 2666 ** Want one vector (RX/TX pair) per queue 2667 ** plus an additional for Link. 
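	**
	** e.g. 8 queues want 9 vectors; if the device exposes fewer
	** than that we fall back to MSI below rather than running
	** with a short MSIX allocation.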
2668 */ 2669 want = queues + 1; 2670 if (msgs >= want) 2671 msgs = want; 2672 else { 2673 device_printf(adapter->dev, 2674 "MSIX Configuration Problem, " 2675 "%d vectors but %d queues wanted!\n", 2676 msgs, want); 2677 goto msi; 2678 } 2679 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) { 2680 device_printf(adapter->dev, 2681 "Using MSIX interrupts with %d vectors\n", msgs); 2682 adapter->num_queues = queues; 2683 return (msgs); 2684 } 2685 /* 2686 ** If MSIX alloc failed or provided us with 2687 ** less than needed, free and fall through to MSI 2688 */ 2689 pci_release_msi(dev); 2690 2691 msi: 2692 if (adapter->msix_mem != NULL) { 2693 bus_release_resource(dev, SYS_RES_MEMORY, 2694 rid, adapter->msix_mem); 2695 adapter->msix_mem = NULL; 2696 } 2697 msgs = 1; 2698 if (pci_alloc_msi(dev, &msgs) == 0) { 2699 device_printf(adapter->dev, "Using an MSI interrupt\n"); 2700 return (msgs); 2701 } 2702 device_printf(adapter->dev, "Using a Legacy interrupt\n"); 2703 return (0); 2704 } 2705 2706 2707 static int 2708 ixgbe_allocate_pci_resources(struct adapter *adapter) 2709 { 2710 int rid; 2711 device_t dev = adapter->dev; 2712 2713 rid = PCIR_BAR(0); 2714 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2715 &rid, RF_ACTIVE); 2716 2717 if (!(adapter->pci_mem)) { 2718 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2719 return (ENXIO); 2720 } 2721 2722 /* Save bus_space values for READ/WRITE_REG macros */ 2723 adapter->osdep.mem_bus_space_tag = 2724 rman_get_bustag(adapter->pci_mem); 2725 adapter->osdep.mem_bus_space_handle = 2726 rman_get_bushandle(adapter->pci_mem); 2727 /* Set hw values for shared code */ 2728 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; 2729 adapter->hw.back = adapter; 2730 2731 /* Default to 1 queue if MSI-X setup fails */ 2732 adapter->num_queues = 1; 2733 2734 /* 2735 ** Now setup MSI or MSI-X, should 2736 ** return us the number of supported 2737 ** vectors. (Will be 1 for MSI) 2738 */ 2739 adapter->msix = ixgbe_setup_msix(adapter); 2740 return (0); 2741 } 2742 2743 static void 2744 ixgbe_free_pci_resources(struct adapter * adapter) 2745 { 2746 struct ix_queue *que = adapter->queues; 2747 device_t dev = adapter->dev; 2748 int rid, memrid; 2749 2750 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 2751 memrid = PCIR_BAR(MSIX_82598_BAR); 2752 else 2753 memrid = PCIR_BAR(MSIX_82599_BAR); 2754 2755 /* 2756 ** There is a slight possibility of a failure mode 2757 ** in attach that will result in entering this function 2758 ** before interrupt resources have been initialized, and 2759 ** in that case we do not want to execute the loops below 2760 ** We can detect this reliably by the state of the adapter 2761 ** res pointer. 2762 */ 2763 if (adapter->res == NULL) 2764 goto mem; 2765 2766 /* 2767 ** Release all msix queue resources: 2768 */ 2769 for (int i = 0; i < adapter->num_queues; i++, que++) { 2770 rid = que->msix + 1; 2771 if (que->tag != NULL) { 2772 bus_teardown_intr(dev, que->res, que->tag); 2773 que->tag = NULL; 2774 } 2775 if (que->res != NULL) 2776 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); 2777 } 2778 2779 2780 /* Clean the Legacy or Link interrupt last */ 2781 if (adapter->vector) /* we are doing MSIX */ 2782 rid = adapter->vector + 1; 2783 else 2784 (adapter->msix != 0) ? 
(rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixgbe_get_counter);
#endif
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
#ifndef IXGBE_LEGACY_TX
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_RXCSUM_IPV6
			     | IFCAP_TXCSUM_IPV6
			     | IFCAP_TSO4
			     | IFCAP_TSO6
			     | IFCAP_LRO
			     | IFCAP_VLAN_HWTAGGING
			     | IFCAP_VLAN_HWTSO
			     | IFCAP_VLAN_HWCSUM
			     | IFCAP_JUMBO_MTU
			     | IFCAP_VLAN_MTU
			     | IFCAP_HWSTATS;

	/* Enable the above capabilities by default */
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't enable VLAN_HWFILTER by default: if vlans are
	** created on another pseudo device (e.g. lagg) then vlan
	** events are not passed through, which breaks operation,
	** whereas with HW FILTER off everything works. If you are
	** using vlans directly on the ixgbe driver you can enable
	** this and get full hardware tag filtering.
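	** (At runtime this is the `vlanhwfilter` capability flag in
	** ifconfig(8); capability name assumed per stock FreeBSD.)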
2885 */ 2886 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2887 2888 /* 2889 * Specify the media types supported by this adapter and register 2890 * callbacks to update media and link information 2891 */ 2892 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, 2893 ixgbe_media_status); 2894 2895 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); 2896 ixgbe_add_media_types(adapter); 2897 2898 /* Set autoselect media by default */ 2899 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2900 2901 return (0); 2902 } 2903 2904 static void 2905 ixgbe_add_media_types(struct adapter *adapter) 2906 { 2907 struct ixgbe_hw *hw = &adapter->hw; 2908 device_t dev = adapter->dev; 2909 int layer; 2910 2911 layer = adapter->phy_layer; 2912 2913 /* Media types with matching FreeBSD media defines */ 2914 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) 2915 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL); 2916 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) 2917 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); 2918 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) 2919 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2920 2921 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2922 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2923 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); 2924 2925 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 2926 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL); 2927 if (hw->phy.multispeed_fiber) 2928 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL); 2929 } 2930 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 2931 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 2932 if (hw->phy.multispeed_fiber) 2933 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 2934 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2935 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 2936 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2937 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 2938 2939 #ifdef IFM_ETH_XTYPE 2940 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2941 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL); 2942 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) 2943 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); 2944 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2945 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL); 2946 #else 2947 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 2948 device_printf(dev, "Media supported: 10GbaseKR\n"); 2949 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); 2950 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 2951 } 2952 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 2953 device_printf(dev, "Media supported: 10GbaseKX4\n"); 2954 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); 2955 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 2956 } 2957 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 2958 device_printf(dev, "Media supported: 1000baseKX\n"); 2959 device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); 2960 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL); 2961 } 2962 #endif 2963 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 2964 device_printf(dev, "Media supported: 1000baseBX\n"); 2965 2966 if (hw->device_id == IXGBE_DEV_ID_82598AT) { 2967 ifmedia_add(&adapter->media, 2968 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2969 ifmedia_add(&adapter->media, 2970 IFM_ETHER | IFM_1000_T, 0, NULL); 2971 } 2972 2973 
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
}

static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		taskqueue_enqueue(adapter->tq, &adapter->mod_task);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			goto out;
		autoneg = hw->phy.autoneg_advertised;
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw,
			    &autoneg, &negotiate);
		if (err)
			goto out;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw,
			    autoneg, adapter->link_up);
	}
out:
	return;
}


/*********************************************************************
 *
 *  Enable transmit units.
 *
 **********************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
3043 */ 3044 switch (hw->mac.type) { 3045 case ixgbe_mac_82598EB: 3046 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 3047 break; 3048 default: 3049 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 3050 break; 3051 } 3052 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 3053 switch (hw->mac.type) { 3054 case ixgbe_mac_82598EB: 3055 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 3056 break; 3057 default: 3058 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 3059 break; 3060 } 3061 3062 } 3063 3064 if (hw->mac.type != ixgbe_mac_82598EB) { 3065 u32 dmatxctl, rttdcs; 3066 #ifdef PCI_IOV 3067 enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter); 3068 #endif 3069 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 3070 dmatxctl |= IXGBE_DMATXCTL_TE; 3071 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 3072 /* Disable arbiter to set MTQC */ 3073 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 3074 rttdcs |= IXGBE_RTTDCS_ARBDIS; 3075 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 3076 #ifdef PCI_IOV 3077 IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode)); 3078 #else 3079 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 3080 #endif 3081 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 3082 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 3083 } 3084 3085 return; 3086 } 3087 3088 static void 3089 ixgbe_initialize_rss_mapping(struct adapter *adapter) 3090 { 3091 struct ixgbe_hw *hw = &adapter->hw; 3092 u32 reta = 0, mrqc, rss_key[10]; 3093 int queue_id, table_size, index_mult; 3094 #ifdef RSS 3095 u32 rss_hash_config; 3096 #endif 3097 #ifdef PCI_IOV 3098 enum ixgbe_iov_mode mode; 3099 #endif 3100 3101 #ifdef RSS 3102 /* Fetch the configured RSS key */ 3103 rss_getkey((uint8_t *) &rss_key); 3104 #else 3105 /* set up random bits */ 3106 arc4rand(&rss_key, sizeof(rss_key), 0); 3107 #endif 3108 3109 /* Set multiplier for RETA setup and table size based on MAC */ 3110 index_mult = 0x1; 3111 table_size = 128; 3112 switch (adapter->hw.mac.type) { 3113 case ixgbe_mac_82598EB: 3114 index_mult = 0x11; 3115 break; 3116 case ixgbe_mac_X550: 3117 case ixgbe_mac_X550EM_x: 3118 table_size = 512; 3119 break; 3120 default: 3121 break; 3122 } 3123 3124 /* Set up the redirection table */ 3125 for (int i = 0, j = 0; i < table_size; i++, j++) { 3126 if (j == adapter->num_queues) j = 0; 3127 #ifdef RSS 3128 /* 3129 * Fetch the RSS bucket id for the given indirection entry. 3130 * Cap it at the number of configured buckets (which is 3131 * num_queues.) 3132 */ 3133 queue_id = rss_get_indirection_to_bucket(i); 3134 queue_id = queue_id % adapter->num_queues; 3135 #else 3136 queue_id = (j * index_mult); 3137 #endif 3138 /* 3139 * The low 8 bits are for hash value (n+0); 3140 * The next 8 bits are for hash value (n+1), etc. 
3141 */ 3142 reta = reta >> 8; 3143 reta = reta | ( ((uint32_t) queue_id) << 24); 3144 if ((i & 3) == 3) { 3145 if (i < 128) 3146 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 3147 else 3148 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta); 3149 reta = 0; 3150 } 3151 } 3152 3153 /* Now fill our hash function seeds */ 3154 for (int i = 0; i < 10; i++) 3155 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 3156 3157 /* Perform hash on these packet types */ 3158 #ifdef RSS 3159 mrqc = IXGBE_MRQC_RSSEN; 3160 rss_hash_config = rss_gethashconfig(); 3161 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 3162 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 3163 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 3164 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 3165 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 3166 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 3167 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 3168 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 3169 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 3170 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 3171 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 3172 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 3173 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 3174 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 3175 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX) 3176 device_printf(adapter->dev, 3177 "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, " 3178 "but not supported\n", __func__); 3179 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 3180 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 3181 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 3182 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 3183 #else 3184 /* 3185 * Disable UDP - IP fragments aren't currently being handled 3186 * and so we end up with a mix of 2-tuple and 4-tuple 3187 * traffic. 3188 */ 3189 mrqc = IXGBE_MRQC_RSSEN 3190 | IXGBE_MRQC_RSS_FIELD_IPV4 3191 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 3192 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 3193 | IXGBE_MRQC_RSS_FIELD_IPV6_EX 3194 | IXGBE_MRQC_RSS_FIELD_IPV6 3195 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 3196 ; 3197 #endif /* RSS */ 3198 #ifdef PCI_IOV 3199 mode = ixgbe_get_iov_mode(adapter); 3200 mrqc |= ixgbe_get_mrqc(mode); 3201 #endif 3202 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 3203 } 3204 3205 3206 /********************************************************************* 3207 * 3208 * Setup receive registers and features. 3209 * 3210 **********************************************************************/ 3211 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 3212 3213 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 3214 3215 static void 3216 ixgbe_initialize_receive_units(struct adapter *adapter) 3217 { 3218 struct rx_ring *rxr = adapter->rx_rings; 3219 struct ixgbe_hw *hw = &adapter->hw; 3220 struct ifnet *ifp = adapter->ifp; 3221 u32 bufsz, fctrl, srrctl, rxcsum; 3222 u32 hlreg; 3223 3224 /* 3225 * Make sure receives are disabled while 3226 * setting up the descriptor ring 3227 */ 3228 ixgbe_disable_rx(hw); 3229 3230 /* Enable broadcasts */ 3231 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3232 fctrl |= IXGBE_FCTRL_BAM; 3233 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3234 fctrl |= IXGBE_FCTRL_DPF; 3235 fctrl |= IXGBE_FCTRL_PMCF; 3236 } 3237 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3238 3239 /* Set for Jumbo Frames? */ 3240 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 3241 if (ifp->if_mtu > ETHERMTU) 3242 hlreg |= IXGBE_HLREG0_JUMBOEN; 3243 else 3244 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 3245 #ifdef DEV_NETMAP 3246 /* crcstrip is conditional in netmap (in RDRXCTL too ?) 
	 */
	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
#endif /* DEV_NETMAP */
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (adapter->rx_mbuf_sz +
	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		int j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			      IXGBE_PSRTYPE_UDPHDR |
			      IXGBE_PSRTYPE_IPV4HDR |
			      IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}


/*
** This routine is run via a vlan config EVENT; it lets us
** use the HW Filter table since we can get the vlan id.
** This just creates the entry in the soft version of the
** VFTA, init will repopulate the real table.
*/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

/*
** This routine is run via a vlan
** unconfig EVENT, removing our entry
** from the soft vfta.
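**
** e.g. vtag 100 lives at shadow_vfta[3] (100 >> 5) bit 4
** (100 & 0x1F); clearing it mirrors how register set it above.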
3360 */ 3361 static void 3362 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 3363 { 3364 struct adapter *adapter = ifp->if_softc; 3365 u16 index, bit; 3366 3367 if (ifp->if_softc != arg) 3368 return; 3369 3370 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 3371 return; 3372 3373 IXGBE_CORE_LOCK(adapter); 3374 index = (vtag >> 5) & 0x7F; 3375 bit = vtag & 0x1F; 3376 adapter->shadow_vfta[index] &= ~(1 << bit); 3377 --adapter->num_vlans; 3378 /* Re-init to load the changes */ 3379 ixgbe_setup_vlan_hw_support(adapter); 3380 IXGBE_CORE_UNLOCK(adapter); 3381 } 3382 3383 static void 3384 ixgbe_setup_vlan_hw_support(struct adapter *adapter) 3385 { 3386 struct ifnet *ifp = adapter->ifp; 3387 struct ixgbe_hw *hw = &adapter->hw; 3388 struct rx_ring *rxr; 3389 u32 ctrl; 3390 3391 3392 /* 3393 ** We get here thru init_locked, meaning 3394 ** a soft reset, this has already cleared 3395 ** the VFTA and other state, so if there 3396 ** have been no vlan's registered do nothing. 3397 */ 3398 if (adapter->num_vlans == 0) 3399 return; 3400 3401 /* Setup the queues for vlans */ 3402 for (int i = 0; i < adapter->num_queues; i++) { 3403 rxr = &adapter->rx_rings[i]; 3404 /* On 82599 the VLAN enable is per/queue in RXDCTL */ 3405 if (hw->mac.type != ixgbe_mac_82598EB) { 3406 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3407 ctrl |= IXGBE_RXDCTL_VME; 3408 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 3409 } 3410 rxr->vtag_strip = TRUE; 3411 } 3412 3413 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 3414 return; 3415 /* 3416 ** A soft reset zero's out the VFTA, so 3417 ** we need to repopulate it now. 3418 */ 3419 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) 3420 if (adapter->shadow_vfta[i] != 0) 3421 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 3422 adapter->shadow_vfta[i]); 3423 3424 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3425 /* Enable the Filter Table if enabled */ 3426 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 3427 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 3428 ctrl |= IXGBE_VLNCTRL_VFE; 3429 } 3430 if (hw->mac.type == ixgbe_mac_82598EB) 3431 ctrl |= IXGBE_VLNCTRL_VME; 3432 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 3433 } 3434 3435 static void 3436 ixgbe_enable_intr(struct adapter *adapter) 3437 { 3438 struct ixgbe_hw *hw = &adapter->hw; 3439 struct ix_queue *que = adapter->queues; 3440 u32 mask, fwsm; 3441 3442 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 3443 /* Enable Fan Failure detection */ 3444 if (hw->device_id == IXGBE_DEV_ID_82598AT) 3445 mask |= IXGBE_EIMS_GPI_SDP1; 3446 3447 switch (adapter->hw.mac.type) { 3448 case ixgbe_mac_82599EB: 3449 mask |= IXGBE_EIMS_ECC; 3450 /* Temperature sensor on some adapters */ 3451 mask |= IXGBE_EIMS_GPI_SDP0; 3452 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 3453 mask |= IXGBE_EIMS_GPI_SDP1; 3454 mask |= IXGBE_EIMS_GPI_SDP2; 3455 #ifdef IXGBE_FDIR 3456 mask |= IXGBE_EIMS_FLOW_DIR; 3457 #endif 3458 #ifdef PCI_IOV 3459 mask |= IXGBE_EIMS_MAILBOX; 3460 #endif 3461 break; 3462 case ixgbe_mac_X540: 3463 /* Detect if Thermal Sensor is enabled */ 3464 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 3465 if (fwsm & IXGBE_FWSM_TS_ENABLED) 3466 mask |= IXGBE_EIMS_TS; 3467 mask |= IXGBE_EIMS_ECC; 3468 #ifdef IXGBE_FDIR 3469 mask |= IXGBE_EIMS_FLOW_DIR; 3470 #endif 3471 break; 3472 case ixgbe_mac_X550: 3473 case ixgbe_mac_X550EM_x: 3474 /* MAC thermal sensor is automatically enabled */ 3475 mask |= IXGBE_EIMS_TS; 3476 /* Some devices use SDP0 for important information */ 3477 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3478 hw->device_id == 
IXGBE_DEV_ID_X550EM_X_10G_T) 3479 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 3480 mask |= IXGBE_EIMS_ECC; 3481 #ifdef IXGBE_FDIR 3482 mask |= IXGBE_EIMS_FLOW_DIR; 3483 #endif 3484 #ifdef PCI_IOV 3485 mask |= IXGBE_EIMS_MAILBOX; 3486 #endif 3487 /* falls through */ 3488 default: 3489 break; 3490 } 3491 3492 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3493 3494 /* With MSI-X we use auto clear */ 3495 if (adapter->msix_mem) { 3496 mask = IXGBE_EIMS_ENABLE_MASK; 3497 /* Don't autoclear Link */ 3498 mask &= ~IXGBE_EIMS_OTHER; 3499 mask &= ~IXGBE_EIMS_LSC; 3500 #ifdef PCI_IOV 3501 mask &= ~IXGBE_EIMS_MAILBOX; 3502 #endif 3503 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 3504 } 3505 3506 /* 3507 ** Now enable all queues, this is done separately to 3508 ** allow for handling the extended (beyond 32) MSIX 3509 ** vectors that can be used by 82599 3510 */ 3511 for (int i = 0; i < adapter->num_queues; i++, que++) 3512 ixgbe_enable_queue(adapter, que->msix); 3513 3514 IXGBE_WRITE_FLUSH(hw); 3515 3516 return; 3517 } 3518 3519 static void 3520 ixgbe_disable_intr(struct adapter *adapter) 3521 { 3522 if (adapter->msix_mem) 3523 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); 3524 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3525 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3526 } else { 3527 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3528 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3529 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3530 } 3531 IXGBE_WRITE_FLUSH(&adapter->hw); 3532 return; 3533 } 3534 3535 /* 3536 ** Get the width and transaction speed of 3537 ** the slot this adapter is plugged into. 3538 */ 3539 static void 3540 ixgbe_get_slot_info(struct adapter *adapter) 3541 { 3542 device_t dev = adapter->dev; 3543 struct ixgbe_hw *hw = &adapter->hw; 3544 struct ixgbe_mac_info *mac = &hw->mac; 3545 u16 link; 3546 u32 offset; 3547 3548 /* For most devices simply call the shared code routine */ 3549 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) { 3550 ixgbe_get_bus_info(hw); 3551 /* These devices don't use PCI-E */ 3552 switch (hw->mac.type) { 3553 case ixgbe_mac_X550EM_x: 3554 return; 3555 default: 3556 goto display; 3557 } 3558 } 3559 3560 /* 3561 ** For the Quad port adapter we need to parse back 3562 ** up the PCI tree to find the speed of the expansion 3563 ** slot into which this adapter is plugged. A bit more work. 
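**
** (Each device_get_parent() pair below climbs one PCI bridge:
** first to the card's internal switch, then to the slot's
** downstream port whose Link Status register we read.)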
3564 */ 3565 dev = device_get_parent(device_get_parent(dev)); 3566 #ifdef IXGBE_DEBUG 3567 device_printf(dev, "parent pcib = %x,%x,%x\n", 3568 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); 3569 #endif 3570 dev = device_get_parent(device_get_parent(dev)); 3571 #ifdef IXGBE_DEBUG 3572 device_printf(dev, "slot pcib = %x,%x,%x\n", 3573 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); 3574 #endif 3575 /* Now get the PCI Express Capabilities offset */ 3576 pci_find_cap(dev, PCIY_EXPRESS, &offset); 3577 /* ...and read the Link Status Register */ 3578 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 3579 switch (link & IXGBE_PCI_LINK_WIDTH) { 3580 case IXGBE_PCI_LINK_WIDTH_1: 3581 hw->bus.width = ixgbe_bus_width_pcie_x1; 3582 break; 3583 case IXGBE_PCI_LINK_WIDTH_2: 3584 hw->bus.width = ixgbe_bus_width_pcie_x2; 3585 break; 3586 case IXGBE_PCI_LINK_WIDTH_4: 3587 hw->bus.width = ixgbe_bus_width_pcie_x4; 3588 break; 3589 case IXGBE_PCI_LINK_WIDTH_8: 3590 hw->bus.width = ixgbe_bus_width_pcie_x8; 3591 break; 3592 default: 3593 hw->bus.width = ixgbe_bus_width_unknown; 3594 break; 3595 } 3596 3597 switch (link & IXGBE_PCI_LINK_SPEED) { 3598 case IXGBE_PCI_LINK_SPEED_2500: 3599 hw->bus.speed = ixgbe_bus_speed_2500; 3600 break; 3601 case IXGBE_PCI_LINK_SPEED_5000: 3602 hw->bus.speed = ixgbe_bus_speed_5000; 3603 break; 3604 case IXGBE_PCI_LINK_SPEED_8000: 3605 hw->bus.speed = ixgbe_bus_speed_8000; 3606 break; 3607 default: 3608 hw->bus.speed = ixgbe_bus_speed_unknown; 3609 break; 3610 } 3611 3612 mac->ops.set_lan_id(hw); 3613 3614 display: 3615 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 3616 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s": 3617 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s": 3618 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"), 3619 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 3620 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 3621 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? 
"Width x1" : 3622 ("Unknown")); 3623 3624 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 3625 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 3626 (hw->bus.speed == ixgbe_bus_speed_2500))) { 3627 device_printf(dev, "PCI-Express bandwidth available" 3628 " for this card\n is not sufficient for" 3629 " optimal performance.\n"); 3630 device_printf(dev, "For optimal performance a x8 " 3631 "PCIE, or x4 PCIE Gen2 slot is required.\n"); 3632 } 3633 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 3634 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 3635 (hw->bus.speed < ixgbe_bus_speed_8000))) { 3636 device_printf(dev, "PCI-Express bandwidth available" 3637 " for this card\n is not sufficient for" 3638 " optimal performance.\n"); 3639 device_printf(dev, "For optimal performance a x8 " 3640 "PCIE Gen3 slot is required.\n"); 3641 } 3642 3643 return; 3644 } 3645 3646 3647 /* 3648 ** Setup the correct IVAR register for a particular MSIX interrupt 3649 ** (yes this is all very magic and confusing :) 3650 ** - entry is the register array entry 3651 ** - vector is the MSIX vector for this queue 3652 ** - type is RX/TX/MISC 3653 */ 3654 static void 3655 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 3656 { 3657 struct ixgbe_hw *hw = &adapter->hw; 3658 u32 ivar, index; 3659 3660 vector |= IXGBE_IVAR_ALLOC_VAL; 3661 3662 switch (hw->mac.type) { 3663 3664 case ixgbe_mac_82598EB: 3665 if (type == -1) 3666 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3667 else 3668 entry += (type * 64); 3669 index = (entry >> 2) & 0x1F; 3670 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3671 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3672 ivar |= (vector << (8 * (entry & 0x3))); 3673 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 3674 break; 3675 3676 case ixgbe_mac_82599EB: 3677 case ixgbe_mac_X540: 3678 case ixgbe_mac_X550: 3679 case ixgbe_mac_X550EM_x: 3680 if (type == -1) { /* MISC IVAR */ 3681 index = (entry & 1) * 8; 3682 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3683 ivar &= ~(0xFF << index); 3684 ivar |= (vector << index); 3685 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3686 } else { /* RX/TX IVARS */ 3687 index = (16 * (entry & 1)) + (8 * type); 3688 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3689 ivar &= ~(0xFF << index); 3690 ivar |= (vector << index); 3691 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3692 } 3693 3694 default: 3695 break; 3696 } 3697 } 3698 3699 static void 3700 ixgbe_configure_ivars(struct adapter *adapter) 3701 { 3702 struct ix_queue *que = adapter->queues; 3703 u32 newitr; 3704 3705 if (ixgbe_max_interrupt_rate > 0) 3706 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3707 else { 3708 /* 3709 ** Disable DMA coalescing if interrupt moderation is 3710 ** disabled. 3711 */ 3712 adapter->dmac = 0; 3713 newitr = 0; 3714 } 3715 3716 for (int i = 0; i < adapter->num_queues; i++, que++) { 3717 struct rx_ring *rxr = &adapter->rx_rings[i]; 3718 struct tx_ring *txr = &adapter->tx_rings[i]; 3719 /* First the RX queue entry */ 3720 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0); 3721 /* ... and the TX */ 3722 ixgbe_set_ivar(adapter, txr->me, que->msix, 1); 3723 /* Set an Initial EITR value */ 3724 IXGBE_WRITE_REG(&adapter->hw, 3725 IXGBE_EITR(que->msix), newitr); 3726 } 3727 3728 /* For the Link interrupt */ 3729 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 3730 } 3731 3732 /* 3733 ** ixgbe_sfp_probe - called in the local timer to 3734 ** determine if a port had optics inserted. 
3735 */ 3736 static bool 3737 ixgbe_sfp_probe(struct adapter *adapter) 3738 { 3739 struct ixgbe_hw *hw = &adapter->hw; 3740 device_t dev = adapter->dev; 3741 bool result = FALSE; 3742 3743 if ((hw->phy.type == ixgbe_phy_nl) && 3744 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3745 s32 ret = hw->phy.ops.identify_sfp(hw); 3746 if (ret) 3747 goto out; 3748 ret = hw->phy.ops.reset(hw); 3749 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3750 device_printf(dev, "Unsupported SFP+ module detected!\n"); 3751 device_printf(dev, "Reload driver with supported module.\n"); 3752 adapter->sfp_probe = FALSE; 3753 goto out; 3754 } else 3755 device_printf(dev, "SFP+ module detected!\n"); 3756 /* We now have supported optics */ 3757 adapter->sfp_probe = FALSE; 3758 /* Set the optics type so system reports correctly */ 3759 ixgbe_setup_optics(adapter); 3760 result = TRUE; 3761 } 3762 out: 3763 return (result); 3764 } 3765 3766 /* 3767 ** Tasklet handler for MSIX Link interrupts 3768 ** - done outside of interrupt context since it might sleep 3769 */ 3770 static void 3771 ixgbe_handle_link(void *context, int pending) 3772 { 3773 struct adapter *adapter = context; 3774 struct ixgbe_hw *hw = &adapter->hw; 3775 3776 ixgbe_check_link(hw, 3777 &adapter->link_speed, &adapter->link_up, 0); 3778 ixgbe_update_link_status(adapter); 3779 3780 /* Re-enable link interrupts */ 3781 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC); 3782 } 3783 3784 /* 3785 ** Tasklet for handling SFP module interrupts 3786 */ 3787 static void 3788 ixgbe_handle_mod(void *context, int pending) 3789 { 3790 struct adapter *adapter = context; 3791 struct ixgbe_hw *hw = &adapter->hw; 3792 enum ixgbe_phy_type orig_type = hw->phy.type; 3793 device_t dev = adapter->dev; 3794 u32 err; 3795 3796 IXGBE_CORE_LOCK(adapter); 3797 3798 /* Check to see if the PHY type changed */ 3799 if (hw->phy.ops.identify) { 3800 hw->phy.type = ixgbe_phy_unknown; 3801 hw->phy.ops.identify(hw); 3802 } 3803 3804 if (hw->phy.type != orig_type) { 3805 device_printf(dev, "Detected phy_type %d\n", hw->phy.type); 3806 3807 if (hw->phy.type == ixgbe_phy_none) { 3808 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 3809 goto out; 3810 } 3811 3812 /* Try to do the initialization that was skipped before */ 3813 if (hw->phy.ops.init) 3814 hw->phy.ops.init(hw); 3815 if (hw->phy.ops.reset) 3816 hw->phy.ops.reset(hw); 3817 } 3818 3819 err = hw->phy.ops.identify_sfp(hw); 3820 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3821 device_printf(dev, 3822 "Unsupported SFP+ module type was detected.\n"); 3823 goto out; 3824 } 3825 3826 err = hw->mac.ops.setup_sfp(hw); 3827 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3828 device_printf(dev, 3829 "Setup failure - unsupported SFP+ module type.\n"); 3830 goto out; 3831 } 3832 if (hw->phy.multispeed_fiber) 3833 taskqueue_enqueue(adapter->tq, &adapter->msf_task); 3834 out: 3835 /* Update media type */ 3836 switch (hw->mac.ops.get_media_type(hw)) { 3837 case ixgbe_media_type_fiber: 3838 adapter->optics = IFM_10G_SR; 3839 break; 3840 case ixgbe_media_type_copper: 3841 adapter->optics = IFM_10G_TWINAX; 3842 break; 3843 case ixgbe_media_type_cx4: 3844 adapter->optics = IFM_10G_CX4; 3845 break; 3846 default: 3847 adapter->optics = 0; 3848 break; 3849 } 3850 3851 IXGBE_CORE_UNLOCK(adapter); 3852 return; 3853 } 3854 3855 3856 /* 3857 ** Tasklet for handling MSF (multispeed fiber) interrupts 3858 */ 3859 static void 3860 ixgbe_handle_msf(void *context, int pending) 3861 { 3862 struct adapter *adapter = context; 3863 struct ixgbe_hw *hw = &adapter->hw; 3864 u32 autoneg; 3865 bool
negotiate; 3866 3867 IXGBE_CORE_LOCK(adapter); 3868 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 3869 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 3870 3871 autoneg = hw->phy.autoneg_advertised; 3872 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3873 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3874 if (hw->mac.ops.setup_link) 3875 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3876 3877 /* Adjust media types shown in ifconfig */ 3878 ifmedia_removeall(&adapter->media); 3879 ixgbe_add_media_types(adapter); 3880 IXGBE_CORE_UNLOCK(adapter); 3881 return; 3882 } 3883 3884 /* 3885 ** Tasklet for handling interrupts from an external PHY 3886 */ 3887 static void 3888 ixgbe_handle_phy(void *context, int pending) 3889 { 3890 struct adapter *adapter = context; 3891 struct ixgbe_hw *hw = &adapter->hw; 3892 int error; 3893 3894 error = hw->phy.ops.handle_lasi(hw); 3895 if (error == IXGBE_ERR_OVERTEMP) 3896 device_printf(adapter->dev, 3897 "CRITICAL: EXTERNAL PHY OVER TEMP!! " 3898 " PHY will downshift to lower power state!\n"); 3899 else if (error) 3900 device_printf(adapter->dev, 3901 "Error handling LASI interrupt: %d\n", 3902 error); 3903 return; 3904 } 3905 3906 #ifdef IXGBE_FDIR 3907 /* 3908 ** Tasklet for reinitializing the Flow Director filter table 3909 */ 3910 static void 3911 ixgbe_reinit_fdir(void *context, int pending) 3912 { 3913 struct adapter *adapter = context; 3914 struct ifnet *ifp = adapter->ifp; 3915 3916 if (adapter->fdir_reinit != 1) /* Shouldn't happen */ 3917 return; 3918 ixgbe_reinit_fdir_tables_82599(&adapter->hw); 3919 adapter->fdir_reinit = 0; 3920 /* re-enable flow director interrupts */ 3921 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 3922 /* Restart the interface */ 3923 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3924 return; 3925 } 3926 #endif 3927 3928 /********************************************************************* 3929 * 3930 * Configure DMA Coalescing 3931 * 3932 **********************************************************************/ 3933 static void 3934 ixgbe_config_dmac(struct adapter *adapter) 3935 { 3936 struct ixgbe_hw *hw = &adapter->hw; 3937 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 3938 3939 if (hw->mac.type < ixgbe_mac_X550 || 3940 !hw->mac.ops.dmac_config) 3941 return; 3942 3943 if (dcfg->watchdog_timer ^ adapter->dmac || 3944 dcfg->link_speed ^ adapter->link_speed) { 3945 dcfg->watchdog_timer = adapter->dmac; 3946 dcfg->fcoe_en = false; 3947 dcfg->link_speed = adapter->link_speed; 3948 dcfg->num_tcs = 1; 3949 3950 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 3951 dcfg->watchdog_timer, dcfg->link_speed); 3952 3953 hw->mac.ops.dmac_config(hw); 3954 } 3955 } 3956 3957 /* 3958 * Checks whether the adapter's ports are capable of 3959 * Wake On LAN by reading the adapter's NVM. 3960 * 3961 * Sets each port's hw->wol_enabled value depending 3962 * on the value read here. 
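 * For example, a device-caps word with WOL_PORT0_1 set advertises WoL
 * on both ports of a dual-port adapter, while WOL_PORT0 alone limits
 * it to PCI function 0, which is why hw->bus.func is checked below.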
3963 */ 3964 static void 3965 ixgbe_check_wol_support(struct adapter *adapter) 3966 { 3967 struct ixgbe_hw *hw = &adapter->hw; 3968 u16 dev_caps = 0; 3969 3970 /* Find out WoL support for port */ 3971 adapter->wol_support = hw->wol_enabled = 0; 3972 ixgbe_get_device_caps(hw, &dev_caps); 3973 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 3974 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 3975 hw->bus.func == 0)) 3976 adapter->wol_support = hw->wol_enabled = 1; 3977 3978 /* Save initial wake up filter configuration */ 3979 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 3980 3981 return; 3982 } 3983 3984 /* 3985 * Prepare the adapter/port for LPLU and/or WoL 3986 */ 3987 static int 3988 ixgbe_setup_low_power_mode(struct adapter *adapter) 3989 { 3990 struct ixgbe_hw *hw = &adapter->hw; 3991 device_t dev = adapter->dev; 3992 s32 error = 0; 3993 3994 mtx_assert(&adapter->core_mtx, MA_OWNED); 3995 3996 if (!hw->wol_enabled) 3997 ixgbe_set_phy_power(hw, FALSE); 3998 3999 /* Limit power management flow to X550EM baseT */ 4000 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T 4001 && hw->phy.ops.enter_lplu) { 4002 /* Turn off support for APM wakeup. (Using ACPI instead) */ 4003 IXGBE_WRITE_REG(hw, IXGBE_GRC, 4004 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 4005 4006 /* 4007 * Clear Wake Up Status register to prevent any previous wakeup 4008 * events from waking us up immediately after we suspend. 4009 */ 4010 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 4011 4012 /* 4013 * Program the Wakeup Filter Control register with user filter 4014 * settings 4015 */ 4016 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 4017 4018 /* Enable wakeups and power management in Wakeup Control */ 4019 IXGBE_WRITE_REG(hw, IXGBE_WUC, 4020 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 4021 4022 /* X550EM baseT adapters need a special LPLU flow */ 4023 hw->phy.reset_disable = true; 4024 ixgbe_stop(adapter); 4025 error = hw->phy.ops.enter_lplu(hw); 4026 if (error) 4027 device_printf(dev, 4028 "Error entering LPLU: %d\n", error); 4029 hw->phy.reset_disable = false; 4030 } else { 4031 /* Just stop for other adapters */ 4032 ixgbe_stop(adapter); 4033 } 4034 4035 return error; 4036 } 4037 4038 /********************************************************************** 4039 * 4040 * Update the board statistics counters. 
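 * Most of these hardware registers are clear-on-read, so each poll
 * accumulates the new deltas into the running stats.pf totals.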
4041 * 4042 **********************************************************************/ 4043 static void 4044 ixgbe_update_stats_counters(struct adapter *adapter) 4045 { 4046 struct ixgbe_hw *hw = &adapter->hw; 4047 u32 missed_rx = 0, bprc, lxon, lxoff, total; 4048 u64 total_missed_rx = 0; 4049 4050 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 4051 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 4052 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 4053 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 4054 4055 for (int i = 0; i < 16; i++) { 4056 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 4057 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 4058 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 4059 } 4060 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 4061 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 4062 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 4063 /* missed_rx tallies misses for the gprc workaround below */ for (int i = 0; i < 8; i++) { u32 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); missed_rx += mp; /* global total per queue */ adapter->stats.pf.mpc[i] += mp; /* running comprehensive total for stats display */ total_missed_rx += adapter->stats.pf.mpc[i]; } 4064 /* Hardware workaround, gprc counts missed packets */ 4065 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 4066 adapter->stats.pf.gprc -= missed_rx; 4067 4068 if (hw->mac.type != ixgbe_mac_82598EB) { 4069 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + 4070 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 4071 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + 4072 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 4073 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) + 4074 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 4075 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 4076 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 4077 } else { 4078 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 4079 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 4080 /* 82598 only has a counter in the high register */ 4081 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 4082 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 4083 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH); 4084 } 4085 4086 /* 4087 * Workaround: mprc hardware is incorrectly counting 4088 * broadcasts, so for now we subtract those.
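 * For example, if MPRC advanced by 100 and BPRC by 40 over the same
 * interval on an 82598, only 60 of those frames were true multicasts.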
4089 */ 4090 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 4091 adapter->stats.pf.bprc += bprc; 4092 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 4093 if (hw->mac.type == ixgbe_mac_82598EB) 4094 adapter->stats.pf.mprc -= bprc; 4095 4096 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 4097 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 4098 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 4099 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 4100 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 4101 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 4102 4103 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 4104 adapter->stats.pf.lxontxc += lxon; 4105 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 4106 adapter->stats.pf.lxofftxc += lxoff; 4107 total = lxon + lxoff; 4108 4109 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 4110 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 4111 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 4112 adapter->stats.pf.gptc -= total; 4113 adapter->stats.pf.mptc -= total; 4114 adapter->stats.pf.ptc64 -= total; 4115 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN; 4116 4117 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 4118 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 4119 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC); 4120 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 4121 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 4122 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 4123 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 4124 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 4125 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 4126 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 4127 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 4128 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 4129 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 4130 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 4131 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 4132 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC); 4133 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 4134 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 4135 /* Only read FCoE stats on 82599 and later */ 4136 if (hw->mac.type != ixgbe_mac_82598EB) { 4137 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 4138 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 4139 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 4140 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 4141 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 4142 } 4143 4144 /* Fill out the OS statistics structure */ 4145 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc); 4146 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc); 4147 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc); 4148 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc); 4149 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc); 4150 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc); 4151 IXGBE_SET_COLLISIONS(adapter, 0); 4152 IXGBE_SET_IQDROPS(adapter, total_missed_rx); 4153 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs 4154 + adapter->stats.pf.rlec); 4155 } 4156 4157 #if __FreeBSD_version >= 1100036 4158 static uint64_t 4159 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt) 4160 { 4161 struct adapter
*adapter; 4162 struct tx_ring *txr; 4163 uint64_t rv; 4164 4165 adapter = if_getsoftc(ifp); 4166 4167 switch (cnt) { 4168 case IFCOUNTER_IPACKETS: 4169 return (adapter->ipackets); 4170 case IFCOUNTER_OPACKETS: 4171 return (adapter->opackets); 4172 case IFCOUNTER_IBYTES: 4173 return (adapter->ibytes); 4174 case IFCOUNTER_OBYTES: 4175 return (adapter->obytes); 4176 case IFCOUNTER_IMCASTS: 4177 return (adapter->imcasts); 4178 case IFCOUNTER_OMCASTS: 4179 return (adapter->omcasts); 4180 case IFCOUNTER_COLLISIONS: 4181 return (0); 4182 case IFCOUNTER_IQDROPS: 4183 return (adapter->iqdrops); 4184 case IFCOUNTER_OQDROPS: 4185 rv = 0; 4186 txr = adapter->tx_rings; 4187 for (int i = 0; i < adapter->num_queues; i++, txr++) 4188 rv += txr->br->br_drops; 4189 return (rv); 4190 case IFCOUNTER_IERRORS: 4191 return (adapter->ierrors); 4192 default: 4193 return (if_get_counter_default(ifp, cnt)); 4194 } 4195 } 4196 #endif 4197 4198 /** ixgbe_sysctl_tdh_handler - Handler function 4199 * Retrieves the TDH value from the hardware 4200 */ 4201 static int 4202 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 4203 { 4204 int error; 4205 4206 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 4207 if (!txr) return 0; 4208 4209 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); 4210 error = sysctl_handle_int(oidp, &val, 0, req); 4211 if (error || !req->newptr) 4212 return error; 4213 return 0; 4214 } 4215 4216 /** ixgbe_sysctl_tdt_handler - Handler function 4217 * Retrieves the TDT value from the hardware 4218 */ 4219 static int 4220 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 4221 { 4222 int error; 4223 4224 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 4225 if (!txr) return 0; 4226 4227 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); 4228 error = sysctl_handle_int(oidp, &val, 0, req); 4229 if (error || !req->newptr) 4230 return error; 4231 return 0; 4232 } 4233 4234 /** ixgbe_sysctl_rdh_handler - Handler function 4235 * Retrieves the RDH value from the hardware 4236 */ 4237 static int 4238 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 4239 { 4240 int error; 4241 4242 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 4243 if (!rxr) return 0; 4244 4245 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); 4246 error = sysctl_handle_int(oidp, &val, 0, req); 4247 if (error || !req->newptr) 4248 return error; 4249 return 0; 4250 } 4251 4252 /** ixgbe_sysctl_rdt_handler - Handler function 4253 * Retrieves the RDT value from the hardware 4254 */ 4255 static int 4256 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 4257 { 4258 int error; 4259 4260 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 4261 if (!rxr) return 0; 4262 4263 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); 4264 error = sysctl_handle_int(oidp, &val, 0, req); 4265 if (error || !req->newptr) 4266 return error; 4267 return 0; 4268 } 4269 4270 static int 4271 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 4272 { 4273 int error; 4274 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1); 4275 unsigned int reg, usec, rate; 4276 4277 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); 4278 usec = ((reg & 0x0FF8) >> 3); 4279 if (usec > 0) 4280 rate = 500000 / usec; 4281 else 4282 rate = 0; 4283 error = sysctl_handle_int(oidp, &rate, 0, req); 4284 if (error || !req->newptr) 4285 return error; 4286 reg &= ~0xfff; /* default, no limitation */ 4287 ixgbe_max_interrupt_rate = 0; 4288 if (rate > 0 && rate < 500000) { 4289 if 
(rate < 1000) 4290 rate = 1000; 4291 ixgbe_max_interrupt_rate = rate; 4292 reg |= ((4000000/rate) & 0xff8 ); 4293 } 4294 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); 4295 return 0; 4296 } 4297 4298 static void 4299 ixgbe_add_device_sysctls(struct adapter *adapter) 4300 { 4301 device_t dev = adapter->dev; 4302 struct ixgbe_hw *hw = &adapter->hw; 4303 struct sysctl_oid_list *child; 4304 struct sysctl_ctx_list *ctx; 4305 4306 ctx = device_get_sysctl_ctx(dev); 4307 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 4308 4309 /* Sysctls for all devices */ 4310 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", 4311 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4312 ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC); 4313 4314 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", 4315 CTLFLAG_RW, 4316 &ixgbe_enable_aim, 1, "Interrupt Moderation"); 4317 4318 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed", 4319 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4320 ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED); 4321 4322 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test", 4323 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4324 ixgbe_sysctl_thermal_test, "I", "Thermal Test"); 4325 4326 #ifdef IXGBE_DEBUG 4327 /* testing sysctls (for all devices) */ 4328 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state", 4329 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4330 ixgbe_sysctl_power_state, "I", "PCI Power State"); 4331 4332 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config", 4333 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0, 4334 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 4335 #endif 4336 /* for X550 series devices */ 4337 if (hw->mac.type >= ixgbe_mac_X550) 4338 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac", 4339 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4340 ixgbe_sysctl_dmac, "I", "DMA Coalesce"); 4341 4342 /* for X552 backplane devices */ 4343 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { 4344 struct sysctl_oid *eee_node; 4345 struct sysctl_oid_list *eee_list; 4346 4347 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee", 4348 CTLFLAG_RD, NULL, 4349 "Energy Efficient Ethernet sysctls"); 4350 eee_list = SYSCTL_CHILDREN(eee_node); 4351 4352 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable", 4353 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4354 ixgbe_sysctl_eee_enable, "I", 4355 "Enable or Disable EEE"); 4356 4357 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated", 4358 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4359 ixgbe_sysctl_eee_negotiated, "I", 4360 "EEE negotiated on link"); 4361 4362 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status", 4363 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4364 ixgbe_sysctl_eee_tx_lpi_status, "I", 4365 "Whether or not TX link is in LPI state"); 4366 4367 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status", 4368 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4369 ixgbe_sysctl_eee_rx_lpi_status, "I", 4370 "Whether or not RX link is in LPI state"); 4371 4372 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay", 4373 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4374 ixgbe_sysctl_eee_tx_lpi_delay, "I", 4375 "TX LPI entry delay in microseconds"); 4376 } 4377 4378 /* for WoL-capable devices */ 4379 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 4380 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable", 4381 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4382 ixgbe_sysctl_wol_enable, "I", 4383 "Enable/Disable Wake on LAN"); 4384 4385 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc", 4386 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 4387 ixgbe_sysctl_wufc, "I", 4388 "Enable/Disable 
Wake Up Filters"); 4389 } 4390 4391 /* for X552/X557-AT devices */ 4392 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 4393 struct sysctl_oid *phy_node; 4394 struct sysctl_oid_list *phy_list; 4395 4396 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy", 4397 CTLFLAG_RD, NULL, 4398 "External PHY sysctls"); 4399 phy_list = SYSCTL_CHILDREN(phy_node); 4400 4401 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp", 4402 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4403 ixgbe_sysctl_phy_temp, "I", 4404 "Current External PHY Temperature (Celsius)"); 4405 4406 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred", 4407 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, 4408 ixgbe_sysctl_phy_overtemp_occurred, "I", 4409 "External PHY High Temperature Event Occurred"); 4410 } 4411 } 4412 4413 /* 4414 * Add sysctl variables, one per statistic, to the system. 4415 */ 4416 static void 4417 ixgbe_add_hw_stats(struct adapter *adapter) 4418 { 4419 device_t dev = adapter->dev; 4420 4421 struct tx_ring *txr = adapter->tx_rings; 4422 struct rx_ring *rxr = adapter->rx_rings; 4423 4424 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 4425 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 4426 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 4427 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 4428 4429 struct sysctl_oid *stat_node, *queue_node; 4430 struct sysctl_oid_list *stat_list, *queue_list; 4431 4432 #define QUEUE_NAME_LEN 32 4433 char namebuf[QUEUE_NAME_LEN]; 4434 4435 /* Driver Statistics */ 4436 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 4437 CTLFLAG_RD, &adapter->dropped_pkts, 4438 "Driver dropped packets"); 4439 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed", 4440 CTLFLAG_RD, &adapter->mbuf_defrag_failed, 4441 "m_defrag() failed"); 4442 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 4443 CTLFLAG_RD, &adapter->watchdog_events, 4444 "Watchdog timeouts"); 4445 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 4446 CTLFLAG_RD, &adapter->link_irq, 4447 "Link MSIX IRQ Handled"); 4448 4449 for (int i = 0; i < adapter->num_queues; i++, txr++) { 4450 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 4451 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 4452 CTLFLAG_RD, NULL, "Queue Name"); 4453 queue_list = SYSCTL_CHILDREN(queue_node); 4454 4455 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 4456 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i], 4457 sizeof(&adapter->queues[i]), 4458 ixgbe_sysctl_interrupt_rate_handler, "IU", 4459 "Interrupt Rate"); 4460 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 4461 CTLFLAG_RD, &(adapter->queues[i].irqs), 4462 "irqs on this queue"); 4463 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 4464 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 4465 ixgbe_sysctl_tdh_handler, "IU", 4466 "Transmit Descriptor Head"); 4467 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 4468 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 4469 ixgbe_sysctl_tdt_handler, "IU", 4470 "Transmit Descriptor Tail"); 4471 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx", 4472 CTLFLAG_RD, &txr->tso_tx, 4473 "TSO"); 4474 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup", 4475 CTLFLAG_RD, &txr->no_tx_dma_setup, 4476 "Driver tx dma failure in xmit"); 4477 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", 4478 CTLFLAG_RD, &txr->no_desc_avail, 4479 "Queue No Descriptor Available"); 4480 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 4481 CTLFLAG_RD, &txr->total_packets, 4482 "Queue Packets 
Transmitted"); 4483 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops", 4484 CTLFLAG_RD, &txr->br->br_drops, 4485 "Packets dropped in buf_ring"); 4486 } 4487 4488 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 4489 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 4490 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 4491 CTLFLAG_RD, NULL, "Queue Name"); 4492 queue_list = SYSCTL_CHILDREN(queue_node); 4493 4494 struct lro_ctrl *lro = &rxr->lro; 4495 4496 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 4497 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 4498 CTLFLAG_RD, NULL, "Queue Name"); 4499 queue_list = SYSCTL_CHILDREN(queue_node); 4500 4501 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 4502 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 4503 ixgbe_sysctl_rdh_handler, "IU", 4504 "Receive Descriptor Head"); 4505 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 4506 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 4507 ixgbe_sysctl_rdt_handler, "IU", 4508 "Receive Descriptor Tail"); 4509 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 4510 CTLFLAG_RD, &rxr->rx_packets, 4511 "Queue Packets Received"); 4512 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 4513 CTLFLAG_RD, &rxr->rx_bytes, 4514 "Queue Bytes Received"); 4515 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 4516 CTLFLAG_RD, &rxr->rx_copies, 4517 "Copied RX Frames"); 4518 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued", 4519 CTLFLAG_RD, &lro->lro_queued, 0, 4520 "LRO Queued"); 4521 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed", 4522 CTLFLAG_RD, &lro->lro_flushed, 0, 4523 "LRO Flushed"); 4524 } 4525 4526 /* MAC stats get the own sub node */ 4527 4528 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 4529 CTLFLAG_RD, NULL, "MAC Statistics"); 4530 stat_list = SYSCTL_CHILDREN(stat_node); 4531 4532 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 4533 CTLFLAG_RD, &stats->crcerrs, 4534 "CRC Errors"); 4535 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", 4536 CTLFLAG_RD, &stats->illerrc, 4537 "Illegal Byte Errors"); 4538 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", 4539 CTLFLAG_RD, &stats->errbc, 4540 "Byte Errors"); 4541 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", 4542 CTLFLAG_RD, &stats->mspdc, 4543 "MAC Short Packets Discarded"); 4544 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", 4545 CTLFLAG_RD, &stats->mlfc, 4546 "MAC Local Faults"); 4547 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", 4548 CTLFLAG_RD, &stats->mrfc, 4549 "MAC Remote Faults"); 4550 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", 4551 CTLFLAG_RD, &stats->rlec, 4552 "Receive Length Errors"); 4553 4554 /* Flow Control stats */ 4555 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 4556 CTLFLAG_RD, &stats->lxontxc, 4557 "Link XON Transmitted"); 4558 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 4559 CTLFLAG_RD, &stats->lxonrxc, 4560 "Link XON Received"); 4561 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 4562 CTLFLAG_RD, &stats->lxofftxc, 4563 "Link XOFF Transmitted"); 4564 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 4565 CTLFLAG_RD, &stats->lxoffrxc, 4566 "Link XOFF Received"); 4567 4568 /* Packet Reception Stats */ 4569 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", 4570 CTLFLAG_RD, &stats->tor, 4571 "Total Octets Received"); 4572 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 4573 CTLFLAG_RD, &stats->gorc, 4574 "Good Octets 
Received"); 4575 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", 4576 CTLFLAG_RD, &stats->tpr, 4577 "Total Packets Received"); 4578 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 4579 CTLFLAG_RD, &stats->gprc, 4580 "Good Packets Received"); 4581 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 4582 CTLFLAG_RD, &stats->mprc, 4583 "Multicast Packets Received"); 4584 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", 4585 CTLFLAG_RD, &stats->bprc, 4586 "Broadcast Packets Received"); 4587 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 4588 CTLFLAG_RD, &stats->prc64, 4589 "64 byte frames received "); 4590 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 4591 CTLFLAG_RD, &stats->prc127, 4592 "65-127 byte frames received"); 4593 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 4594 CTLFLAG_RD, &stats->prc255, 4595 "128-255 byte frames received"); 4596 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 4597 CTLFLAG_RD, &stats->prc511, 4598 "256-511 byte frames received"); 4599 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 4600 CTLFLAG_RD, &stats->prc1023, 4601 "512-1023 byte frames received"); 4602 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 4603 CTLFLAG_RD, &stats->prc1522, 4604 "1023-1522 byte frames received"); 4605 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 4606 CTLFLAG_RD, &stats->ruc, 4607 "Receive Undersized"); 4608 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 4609 CTLFLAG_RD, &stats->rfc, 4610 "Fragmented Packets Received "); 4611 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 4612 CTLFLAG_RD, &stats->roc, 4613 "Oversized Packets Received"); 4614 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 4615 CTLFLAG_RD, &stats->rjc, 4616 "Received Jabber"); 4617 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 4618 CTLFLAG_RD, &stats->mngprc, 4619 "Management Packets Received"); 4620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 4621 CTLFLAG_RD, &stats->mngptc, 4622 "Management Packets Dropped"); 4623 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 4624 CTLFLAG_RD, &stats->xec, 4625 "Checksum Errors"); 4626 4627 /* Packet Transmission Stats */ 4628 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 4629 CTLFLAG_RD, &stats->gotc, 4630 "Good Octets Transmitted"); 4631 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 4632 CTLFLAG_RD, &stats->tpt, 4633 "Total Packets Transmitted"); 4634 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 4635 CTLFLAG_RD, &stats->gptc, 4636 "Good Packets Transmitted"); 4637 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 4638 CTLFLAG_RD, &stats->bptc, 4639 "Broadcast Packets Transmitted"); 4640 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 4641 CTLFLAG_RD, &stats->mptc, 4642 "Multicast Packets Transmitted"); 4643 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 4644 CTLFLAG_RD, &stats->mngptc, 4645 "Management Packets Transmitted"); 4646 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 4647 CTLFLAG_RD, &stats->ptc64, 4648 "64 byte frames transmitted "); 4649 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 4650 CTLFLAG_RD, &stats->ptc127, 4651 "65-127 byte frames transmitted"); 4652 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 4653 CTLFLAG_RD, &stats->ptc255, 4654 "128-255 byte frames transmitted"); 
4655 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 4656 CTLFLAG_RD, &stats->ptc511, 4657 "256-511 byte frames transmitted"); 4658 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 4659 CTLFLAG_RD, &stats->ptc1023, 4660 "512-1023 byte frames transmitted"); 4661 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 4662 CTLFLAG_RD, &stats->ptc1522, 4663 "1024-1522 byte frames transmitted"); 4664 } 4665 4666 static void 4667 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name, 4668 const char *description, int *limit, int value) 4669 { 4670 *limit = value; 4671 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 4672 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 4673 OID_AUTO, name, CTLFLAG_RW, limit, value, description); 4674 } 4675 4676 /* 4677 ** Set flow control using sysctl: 4678 ** Flow control values: 4679 ** 0 - off 4680 ** 1 - rx pause 4681 ** 2 - tx pause 4682 ** 3 - full 4683 */ 4684 static int 4685 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS) 4686 { 4687 int error, fc; 4688 struct adapter *adapter; 4689 4690 adapter = (struct adapter *) arg1; 4691 fc = adapter->fc; 4692 4693 error = sysctl_handle_int(oidp, &fc, 0, req); 4694 if ((error) || (req->newptr == NULL)) 4695 return (error); 4696 4697 /* Don't bother if it's not changed */ 4698 if (adapter->fc == fc) 4699 return (0); 4700 4701 return ixgbe_set_flowcntl(adapter, fc); 4702 } 4703 4704 4705 static int 4706 ixgbe_set_flowcntl(struct adapter *adapter, int fc) 4707 { 4708 4709 switch (fc) { 4710 case ixgbe_fc_rx_pause: 4711 case ixgbe_fc_tx_pause: 4712 case ixgbe_fc_full: 4713 adapter->hw.fc.requested_mode = fc; 4714 if (adapter->num_queues > 1) 4715 ixgbe_disable_rx_drop(adapter); 4716 break; 4717 case ixgbe_fc_none: 4718 adapter->hw.fc.requested_mode = ixgbe_fc_none; 4719 if (adapter->num_queues > 1) 4720 ixgbe_enable_rx_drop(adapter); 4721 break; 4722 default: 4723 return (EINVAL); 4724 } 4725 adapter->fc = fc; 4726 /* Don't autoneg if forcing a value */ 4727 adapter->hw.fc.disable_fc_autoneg = TRUE; 4728 ixgbe_fc_enable(&adapter->hw); 4729 return (0); 4730 } 4731 4732 /* 4733 ** Control advertised link speed: 4734 ** Flags: 4735 ** 0x1 - advertise 100 Mb 4736 ** 0x2 - advertise 1G 4737 ** 0x4 - advertise 10G 4738 */ 4739 static int 4740 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS) 4741 { 4742 int error, advertise; 4743 struct adapter *adapter; 4744 4745 adapter = (struct adapter *) arg1; 4746 advertise = adapter->advertise; 4747 4748 error = sysctl_handle_int(oidp, &advertise, 0, req); 4749 if ((error) || (req->newptr == NULL)) 4750 return (error); 4751 4752 return ixgbe_set_advertise(adapter, advertise); 4753 } 4754 4755 static int 4756 ixgbe_set_advertise(struct adapter *adapter, int advertise) 4757 { 4758 device_t dev; 4759 struct ixgbe_hw *hw; 4760 ixgbe_link_speed speed; 4761 4762 /* Checks to validate new value */ 4763 if (adapter->advertise == advertise) /* no change */ 4764 return (0); 4765 4766 hw = &adapter->hw; 4767 dev = adapter->dev; 4768 4769 /* No speed changes for backplane media */ 4770 if (hw->phy.media_type == ixgbe_media_type_backplane) 4771 return (ENODEV); 4772 4773 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 4774 (hw->phy.multispeed_fiber))) { 4775 device_printf(dev, 4776 "Advertised speed can only be set on copper or " 4777 "multispeed fiber media types.\n"); 4778 return (EINVAL); 4779 } 4780 4781 if (advertise < 0x1 || advertise > 0x7) { 4782 device_printf(dev, 4783 "Invalid advertised speed; valid modes are 0x1
through 0x7\n"); 4784 return (EINVAL); 4785 } 4786 4787 if ((advertise & 0x1) 4788 && (hw->mac.type != ixgbe_mac_X540) 4789 && (hw->mac.type != ixgbe_mac_X550)) { 4790 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n"); 4791 return (EINVAL); 4792 } 4793 4794 /* Set new value and report new advertised mode */ 4795 speed = 0; 4796 if (advertise & 0x1) 4797 speed |= IXGBE_LINK_SPEED_100_FULL; 4798 if (advertise & 0x2) 4799 speed |= IXGBE_LINK_SPEED_1GB_FULL; 4800 if (advertise & 0x4) 4801 speed |= IXGBE_LINK_SPEED_10GB_FULL; 4802 adapter->advertise = advertise; 4803 4804 hw->mac.autotry_restart = TRUE; 4805 hw->mac.ops.setup_link(hw, speed, TRUE); 4806 4807 return (0); 4808 } 4809 4810 /* 4811 * The following two sysctls are for X552/X557-AT devices; 4812 * they deal with the external PHY used in them. 4813 */ 4814 static int 4815 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) 4816 { 4817 struct adapter *adapter = (struct adapter *) arg1; 4818 struct ixgbe_hw *hw = &adapter->hw; 4819 u16 reg; 4820 4821 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4822 device_printf(adapter->dev, 4823 "Device has no supported external thermal sensor.\n"); 4824 return (ENODEV); 4825 } 4826 4827 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 4828 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, 4829 &reg)) { 4830 device_printf(adapter->dev, 4831 "Error reading from PHY's current temperature register\n"); 4832 return (EAGAIN); 4833 } 4834 4835 /* Shift temp for output */ 4836 reg = reg >> 8; 4837 4838 return (sysctl_handle_int(oidp, NULL, reg, req)); 4839 } 4840 4841 /* 4842 * Reports whether the current PHY temperature is over 4843 * the overtemp threshold. 4844 * - This is reported directly from the PHY 4845 */ 4846 static int 4847 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) 4848 { 4849 struct adapter *adapter = (struct adapter *) arg1; 4850 struct ixgbe_hw *hw = &adapter->hw; 4851 u16 reg; 4852 4853 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4854 device_printf(adapter->dev, 4855 "Device has no supported external thermal sensor.\n"); 4856 return (ENODEV); 4857 } 4858 4859 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 4860 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, 4861 &reg)) { 4862 device_printf(adapter->dev, 4863 "Error reading from PHY's temperature status register\n"); 4864 return (EAGAIN); 4865 } 4866 4867 /* Get occurrence bit */ 4868 reg = !!(reg & 0x4000); 4869 return (sysctl_handle_int(oidp, 0, reg, req)); 4870 } 4871 4872 /* 4873 ** Thermal Shutdown Trigger (internal MAC) 4874 ** - Set this to 1 to cause an overtemp event to occur 4875 */ 4876 static int 4877 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS) 4878 { 4879 struct adapter *adapter = (struct adapter *) arg1; 4880 struct ixgbe_hw *hw = &adapter->hw; 4881 int error, fire = 0; 4882 4883 error = sysctl_handle_int(oidp, &fire, 0, req); 4884 if ((error) || (req->newptr == NULL)) 4885 return (error); 4886 4887 if (fire) { 4888 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS); 4889 reg |= IXGBE_EICR_TS; 4890 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg); 4891 } 4892 4893 return (0); 4894 } 4895 4896 /* 4897 ** Manage DMA Coalescing. 4898 ** Control values: 4899 ** 0/1 - off / on (use default value of 1000) 4900 ** 4901 ** Legal timer values are: 4902 ** 50,100,250,500,1000,2000,5000,10000 4903 ** 4904 ** Turning off interrupt moderation will also turn this off.
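** For example (hypothetical unit number; timer values assumed to be in
** microseconds): `sysctl dev.ix.0.dmac=1000` enables coalescing with
** the default watchdog, and `sysctl dev.ix.0.dmac=0` disables it.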
4905 */ 4906 static int 4907 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4908 { 4909 struct adapter *adapter = (struct adapter *) arg1; 4910 struct ifnet *ifp = adapter->ifp; 4911 int error; 4912 u32 newval; 4913 4914 newval = adapter->dmac; 4915 error = sysctl_handle_int(oidp, &newval, 0, req); 4916 if ((error) || (req->newptr == NULL)) 4917 return (error); 4918 4919 switch (newval) { 4920 case 0: 4921 /* Disabled */ 4922 adapter->dmac = 0; 4923 break; 4924 case 1: 4925 /* Enable and use default */ 4926 adapter->dmac = 1000; 4927 break; 4928 case 50: 4929 case 100: 4930 case 250: 4931 case 500: 4932 case 1000: 4933 case 2000: 4934 case 5000: 4935 case 10000: 4936 /* Legal values - allow */ 4937 adapter->dmac = newval; 4938 break; 4939 default: 4940 /* Do nothing, illegal value */ 4941 return (EINVAL); 4942 } 4943 4944 /* Re-initialize hardware if it's already running */ 4945 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4946 ixgbe_init(adapter); 4947 4948 return (0); 4949 } 4950 4951 #ifdef IXGBE_DEBUG 4952 /** 4953 * Sysctl to test power states 4954 * Values: 4955 * 0 - set device to D0 4956 * 3 - set device to D3 4957 * (none) - get current device power state 4958 */ 4959 static int 4960 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS) 4961 { 4962 struct adapter *adapter = (struct adapter *) arg1; 4963 device_t dev = adapter->dev; 4964 int curr_ps, new_ps, error = 0; 4965 4966 curr_ps = new_ps = pci_get_powerstate(dev); 4967 4968 error = sysctl_handle_int(oidp, &new_ps, 0, req); 4969 if ((error) || (req->newptr == NULL)) 4970 return (error); 4971 4972 if (new_ps == curr_ps) 4973 return (0); 4974 4975 if (new_ps == 3 && curr_ps == 0) 4976 error = DEVICE_SUSPEND(dev); 4977 else if (new_ps == 0 && curr_ps == 3) 4978 error = DEVICE_RESUME(dev); 4979 else 4980 return (EINVAL); 4981 4982 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 4983 4984 return (error); 4985 } 4986 #endif 4987 /* 4988 * Sysctl to enable/disable the WoL capability, if supported by the adapter. 4989 * Values: 4990 * 0 - disabled 4991 * 1 - enabled 4992 */ 4993 static int 4994 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) 4995 { 4996 struct adapter *adapter = (struct adapter *) arg1; 4997 struct ixgbe_hw *hw = &adapter->hw; 4998 int new_wol_enabled; 4999 int error = 0; 5000 5001 new_wol_enabled = hw->wol_enabled; 5002 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); 5003 if ((error) || (req->newptr == NULL)) 5004 return (error); 5005 new_wol_enabled = !!(new_wol_enabled); 5006 if (new_wol_enabled == hw->wol_enabled) 5007 return (0); 5008 5009 if (new_wol_enabled > 0 && !adapter->wol_support) 5010 return (ENODEV); 5011 else 5012 hw->wol_enabled = new_wol_enabled; 5013 5014 return (0); 5015 } 5016 5017 /* 5018 * Sysctl to enable/disable the Energy Efficient Ethernet capability, 5019 * if supported by the adapter. 
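 * Toggling the value re-initializes the interface if it is running.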
5020 * Values: 5021 * 0 - disabled 5022 * 1 - enabled 5023 */ 5024 static int 5025 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 5026 { 5027 struct adapter *adapter = (struct adapter *) arg1; 5028 struct ixgbe_hw *hw = &adapter->hw; 5029 struct ifnet *ifp = adapter->ifp; 5030 int new_eee_enabled, error = 0; 5031 5032 new_eee_enabled = adapter->eee_enabled; 5033 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req); 5034 if ((error) || (req->newptr == NULL)) 5035 return (error); 5036 new_eee_enabled = !!(new_eee_enabled); 5037 if (new_eee_enabled == adapter->eee_enabled) 5038 return (0); 5039 5040 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee) 5041 return (ENODEV); 5042 else 5043 adapter->eee_enabled = new_eee_enabled; 5044 5045 /* Re-initialize hardware if it's already running */ 5046 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 5047 ixgbe_init(adapter); 5048 5049 return (0); 5050 } 5051 5052 /* 5053 * Read-only sysctl indicating whether EEE support was negotiated 5054 * on the link. 5055 */ 5056 static int 5057 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS) 5058 { 5059 struct adapter *adapter = (struct adapter *) arg1; 5060 struct ixgbe_hw *hw = &adapter->hw; 5061 bool status; 5062 5063 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG); 5064 5065 return (sysctl_handle_int(oidp, 0, status, req)); 5066 } 5067 5068 /* 5069 * Read-only sysctl indicating whether RX Link is in LPI state. 5070 */ 5071 static int 5072 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS) 5073 { 5074 struct adapter *adapter = (struct adapter *) arg1; 5075 struct ixgbe_hw *hw = &adapter->hw; 5076 bool status; 5077 5078 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & 5079 IXGBE_EEE_RX_LPI_STATUS); 5080 5081 return (sysctl_handle_int(oidp, 0, status, req)); 5082 } 5083 5084 /* 5085 * Read-only sysctl indicating whether TX Link is in LPI state. 5086 */ 5087 static int 5088 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS) 5089 { 5090 struct adapter *adapter = (struct adapter *) arg1; 5091 struct ixgbe_hw *hw = &adapter->hw; 5092 bool status; 5093 5094 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & 5095 IXGBE_EEE_TX_LPI_STATUS); 5096 5097 return (sysctl_handle_int(oidp, 0, status, req)); 5098 } 5099 5100 /* 5101 * Read-only sysctl indicating TX Link LPI delay 5102 */ 5103 static int 5104 ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS) 5105 { 5106 struct adapter *adapter = (struct adapter *) arg1; 5107 struct ixgbe_hw *hw = &adapter->hw; 5108 u32 reg; 5109 5110 reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU); 5111 5112 return (sysctl_handle_int(oidp, 0, reg >> 26, req)); 5113 } 5114 5115 /* 5116 * Sysctl to select the types of received packets that will wake 5117 * the adapter. 5118 * WUFC - Wake Up Filter Control 5119 * Flags: 5120 * 0x1 - Link Status Change 5121 * 0x2 - Magic Packet 5122 * 0x4 - Direct Exact 5123 * 0x8 - Directed Multicast 5124 * 0x10 - Broadcast 5125 * 0x20 - ARP/IPv4 Request Packet 5126 * 0x40 - Direct IPv4 Packet 5127 * 0x80 - Direct IPv6 Packet 5128 * 5129 * Setting any other flag will cause the sysctl to return an 5130 * error.
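 * For example, writing 0x6 requests wake on Magic Packet (0x2) plus
 * Direct Exact (0x4); note the handler below merges new flags with
 * the previously saved filter bits rather than replacing them.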
5131 */ 5132 static int 5133 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS) 5134 { 5135 struct adapter *adapter = (struct adapter *) arg1; 5136 int error = 0; 5137 u32 new_wufc; 5138 5139 new_wufc = adapter->wufc; 5140 5141 error = sysctl_handle_int(oidp, &new_wufc, 0, req); 5142 if ((error) || (req->newptr == NULL)) 5143 return (error); 5144 if (new_wufc == adapter->wufc) 5145 return (0); 5146 5147 if (new_wufc & 0xffffff00) 5148 return (EINVAL); 5149 else { 5150 new_wufc &= 0xff; 5151 new_wufc |= (0xffffff & adapter->wufc); 5152 adapter->wufc = new_wufc; 5153 } 5154 5155 return (0); 5156 } 5157 5158 #ifdef IXGBE_DEBUG 5159 static int 5160 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS) 5161 { 5162 struct adapter *adapter = (struct adapter *)arg1; 5163 struct ixgbe_hw *hw = &adapter->hw; 5164 device_t dev = adapter->dev; 5165 int error = 0, reta_size; 5166 struct sbuf *buf; 5167 u32 reg; 5168 5169 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5170 if (!buf) { 5171 device_printf(dev, "Could not allocate sbuf for output.\n"); 5172 return (ENOMEM); 5173 } 5174 5175 // TODO: use sbufs to make a string to print out 5176 /* Set multiplier for RETA setup and table size based on MAC */ 5177 switch (adapter->hw.mac.type) { 5178 case ixgbe_mac_X550: 5179 case ixgbe_mac_X550EM_x: 5180 reta_size = 128; 5181 break; 5182 default: 5183 reta_size = 32; 5184 break; 5185 } 5186 5187 /* Print out the redirection table */ 5188 sbuf_cat(buf, "\n"); 5189 for (int i = 0; i < reta_size; i++) { 5190 if (i < 32) { 5191 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); 5192 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); 5193 } else { 5194 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); 5195 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); 5196 } 5197 } 5198 5199 // TODO: print more config 5200 5201 error = sbuf_finish(buf); 5202 if (error) 5203 device_printf(dev, "Error finishing sbuf: %d\n", error); 5204 5205 sbuf_delete(buf); 5206 return (0); 5207 } 5208 #endif /* IXGBE_DEBUG */ 5209 5210 /* 5211 ** Enable the hardware to drop packets when the buffer is 5212 ** full. This is useful with multiqueue, so that no single 5213 ** full queue stalls the entire RX engine. We only enable 5214 ** this when multiqueue is in use AND when Flow Control is 5215 ** disabled.
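** Per-queue drop is controlled through the SRRCTL drop-enable bit
** below; under PCI_IOV the same policy is applied to each VF pool
** through the QDE register.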
5216 */ 5217 static void 5218 ixgbe_enable_rx_drop(struct adapter *adapter) 5219 { 5220 struct ixgbe_hw *hw = &adapter->hw; 5221 5222 for (int i = 0; i < adapter->num_queues; i++) { 5223 struct rx_ring *rxr = &adapter->rx_rings[i]; 5224 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 5225 srrctl |= IXGBE_SRRCTL_DROP_EN; 5226 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 5227 } 5228 #ifdef PCI_IOV 5229 /* enable drop for each vf */ 5230 for (int i = 0; i < adapter->num_vfs; i++) { 5231 IXGBE_WRITE_REG(hw, IXGBE_QDE, 5232 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | 5233 IXGBE_QDE_ENABLE)); 5234 } 5235 #endif 5236 } 5237 5238 static void 5239 ixgbe_disable_rx_drop(struct adapter *adapter) 5240 { 5241 struct ixgbe_hw *hw = &adapter->hw; 5242 5243 for (int i = 0; i < adapter->num_queues; i++) { 5244 struct rx_ring *rxr = &adapter->rx_rings[i]; 5245 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 5246 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 5247 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 5248 } 5249 #ifdef PCI_IOV 5250 /* disable drop for each vf */ 5251 for (int i = 0; i < adapter->num_vfs; i++) { 5252 IXGBE_WRITE_REG(hw, IXGBE_QDE, 5253 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); 5254 } 5255 #endif 5256 } 5257 5258 static void 5259 ixgbe_rearm_queues(struct adapter *adapter, u64 queues) 5260 { 5261 u32 mask; 5262 5263 switch (adapter->hw.mac.type) { 5264 case ixgbe_mac_82598EB: 5265 mask = (IXGBE_EIMS_RTX_QUEUE & queues); 5266 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 5267 break; 5268 case ixgbe_mac_82599EB: 5269 case ixgbe_mac_X540: 5270 case ixgbe_mac_X550: 5271 case ixgbe_mac_X550EM_x: 5272 mask = (queues & 0xFFFFFFFF); 5273 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); 5274 mask = (queues >> 32); 5275 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); 5276 break; 5277 default: 5278 break; 5279 } 5280 } 5281 5282 #ifdef PCI_IOV 5283 5284 /* 5285 ** Support functions for SRIOV/VF management 5286 */ 5287 5288 static void 5289 ixgbe_ping_all_vfs(struct adapter *adapter) 5290 { 5291 struct ixgbe_vf *vf; 5292 5293 for (int i = 0; i < adapter->num_vfs; i++) { 5294 vf = &adapter->vfs[i]; 5295 if (vf->flags & IXGBE_VF_ACTIVE) 5296 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG); 5297 } 5298 } 5299 5300 5301 static void 5302 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf, 5303 uint16_t tag) 5304 { 5305 struct ixgbe_hw *hw; 5306 uint32_t vmolr, vmvir; 5307 5308 hw = &adapter->hw; 5309 5310 vf->vlan_tag = tag; 5311 5312 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool)); 5313 5314 /* Do not receive packets that pass inexact filters. */ 5315 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE); 5316 5317 /* Disable Multicast Promiscuous Mode. */ 5318 vmolr &= ~IXGBE_VMOLR_MPE; 5319 5320 /* Accept broadcasts. */ 5321 vmolr |= IXGBE_VMOLR_BAM; 5322 5323 if (tag == 0) { 5324 /* Accept non-vlan tagged traffic. */ 5325 //vmolr |= IXGBE_VMOLR_AUPE; 5326 5327 /* Allow VM to tag outgoing traffic; no default tag. */ 5328 vmvir = 0; 5329 } else { 5330 /* Require vlan-tagged traffic. */ 5331 vmolr &= ~IXGBE_VMOLR_AUPE; 5332 5333 /* Tag all traffic with the provided vlan tag.
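 * (VLANA_DEFAULT marks this as the pool's default VLAN, so the
 * hardware inserts the tag on traffic sent from the VF.)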
*/ 5334 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT); 5335 } 5336 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr); 5337 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir); 5338 } 5339 5340 5341 static boolean_t 5342 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf) 5343 { 5344 5345 /* 5346 * Frame size compatibility between PF and VF is only a problem on 5347 * 82599-based cards. X540 and later support any combination of jumbo 5348 * frames on PFs and VFs. 5349 */ 5350 if (adapter->hw.mac.type != ixgbe_mac_82599EB) 5351 return (TRUE); 5352 5353 switch (vf->api_ver) { 5354 case IXGBE_API_VER_1_0: 5355 case IXGBE_API_VER_UNKNOWN: 5356 /* 5357 * On legacy (1.0 and older) VF versions, we don't support jumbo 5358 * frames on either the PF or the VF. 5359 */ 5360 if (adapter->max_frame_size > ETHER_MAX_LEN || 5361 vf->max_frame_size > ETHER_MAX_LEN) 5362 return (FALSE); 5363 5364 return (TRUE); 5365 5366 case IXGBE_API_VER_1_1: 5367 default: 5368 /* 5369 * 1.1 or later VF versions always work if they aren't using 5370 * jumbo frames. 5371 */ 5372 if (vf->max_frame_size <= ETHER_MAX_LEN) 5373 return (TRUE); 5374 5375 /* 5376 * Jumbo frames only work with VFs if the PF is also using jumbo 5377 * frames. 5378 */ 5379 if (adapter->max_frame_size <= ETHER_MAX_LEN) 5380 return (TRUE); 5381 5382 return (FALSE); 5383 5384 } 5385 } 5386 5387 5388 static void 5389 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf) 5390 { 5391 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan); 5392 5393 // XXX clear multicast addresses 5394 5395 ixgbe_clear_rar(&adapter->hw, vf->rar_index); 5396 5397 vf->api_ver = IXGBE_API_VER_UNKNOWN; 5398 } 5399 5400 5401 static void 5402 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf) 5403 { 5404 struct ixgbe_hw *hw; 5405 uint32_t vf_index, vfte; 5406 5407 hw = &adapter->hw; 5408 5409 vf_index = IXGBE_VF_INDEX(vf->pool); 5410 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index)); 5411 vfte |= IXGBE_VF_BIT(vf->pool); 5412 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte); 5413 } 5414 5415 5416 static void 5417 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf) 5418 { 5419 struct ixgbe_hw *hw; 5420 uint32_t vf_index, vfre; 5421 5422 hw = &adapter->hw; 5423 5424 vf_index = IXGBE_VF_INDEX(vf->pool); 5425 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index)); 5426 if (ixgbe_vf_frame_size_compatible(adapter, vf)) 5427 vfre |= IXGBE_VF_BIT(vf->pool); 5428 else 5429 vfre &= ~IXGBE_VF_BIT(vf->pool); 5430 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre); 5431 } 5432 5433 5434 static void 5435 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) 5436 { 5437 struct ixgbe_hw *hw; 5438 uint32_t ack; 5439 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN]; 5440 5441 hw = &adapter->hw; 5442 5443 ixgbe_process_vf_reset(adapter, vf); 5444 5445 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { 5446 ixgbe_set_rar(&adapter->hw, vf->rar_index, 5447 vf->ether_addr, vf->pool, TRUE); 5448 ack = IXGBE_VT_MSGTYPE_ACK; 5449 } else 5450 ack = IXGBE_VT_MSGTYPE_NACK; 5451 5452 ixgbe_vf_enable_transmit(adapter, vf); 5453 ixgbe_vf_enable_receive(adapter, vf); 5454 5455 vf->flags |= IXGBE_VF_CTS; 5456 5457 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS; 5458 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); 5459 resp[3] = hw->mac.mc_filter_type; 5460 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool); 5461 } 5462 5463 5464 static void 5465 ixgbe_vf_set_mac(struct adapter *adapter, struct
ixgbe_vf *vf, uint32_t *msg) 5467 { 5468 uint8_t *mac; 5469 5470 mac = (uint8_t*)&msg[1]; 5471 5472 /* Check that the VF has permission to change the MAC address. */ 5473 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) { 5474 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5475 return; 5476 } 5477 5478 if (ixgbe_validate_mac_addr(mac) != 0) { 5479 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5480 return; 5481 } 5482 5483 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); 5484 5485 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, 5486 vf->pool, TRUE); 5487 5488 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5489 } 5490 5491 5492 /* 5493 ** VF multicast addresses are set by using the appropriate bit in 5494 ** one of 128 32-bit MTA registers (4096 possible hash bits). 5495 */ 5496 static void 5497 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg) 5498 { 5499 u16 *list = (u16*)&msg[1]; 5500 int entries; 5501 u32 vmolr, vec_bit, vec_reg, mta_reg; 5502 5503 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; 5504 entries = min(entries, IXGBE_MAX_VF_MC); 5505 5506 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool)); 5507 5508 vf->num_mc_hashes = entries; 5509 5510 /* Set the appropriate MTA bit */ 5511 for (int i = 0; i < entries; i++) { 5512 vf->mc_hash[i] = list[i]; 5513 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F; 5514 vec_bit = vf->mc_hash[i] & 0x1F; 5515 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg)); 5516 mta_reg |= (1 << vec_bit); 5517 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg); 5518 } 5519 5520 vmolr |= IXGBE_VMOLR_ROMPE; 5521 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr); 5522 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5523 return; 5524 } 5525 5526 5527 static void 5528 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) 5529 { 5530 struct ixgbe_hw *hw; 5531 int enable; 5532 uint16_t tag; 5533 5534 hw = &adapter->hw; 5535 enable = IXGBE_VT_MSGINFO(msg[0]); 5536 tag = msg[1] & IXGBE_VLVF_VLANID_MASK; 5537 5538 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) { 5539 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5540 return; 5541 } 5542 5543 /* It is illegal to enable vlan tag 0. */ 5544 if (tag == 0 && enable != 0) { 5545 ixgbe_send_vf_nack(adapter, vf, msg[0]); 5546 return; 5547 } 5548 5549 ixgbe_set_vfta(hw, tag, vf->pool, enable); 5550 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5551 } 5552 5553 5554 static void 5555 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) 5556 { 5557 struct ixgbe_hw *hw; 5558 uint32_t vf_max_size, pf_max_size, mhadd; 5559 5560 hw = &adapter->hw; 5561 vf_max_size = msg[1]; 5562 5563 if (vf_max_size < ETHER_CRC_LEN) { 5564 /* We intentionally ACK invalid LPE requests. */ 5565 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5566 return; 5567 } 5568 5569 vf_max_size -= ETHER_CRC_LEN; 5570 5571 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) { 5572 /* We intentionally ACK invalid LPE requests. */ 5573 ixgbe_send_vf_ack(adapter, vf, msg[0]); 5574 return; 5575 } 5576 5577 vf->max_frame_size = vf_max_size; 5578 ixgbe_update_max_frame(adapter, vf->max_frame_size); 5579 5580 /* 5581 * We might have to disable reception to this VF if the frame size is 5582 * not compatible with the config on the PF.
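 * (ixgbe_vf_enable_receive() repeats the compatibility check and
 * leaves the VF's VFRE bit clear if the sizes cannot coexist.)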
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
}


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	// XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
}


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
}


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
}


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    adapter->ifp->if_xname, msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
}


/*
 * Tasklet for handling VF -> PF mailbox messages.
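 *
 * Word 0 of every mailbox message carries the framing used above: the
 * message type sits in the low bits (masked with IXGBE_VT_MSG_MASK, e.g.
 * IXGBE_VF_SET_MAC_ADDR), an optional count rides in the IXGBE_VT_MSGINFO
 * field (SET_MULTICAST uses it for the number of hash entries), and the
 * high bits hold the ACK/NACK/CTS status flags that the PF ORs into its
 * replies.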
 */
static void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	adapter = context;
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (ixgbe_check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (ixgbe_check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (ixgbe_check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
}


static int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	enum ixgbe_iov_mode mode;

	adapter = device_get_softc(dev);
	adapter->num_vfs = num_vfs;
	mode = ixgbe_get_iov_mode(adapter);

	if (num_vfs > ixgbe_max_vfs(mode)) {
		adapter->num_vfs = 0;
		return (ENOSPC);
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		adapter->num_vfs = 0;
		IXGBE_CORE_UNLOCK(adapter);
		return (ENOMEM);
	}

	ixgbe_init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (0);
}


static void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/*
	 * Enable rx/tx for the PF and disable it for all VFs.
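	 * VFRE and VFTE are each a pair of 32-bit registers covering the 64
	 * pools: IXGBE_VF_INDEX() picks the register and IXGBE_VF_BIT() the
	 * bit for a given pool, so writing only the PF's bit to its own
	 * register and all-zeroes to the other register clears every VF
	 * enable bit in one pass.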
	 */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
	    IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
	    IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;

	IXGBE_CORE_UNLOCK(adapter);
}


static void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	enum ixgbe_iov_mode mode;
	int i;

	mode = ixgbe_get_iov_mode(adapter);
	if (mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;

	switch (mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	mtqc = IXGBE_MTQC_VT_ENA;
	switch (mode) {
	case IXGBE_64_VM:
		mtqc |= IXGBE_MTQC_64VF;
		break;
	case IXGBE_32_VM:
		mtqc |= IXGBE_MTQC_32VF;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);

	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	switch (mode) {
	case IXGBE_64_VM:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		break;
	case IXGBE_32_VM:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (mode) {
	case IXGBE_64_VM:
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", mode);
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
	    IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
	    IXGBE_VF_BIT(adapter->pool));

	/*
	 * Allow VM-to-VM communication.
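	 * Setting the loopback-enable bit (IXGBE_PFDTXGSWC_VT_LBEN) in
	 * PFDTXGSWC lets the internal Tx switch forward frames between
	 * pools, so VF-to-VF and VF-to-PF traffic is bridged on-chip rather
	 * than needing an external switch to hairpin it.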
	 */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
}


/*
** Check the max frame setting of all active VFs
*/
static void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
}


static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
}

static int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
}
#endif /* PCI_IOV */
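
/*
 * For reference, a minimal iovctl.conf(5) fragment that exercises the
 * per-VF options consumed by ixgbe_add_vf() above ("mac-addr" and
 * "allow-set-mac"); the device name and values here are illustrative
 * only:
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : true;
 *	}
 */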