1 /****************************************************************************** 2 3 Copyright (c) 2001-2017, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 #include "opt_inet.h" 36 #include "opt_inet6.h" 37 #include "opt_rss.h" 38 39 #include "ixgbe.h" 40 #include "ixgbe_sriov.h" 41 #include "ifdi_if.h" 42 43 #include <net/netmap.h> 44 #include <dev/netmap/netmap_kern.h> 45 46 /************************************************************************ 47 * Driver version 48 ************************************************************************/ 49 char ixgbe_driver_version[] = "4.0.1-k"; 50 51 /************************************************************************ 52 * PCI Device ID Table 53 * 54 * Used by probe to select devices to load on 55 * Last field stores an index into ixgbe_strings 56 * Last entry must be all 0s 57 * 58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 59 ************************************************************************/ 60 static pci_vendor_info_t ixgbe_vendor_info_array[] = 61 { 62 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"), 63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"), 64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"), 65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"), 66 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"), 67 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"), 68 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"), 69 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"), 70 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"), 71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"), 72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"), 73 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"), 74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"), 75 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"), 76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"), 77 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"), 78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"), 79 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"), 80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"), 81 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"), 82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"), 83 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"), 84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"), 85 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"), 86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"), 87 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"), 88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"), 89 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"), 90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"), 91 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"), 92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"), 93 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"), 94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"), 95 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"), 96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"), 97 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"), 98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"), 99 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"), 100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"), 101 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"), 102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"), 103 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"), 104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"), 105 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"), 106 /* required last entry */ 107 PVID_END 108 }; 109 110 static void *ixgbe_register(device_t); 111 static int ixgbe_if_attach_pre(if_ctx_t); 112 static int ixgbe_if_attach_post(if_ctx_t); 113 static int ixgbe_if_detach(if_ctx_t); 114 static int ixgbe_if_shutdown(if_ctx_t); 115 static int ixgbe_if_suspend(if_ctx_t); 116 static int ixgbe_if_resume(if_ctx_t); 117 118 static void ixgbe_if_stop(if_ctx_t); 119 void ixgbe_if_enable_intr(if_ctx_t); 120 static void ixgbe_if_disable_intr(if_ctx_t); 121 static void ixgbe_link_intr_enable(if_ctx_t); 122 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t); 123 static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *); 124 static int ixgbe_if_media_change(if_ctx_t); 125 static int ixgbe_if_msix_intr_assign(if_ctx_t, int); 126 static int ixgbe_if_mtu_set(if_ctx_t, uint32_t); 127 static void ixgbe_if_crcstrip_set(if_ctx_t, int, int); 128 static void ixgbe_if_multi_set(if_ctx_t); 129 static int ixgbe_if_promisc_set(if_ctx_t, int); 130 static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); 131 static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); 132 static void ixgbe_if_queues_free(if_ctx_t); 133 static void ixgbe_if_timer(if_ctx_t, uint16_t); 134 static void ixgbe_if_update_admin_status(if_ctx_t); 135 static void ixgbe_if_vlan_register(if_ctx_t, u16); 136 static void ixgbe_if_vlan_unregister(if_ctx_t, u16); 137 static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *); 138 static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event); 139 int ixgbe_intr(void *); 140 141 /************************************************************************ 142 * Function prototypes 143 ************************************************************************/ 144 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter); 145 146 static void ixgbe_enable_queue(struct ixgbe_softc *, u32); 147 static void ixgbe_disable_queue(struct ixgbe_softc *, u32); 148 static void ixgbe_add_device_sysctls(if_ctx_t); 149 static int ixgbe_allocate_pci_resources(if_ctx_t); 150 static int ixgbe_setup_low_power_mode(if_ctx_t); 151 152 static void ixgbe_config_dmac(struct ixgbe_softc *); 153 static void ixgbe_configure_ivars(struct ixgbe_softc *); 154 static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8); 155 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 156 static bool ixgbe_sfp_probe(if_ctx_t); 157 158 static void ixgbe_free_pci_resources(if_ctx_t); 159 160 static int ixgbe_msix_link(void *); 161 static int ixgbe_msix_que(void *); 162 static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *); 163 static void ixgbe_initialize_receive_units(if_ctx_t); 164 static void ixgbe_initialize_transmit_units(if_ctx_t); 165 166 static int ixgbe_setup_interface(if_ctx_t); 167 static void ixgbe_init_device_features(struct ixgbe_softc *); 168 static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool); 169 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *); 170 static void ixgbe_print_fw_version(if_ctx_t); 171 static void ixgbe_add_media_types(if_ctx_t); 172 static void ixgbe_update_stats_counters(struct ixgbe_softc *); 173 static void ixgbe_config_link(if_ctx_t); 174 static void ixgbe_get_slot_info(struct ixgbe_softc *); 175 static void ixgbe_check_wol_support(struct ixgbe_softc *); 176 static void ixgbe_enable_rx_drop(struct ixgbe_softc *); 177 static void ixgbe_disable_rx_drop(struct ixgbe_softc *); 178 179 static void ixgbe_add_hw_stats(struct ixgbe_softc *); 180 static int ixgbe_set_flowcntl(struct ixgbe_softc *, int); 181 static int ixgbe_set_advertise(struct ixgbe_softc *, int); 182 static int ixgbe_get_default_advertise(struct ixgbe_softc *); 183 static void ixgbe_setup_vlan_hw_support(if_ctx_t); 184 static void ixgbe_config_gpie(struct ixgbe_softc *); 185 static void ixgbe_config_delay_values(struct ixgbe_softc *); 186 187 /* Sysctl handlers */ 188 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS); 189 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS); 190 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS); 191 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS); 192 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS); 193 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS); 194 static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS); 195 #ifdef IXGBE_DEBUG 196 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS); 197 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS); 198 #endif 199 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS); 200 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS); 201 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS); 202 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS); 203 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS); 204 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS); 205 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS); 206 207 /* Deferred interrupt tasklets */ 208 static void ixgbe_handle_msf(void *); 209 static void ixgbe_handle_mod(void *); 210 static void ixgbe_handle_phy(void *); 211 212 /************************************************************************ 213 * FreeBSD Device Interface Entry Points 214 ************************************************************************/ 215 static device_method_t ix_methods[] = { 216 /* Device interface */ 217 DEVMETHOD(device_register, ixgbe_register), 218 DEVMETHOD(device_probe, iflib_device_probe), 219 DEVMETHOD(device_attach, iflib_device_attach), 220 DEVMETHOD(device_detach, iflib_device_detach), 221 DEVMETHOD(device_shutdown, iflib_device_shutdown), 222 DEVMETHOD(device_suspend, iflib_device_suspend), 223 DEVMETHOD(device_resume, iflib_device_resume), 224 #ifdef PCI_IOV 225 DEVMETHOD(pci_iov_init, iflib_device_iov_init), 226 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit), 227 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf), 228 #endif /* PCI_IOV */ 229 DEVMETHOD_END 230 }; 231 232 static driver_t ix_driver = { 233 "ix", ix_methods, sizeof(struct ixgbe_softc), 234 }; 235 236 DRIVER_MODULE(ix, pci, ix_driver, 0, 0); 237 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array); 238 MODULE_DEPEND(ix, pci, 1, 1, 1); 239 MODULE_DEPEND(ix, ether, 1, 1, 1); 240 MODULE_DEPEND(ix, iflib, 1, 1, 1); 241 242 static device_method_t ixgbe_if_methods[] = { 243 DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre), 244 DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post), 245 DEVMETHOD(ifdi_detach, ixgbe_if_detach), 246 DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown), 247 DEVMETHOD(ifdi_suspend, ixgbe_if_suspend), 248 DEVMETHOD(ifdi_resume, ixgbe_if_resume), 249 DEVMETHOD(ifdi_init, ixgbe_if_init), 250 DEVMETHOD(ifdi_stop, ixgbe_if_stop), 251 DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign), 252 DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr), 253 DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr), 254 DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable), 255 DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable), 256 DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable), 257 DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc), 258 DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc), 259 DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free), 260 DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status), 261 DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set), 262 DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set), 263 DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set), 264 DEVMETHOD(ifdi_media_status, ixgbe_if_media_status), 265 DEVMETHOD(ifdi_media_change, ixgbe_if_media_change), 266 DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set), 267 DEVMETHOD(ifdi_timer, ixgbe_if_timer), 268 DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register), 269 DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister), 270 DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter), 271 DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req), 272 DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart), 273 #ifdef PCI_IOV 274 DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init), 275 DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit), 276 DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add), 277 #endif /* PCI_IOV */ 278 DEVMETHOD_END 279 }; 280 281 /* 282 * TUNEABLE PARAMETERS: 283 */ 284 285 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 286 "IXGBE driver parameters"); 287 static driver_t ixgbe_if_driver = { 288 "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc) 289 }; 290 291 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); 292 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, 293 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); 294 295 /* Flow control setting, default to full */ 296 static int ixgbe_flow_control = ixgbe_fc_full; 297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN, 298 &ixgbe_flow_control, 0, "Default flow control used for all adapters"); 299 300 /* Advertise Speed, default to 0 (auto) */ 301 static int ixgbe_advertise_speed = 0; 302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN, 303 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters"); 304 305 /* 306 * Smart speed setting, default to on 307 * this only works as a compile option 308 * right now as its during attach, set 309 * this to 'ixgbe_smart_speed_off' to 310 * disable. 311 */ 312 static int ixgbe_smart_speed = ixgbe_smart_speed_on; 313 314 /* 315 * MSI-X should be the default for best performance, 316 * but this allows it to be forced off for testing. 317 */ 318 static int ixgbe_enable_msix = 1; 319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, 320 "Enable MSI-X interrupts"); 321 322 /* 323 * Defining this on will allow the use 324 * of unsupported SFP+ modules, note that 325 * doing so you are on your own :) 326 */ 327 static int allow_unsupported_sfp = false; 328 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN, 329 &allow_unsupported_sfp, 0, 330 "Allow unsupported SFP modules...use at your own risk"); 331 332 /* 333 * Not sure if Flow Director is fully baked, 334 * so we'll default to turning it off. 335 */ 336 static int ixgbe_enable_fdir = 0; 337 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0, 338 "Enable Flow Director"); 339 340 /* Receive-Side Scaling */ 341 static int ixgbe_enable_rss = 1; 342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, 343 "Enable Receive-Side Scaling (RSS)"); 344 345 /* 346 * AIM: Adaptive Interrupt Moderation 347 * which means that the interrupt rate 348 * is varied over time based on the 349 * traffic for that interrupt vector 350 */ 351 static int ixgbe_enable_aim = false; 352 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0, 353 "Enable adaptive interrupt moderation"); 354 355 #if 0 356 /* Keep running tab on them for sanity check */ 357 static int ixgbe_total_ports; 358 #endif 359 360 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations"); 361 362 /* 363 * For Flow Director: this is the number of TX packets we sample 364 * for the filter pool, this means every 20th packet will be probed. 365 * 366 * This feature can be disabled by setting this to 0. 367 */ 368 static int atr_sample_rate = 20; 369 370 extern struct if_txrx ixgbe_txrx; 371 372 static struct if_shared_ctx ixgbe_sctx_init = { 373 .isc_magic = IFLIB_MAGIC, 374 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ 375 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), 376 .isc_tx_maxsegsize = PAGE_SIZE, 377 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), 378 .isc_tso_maxsegsize = PAGE_SIZE, 379 .isc_rx_maxsize = PAGE_SIZE*4, 380 .isc_rx_nsegments = 1, 381 .isc_rx_maxsegsize = PAGE_SIZE*4, 382 .isc_nfl = 1, 383 .isc_ntxqs = 1, 384 .isc_nrxqs = 1, 385 386 .isc_admin_intrcnt = 1, 387 .isc_vendor_info = ixgbe_vendor_info_array, 388 .isc_driver_version = ixgbe_driver_version, 389 .isc_driver = &ixgbe_if_driver, 390 .isc_flags = IFLIB_TSO_INIT_IP, 391 392 .isc_nrxd_min = {MIN_RXD}, 393 .isc_ntxd_min = {MIN_TXD}, 394 .isc_nrxd_max = {MAX_RXD}, 395 .isc_ntxd_max = {MAX_TXD}, 396 .isc_nrxd_default = {DEFAULT_RXD}, 397 .isc_ntxd_default = {DEFAULT_TXD}, 398 }; 399 400 /************************************************************************ 401 * ixgbe_if_tx_queues_alloc 402 ************************************************************************/ 403 static int 404 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 405 int ntxqs, int ntxqsets) 406 { 407 struct ixgbe_softc *sc = iflib_get_softc(ctx); 408 if_softc_ctx_t scctx = sc->shared; 409 struct ix_tx_queue *que; 410 int i, j, error; 411 412 MPASS(sc->num_tx_queues > 0); 413 MPASS(sc->num_tx_queues == ntxqsets); 414 MPASS(ntxqs == 1); 415 416 /* Allocate queue structure memory */ 417 sc->tx_queues = 418 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets, 419 M_IXGBE, M_NOWAIT | M_ZERO); 420 if (!sc->tx_queues) { 421 device_printf(iflib_get_dev(ctx), 422 "Unable to allocate TX ring memory\n"); 423 return (ENOMEM); 424 } 425 426 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) { 427 struct tx_ring *txr = &que->txr; 428 429 /* In case SR-IOV is enabled, align the index properly */ 430 txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, 431 i); 432 433 txr->sc = que->sc = sc; 434 435 /* Allocate report status array */ 436 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO); 437 if (txr->tx_rsq == NULL) { 438 error = ENOMEM; 439 goto fail; 440 } 441 for (j = 0; j < scctx->isc_ntxd[0]; j++) 442 txr->tx_rsq[j] = QIDX_INVALID; 443 /* get the virtual and physical address of the hardware queues */ 444 txr->tail = IXGBE_TDT(txr->me); 445 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i]; 446 txr->tx_paddr = paddrs[i]; 447 448 txr->bytes = 0; 449 txr->total_packets = 0; 450 451 /* Set the rate at which we sample packets */ 452 if (sc->feat_en & IXGBE_FEATURE_FDIR) 453 txr->atr_sample = atr_sample_rate; 454 455 } 456 457 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n", 458 sc->num_tx_queues); 459 460 return (0); 461 462 fail: 463 ixgbe_if_queues_free(ctx); 464 465 return (error); 466 } /* ixgbe_if_tx_queues_alloc */ 467 468 /************************************************************************ 469 * ixgbe_if_rx_queues_alloc 470 ************************************************************************/ 471 static int 472 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 473 int nrxqs, int nrxqsets) 474 { 475 struct ixgbe_softc *sc = iflib_get_softc(ctx); 476 struct ix_rx_queue *que; 477 int i; 478 479 MPASS(sc->num_rx_queues > 0); 480 MPASS(sc->num_rx_queues == nrxqsets); 481 MPASS(nrxqs == 1); 482 483 /* Allocate queue structure memory */ 484 sc->rx_queues = 485 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets, 486 M_IXGBE, M_NOWAIT | M_ZERO); 487 if (!sc->rx_queues) { 488 device_printf(iflib_get_dev(ctx), 489 "Unable to allocate TX ring memory\n"); 490 return (ENOMEM); 491 } 492 493 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) { 494 struct rx_ring *rxr = &que->rxr; 495 496 /* In case SR-IOV is enabled, align the index properly */ 497 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, 498 i); 499 500 rxr->sc = que->sc = sc; 501 502 /* get the virtual and physical address of the hw queues */ 503 rxr->tail = IXGBE_RDT(rxr->me); 504 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; 505 rxr->rx_paddr = paddrs[i]; 506 rxr->bytes = 0; 507 rxr->que = que; 508 } 509 510 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n", 511 sc->num_rx_queues); 512 513 return (0); 514 } /* ixgbe_if_rx_queues_alloc */ 515 516 /************************************************************************ 517 * ixgbe_if_queues_free 518 ************************************************************************/ 519 static void 520 ixgbe_if_queues_free(if_ctx_t ctx) 521 { 522 struct ixgbe_softc *sc = iflib_get_softc(ctx); 523 struct ix_tx_queue *tx_que = sc->tx_queues; 524 struct ix_rx_queue *rx_que = sc->rx_queues; 525 int i; 526 527 if (tx_que != NULL) { 528 for (i = 0; i < sc->num_tx_queues; i++, tx_que++) { 529 struct tx_ring *txr = &tx_que->txr; 530 if (txr->tx_rsq == NULL) 531 break; 532 533 free(txr->tx_rsq, M_IXGBE); 534 txr->tx_rsq = NULL; 535 } 536 537 free(sc->tx_queues, M_IXGBE); 538 sc->tx_queues = NULL; 539 } 540 if (rx_que != NULL) { 541 free(sc->rx_queues, M_IXGBE); 542 sc->rx_queues = NULL; 543 } 544 } /* ixgbe_if_queues_free */ 545 546 /************************************************************************ 547 * ixgbe_initialize_rss_mapping 548 ************************************************************************/ 549 static void 550 ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc) 551 { 552 struct ixgbe_hw *hw = &sc->hw; 553 u32 reta = 0, mrqc, rss_key[10]; 554 int queue_id, table_size, index_mult; 555 int i, j; 556 u32 rss_hash_config; 557 558 if (sc->feat_en & IXGBE_FEATURE_RSS) { 559 /* Fetch the configured RSS key */ 560 rss_getkey((uint8_t *)&rss_key); 561 } else { 562 /* set up random bits */ 563 arc4rand(&rss_key, sizeof(rss_key), 0); 564 } 565 566 /* Set multiplier for RETA setup and table size based on MAC */ 567 index_mult = 0x1; 568 table_size = 128; 569 switch (sc->hw.mac.type) { 570 case ixgbe_mac_82598EB: 571 index_mult = 0x11; 572 break; 573 case ixgbe_mac_X550: 574 case ixgbe_mac_X550EM_x: 575 case ixgbe_mac_X550EM_a: 576 table_size = 512; 577 break; 578 default: 579 break; 580 } 581 582 /* Set up the redirection table */ 583 for (i = 0, j = 0; i < table_size; i++, j++) { 584 if (j == sc->num_rx_queues) 585 j = 0; 586 587 if (sc->feat_en & IXGBE_FEATURE_RSS) { 588 /* 589 * Fetch the RSS bucket id for the given indirection 590 * entry. Cap it at the number of configured buckets 591 * (which is num_rx_queues.) 592 */ 593 queue_id = rss_get_indirection_to_bucket(i); 594 queue_id = queue_id % sc->num_rx_queues; 595 } else 596 queue_id = (j * index_mult); 597 598 /* 599 * The low 8 bits are for hash value (n+0); 600 * The next 8 bits are for hash value (n+1), etc. 601 */ 602 reta = reta >> 8; 603 reta = reta | (((uint32_t)queue_id) << 24); 604 if ((i & 3) == 3) { 605 if (i < 128) 606 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 607 else 608 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 609 reta); 610 reta = 0; 611 } 612 } 613 614 /* Now fill our hash function seeds */ 615 for (i = 0; i < 10; i++) 616 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 617 618 /* Perform hash on these packet types */ 619 if (sc->feat_en & IXGBE_FEATURE_RSS) 620 rss_hash_config = rss_gethashconfig(); 621 else { 622 /* 623 * Disable UDP - IP fragments aren't currently being handled 624 * and so we end up with a mix of 2-tuple and 4-tuple 625 * traffic. 626 */ 627 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 628 | RSS_HASHTYPE_RSS_TCP_IPV4 629 | RSS_HASHTYPE_RSS_IPV6 630 | RSS_HASHTYPE_RSS_TCP_IPV6 631 | RSS_HASHTYPE_RSS_IPV6_EX 632 | RSS_HASHTYPE_RSS_TCP_IPV6_EX; 633 } 634 635 mrqc = IXGBE_MRQC_RSSEN; 636 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 637 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 638 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 639 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 640 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 641 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 642 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 643 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 644 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 645 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 646 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 647 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 648 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 649 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 650 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 651 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 652 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 653 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 654 mrqc |= ixgbe_get_mrqc(sc->iov_mode); 655 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 656 } /* ixgbe_initialize_rss_mapping */ 657 658 /************************************************************************ 659 * ixgbe_initialize_receive_units - Setup receive registers and features. 660 ************************************************************************/ 661 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 662 663 static void 664 ixgbe_initialize_receive_units(if_ctx_t ctx) 665 { 666 struct ixgbe_softc *sc = iflib_get_softc(ctx); 667 if_softc_ctx_t scctx = sc->shared; 668 struct ixgbe_hw *hw = &sc->hw; 669 struct ifnet *ifp = iflib_get_ifp(ctx); 670 struct ix_rx_queue *que; 671 int i, j; 672 u32 bufsz, fctrl, srrctl, rxcsum; 673 u32 hlreg; 674 675 /* 676 * Make sure receives are disabled while 677 * setting up the descriptor ring 678 */ 679 ixgbe_disable_rx(hw); 680 681 /* Enable broadcasts */ 682 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 683 fctrl |= IXGBE_FCTRL_BAM; 684 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 685 fctrl |= IXGBE_FCTRL_DPF; 686 fctrl |= IXGBE_FCTRL_PMCF; 687 } 688 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 689 690 /* Set for Jumbo Frames? */ 691 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 692 if (ifp->if_mtu > ETHERMTU) 693 hlreg |= IXGBE_HLREG0_JUMBOEN; 694 else 695 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 696 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); 697 698 bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 699 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 700 701 /* Setup the Base and Length of the Rx Descriptor Ring */ 702 for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) { 703 struct rx_ring *rxr = &que->rxr; 704 u64 rdba = rxr->rx_paddr; 705 706 j = rxr->me; 707 708 /* Setup the Base and Length of the Rx Descriptor Ring */ 709 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), 710 (rdba & 0x00000000ffffffffULL)); 711 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 712 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), 713 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc)); 714 715 /* Set up the SRRCTL register */ 716 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); 717 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 718 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 719 srrctl |= bufsz; 720 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 721 722 /* 723 * Set DROP_EN iff we have no flow control and >1 queue. 724 * Note that srrctl was cleared shortly before during reset, 725 * so we do not need to clear the bit, but do it just in case 726 * this code is moved elsewhere. 727 */ 728 if (sc->num_rx_queues > 1 && 729 sc->hw.fc.requested_mode == ixgbe_fc_none) { 730 srrctl |= IXGBE_SRRCTL_DROP_EN; 731 } else { 732 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 733 } 734 735 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 736 737 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 738 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 739 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 740 741 /* Set the driver rx tail address */ 742 rxr->tail = IXGBE_RDT(rxr->me); 743 } 744 745 if (sc->hw.mac.type != ixgbe_mac_82598EB) { 746 u32 psrtype = IXGBE_PSRTYPE_TCPHDR 747 | IXGBE_PSRTYPE_UDPHDR 748 | IXGBE_PSRTYPE_IPV4HDR 749 | IXGBE_PSRTYPE_IPV6HDR; 750 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 751 } 752 753 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 754 755 ixgbe_initialize_rss_mapping(sc); 756 757 if (sc->num_rx_queues > 1) { 758 /* RSS and RX IPP Checksum are mutually exclusive */ 759 rxcsum |= IXGBE_RXCSUM_PCSD; 760 } 761 762 if (ifp->if_capenable & IFCAP_RXCSUM) 763 rxcsum |= IXGBE_RXCSUM_PCSD; 764 765 /* This is useful for calculating UDP/IP fragment checksums */ 766 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 767 rxcsum |= IXGBE_RXCSUM_IPPCSE; 768 769 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 770 771 } /* ixgbe_initialize_receive_units */ 772 773 /************************************************************************ 774 * ixgbe_initialize_transmit_units - Enable transmit units. 775 ************************************************************************/ 776 static void 777 ixgbe_initialize_transmit_units(if_ctx_t ctx) 778 { 779 struct ixgbe_softc *sc = iflib_get_softc(ctx); 780 struct ixgbe_hw *hw = &sc->hw; 781 if_softc_ctx_t scctx = sc->shared; 782 struct ix_tx_queue *que; 783 int i; 784 785 /* Setup the Base and Length of the Tx Descriptor Ring */ 786 for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues; 787 i++, que++) { 788 struct tx_ring *txr = &que->txr; 789 u64 tdba = txr->tx_paddr; 790 u32 txctrl = 0; 791 int j = txr->me; 792 793 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 794 (tdba & 0x00000000ffffffffULL)); 795 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 796 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 797 scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc)); 798 799 /* Setup the HW Tx Head and Tail descriptor pointers */ 800 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 801 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 802 803 /* Cache the tail address */ 804 txr->tail = IXGBE_TDT(txr->me); 805 806 txr->tx_rs_cidx = txr->tx_rs_pidx; 807 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1; 808 for (int k = 0; k < scctx->isc_ntxd[0]; k++) 809 txr->tx_rsq[k] = QIDX_INVALID; 810 811 /* Disable Head Writeback */ 812 /* 813 * Note: for X550 series devices, these registers are actually 814 * prefixed with TPH_ isntead of DCA_, but the addresses and 815 * fields remain the same. 816 */ 817 switch (hw->mac.type) { 818 case ixgbe_mac_82598EB: 819 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 820 break; 821 default: 822 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 823 break; 824 } 825 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 826 switch (hw->mac.type) { 827 case ixgbe_mac_82598EB: 828 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 829 break; 830 default: 831 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 832 break; 833 } 834 835 } 836 837 if (hw->mac.type != ixgbe_mac_82598EB) { 838 u32 dmatxctl, rttdcs; 839 840 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 841 dmatxctl |= IXGBE_DMATXCTL_TE; 842 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 843 /* Disable arbiter to set MTQC */ 844 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 845 rttdcs |= IXGBE_RTTDCS_ARBDIS; 846 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 847 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 848 ixgbe_get_mtqc(sc->iov_mode)); 849 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 850 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 851 } 852 853 } /* ixgbe_initialize_transmit_units */ 854 855 /************************************************************************ 856 * ixgbe_register 857 ************************************************************************/ 858 static void * 859 ixgbe_register(device_t dev) 860 { 861 return (&ixgbe_sctx_init); 862 } /* ixgbe_register */ 863 864 /************************************************************************ 865 * ixgbe_if_attach_pre - Device initialization routine, part 1 866 * 867 * Called when the driver is being loaded. 868 * Identifies the type of hardware, initializes the hardware, 869 * and initializes iflib structures. 870 * 871 * return 0 on success, positive on failure 872 ************************************************************************/ 873 static int 874 ixgbe_if_attach_pre(if_ctx_t ctx) 875 { 876 struct ixgbe_softc *sc; 877 device_t dev; 878 if_softc_ctx_t scctx; 879 struct ixgbe_hw *hw; 880 int error = 0; 881 u32 ctrl_ext; 882 883 INIT_DEBUGOUT("ixgbe_attach: begin"); 884 885 /* Allocate, clear, and link in our adapter structure */ 886 dev = iflib_get_dev(ctx); 887 sc = iflib_get_softc(ctx); 888 sc->hw.back = sc; 889 sc->ctx = ctx; 890 sc->dev = dev; 891 scctx = sc->shared = iflib_get_softc_ctx(ctx); 892 sc->media = iflib_get_media(ctx); 893 hw = &sc->hw; 894 895 /* Determine hardware revision */ 896 hw->vendor_id = pci_get_vendor(dev); 897 hw->device_id = pci_get_device(dev); 898 hw->revision_id = pci_get_revid(dev); 899 hw->subsystem_vendor_id = pci_get_subvendor(dev); 900 hw->subsystem_device_id = pci_get_subdevice(dev); 901 902 /* Do base PCI setup - map BAR0 */ 903 if (ixgbe_allocate_pci_resources(ctx)) { 904 device_printf(dev, "Allocation of PCI resources failed\n"); 905 return (ENXIO); 906 } 907 908 /* let hardware know driver is loaded */ 909 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 910 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 911 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 912 913 /* 914 * Initialize the shared code 915 */ 916 if (ixgbe_init_shared_code(hw) != 0) { 917 device_printf(dev, "Unable to initialize the shared code\n"); 918 error = ENXIO; 919 goto err_pci; 920 } 921 922 if (hw->mbx.ops.init_params) 923 hw->mbx.ops.init_params(hw); 924 925 hw->allow_unsupported_sfp = allow_unsupported_sfp; 926 927 if (hw->mac.type != ixgbe_mac_82598EB) 928 hw->phy.smart_speed = ixgbe_smart_speed; 929 930 ixgbe_init_device_features(sc); 931 932 /* Enable WoL (if supported) */ 933 ixgbe_check_wol_support(sc); 934 935 /* Verify adapter fan is still functional (if applicable) */ 936 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) { 937 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 938 ixgbe_check_fan_failure(sc, esdp, false); 939 } 940 941 /* Ensure SW/FW semaphore is free */ 942 ixgbe_init_swfw_semaphore(hw); 943 944 /* Set an initial default flow control value */ 945 hw->fc.requested_mode = ixgbe_flow_control; 946 947 hw->phy.reset_if_overtemp = true; 948 error = ixgbe_reset_hw(hw); 949 hw->phy.reset_if_overtemp = false; 950 if (error == IXGBE_ERR_SFP_NOT_PRESENT) { 951 /* 952 * No optics in this port, set up 953 * so the timer routine will probe 954 * for later insertion. 955 */ 956 sc->sfp_probe = true; 957 error = 0; 958 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 959 device_printf(dev, "Unsupported SFP+ module detected!\n"); 960 error = EIO; 961 goto err_pci; 962 } else if (error) { 963 device_printf(dev, "Hardware initialization failed\n"); 964 error = EIO; 965 goto err_pci; 966 } 967 968 /* Make sure we have a good EEPROM before we read from it */ 969 if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) { 970 device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); 971 error = EIO; 972 goto err_pci; 973 } 974 975 error = ixgbe_start_hw(hw); 976 switch (error) { 977 case IXGBE_ERR_EEPROM_VERSION: 978 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 979 break; 980 case IXGBE_ERR_SFP_NOT_SUPPORTED: 981 device_printf(dev, "Unsupported SFP+ Module\n"); 982 error = EIO; 983 goto err_pci; 984 case IXGBE_ERR_SFP_NOT_PRESENT: 985 device_printf(dev, "No SFP+ Module found\n"); 986 /* falls thru */ 987 default: 988 break; 989 } 990 991 /* Most of the iflib initialization... */ 992 993 iflib_set_mac(ctx, hw->mac.addr); 994 switch (sc->hw.mac.type) { 995 case ixgbe_mac_X550: 996 case ixgbe_mac_X550EM_x: 997 case ixgbe_mac_X550EM_a: 998 scctx->isc_rss_table_size = 512; 999 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; 1000 break; 1001 default: 1002 scctx->isc_rss_table_size = 128; 1003 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16; 1004 } 1005 1006 /* Allow legacy interrupts */ 1007 ixgbe_txrx.ift_legacy_intr = ixgbe_intr; 1008 1009 scctx->isc_txqsizes[0] = 1010 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + 1011 sizeof(u32), DBA_ALIGN), 1012 scctx->isc_rxqsizes[0] = 1013 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), 1014 DBA_ALIGN); 1015 1016 /* XXX */ 1017 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | 1018 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO; 1019 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 1020 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER; 1021 } else { 1022 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP; 1023 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; 1024 } 1025 1026 scctx->isc_msix_bar = pci_msix_table_bar(dev); 1027 1028 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; 1029 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; 1030 scctx->isc_tx_tso_segsize_max = PAGE_SIZE; 1031 1032 scctx->isc_txrx = &ixgbe_txrx; 1033 1034 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS; 1035 1036 return (0); 1037 1038 err_pci: 1039 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); 1040 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 1041 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); 1042 ixgbe_free_pci_resources(ctx); 1043 1044 return (error); 1045 } /* ixgbe_if_attach_pre */ 1046 1047 /********************************************************************* 1048 * ixgbe_if_attach_post - Device initialization routine, part 2 1049 * 1050 * Called during driver load, but after interrupts and 1051 * resources have been allocated and configured. 1052 * Sets up some data structures not relevant to iflib. 1053 * 1054 * return 0 on success, positive on failure 1055 *********************************************************************/ 1056 static int 1057 ixgbe_if_attach_post(if_ctx_t ctx) 1058 { 1059 device_t dev; 1060 struct ixgbe_softc *sc; 1061 struct ixgbe_hw *hw; 1062 int error = 0; 1063 1064 dev = iflib_get_dev(ctx); 1065 sc = iflib_get_softc(ctx); 1066 hw = &sc->hw; 1067 1068 1069 if (sc->intr_type == IFLIB_INTR_LEGACY && 1070 (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) { 1071 device_printf(dev, "Device does not support legacy interrupts"); 1072 error = ENXIO; 1073 goto err; 1074 } 1075 1076 /* Allocate multicast array memory. */ 1077 sc->mta = malloc(sizeof(*sc->mta) * 1078 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT); 1079 if (sc->mta == NULL) { 1080 device_printf(dev, "Can not allocate multicast setup array\n"); 1081 error = ENOMEM; 1082 goto err; 1083 } 1084 1085 /* hw.ix defaults init */ 1086 ixgbe_set_advertise(sc, ixgbe_advertise_speed); 1087 1088 /* Enable the optics for 82599 SFP+ fiber */ 1089 ixgbe_enable_tx_laser(hw); 1090 1091 /* Enable power to the phy. */ 1092 ixgbe_set_phy_power(hw, true); 1093 1094 ixgbe_initialize_iov(sc); 1095 1096 error = ixgbe_setup_interface(ctx); 1097 if (error) { 1098 device_printf(dev, "Interface setup failed: %d\n", error); 1099 goto err; 1100 } 1101 1102 ixgbe_if_update_admin_status(ctx); 1103 1104 /* Initialize statistics */ 1105 ixgbe_update_stats_counters(sc); 1106 ixgbe_add_hw_stats(sc); 1107 1108 /* Check PCIE slot type/speed/width */ 1109 ixgbe_get_slot_info(sc); 1110 1111 /* 1112 * Do time init and sysctl init here, but 1113 * only on the first port of a bypass sc. 1114 */ 1115 ixgbe_bypass_init(sc); 1116 1117 /* Display NVM and Option ROM versions */ 1118 ixgbe_print_fw_version(ctx); 1119 1120 /* Set an initial dmac value */ 1121 sc->dmac = 0; 1122 /* Set initial advertised speeds (if applicable) */ 1123 sc->advertise = ixgbe_get_default_advertise(sc); 1124 1125 if (sc->feat_cap & IXGBE_FEATURE_SRIOV) 1126 ixgbe_define_iov_schemas(dev, &error); 1127 1128 /* Add sysctls */ 1129 ixgbe_add_device_sysctls(ctx); 1130 1131 return (0); 1132 err: 1133 return (error); 1134 } /* ixgbe_if_attach_post */ 1135 1136 /************************************************************************ 1137 * ixgbe_check_wol_support 1138 * 1139 * Checks whether the adapter's ports are capable of 1140 * Wake On LAN by reading the adapter's NVM. 1141 * 1142 * Sets each port's hw->wol_enabled value depending 1143 * on the value read here. 1144 ************************************************************************/ 1145 static void 1146 ixgbe_check_wol_support(struct ixgbe_softc *sc) 1147 { 1148 struct ixgbe_hw *hw = &sc->hw; 1149 u16 dev_caps = 0; 1150 1151 /* Find out WoL support for port */ 1152 sc->wol_support = hw->wol_enabled = 0; 1153 ixgbe_get_device_caps(hw, &dev_caps); 1154 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 1155 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 1156 hw->bus.func == 0)) 1157 sc->wol_support = hw->wol_enabled = 1; 1158 1159 /* Save initial wake up filter configuration */ 1160 sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 1161 1162 return; 1163 } /* ixgbe_check_wol_support */ 1164 1165 /************************************************************************ 1166 * ixgbe_setup_interface 1167 * 1168 * Setup networking device structure and register an interface. 1169 ************************************************************************/ 1170 static int 1171 ixgbe_setup_interface(if_ctx_t ctx) 1172 { 1173 struct ifnet *ifp = iflib_get_ifp(ctx); 1174 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1175 1176 INIT_DEBUGOUT("ixgbe_setup_interface: begin"); 1177 1178 if_setbaudrate(ifp, IF_Gbps(10)); 1179 1180 sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1181 1182 sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw); 1183 1184 ixgbe_add_media_types(ctx); 1185 1186 /* Autoselect media by default */ 1187 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); 1188 1189 return (0); 1190 } /* ixgbe_setup_interface */ 1191 1192 /************************************************************************ 1193 * ixgbe_if_get_counter 1194 ************************************************************************/ 1195 static uint64_t 1196 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) 1197 { 1198 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1199 if_t ifp = iflib_get_ifp(ctx); 1200 1201 switch (cnt) { 1202 case IFCOUNTER_IPACKETS: 1203 return (sc->ipackets); 1204 case IFCOUNTER_OPACKETS: 1205 return (sc->opackets); 1206 case IFCOUNTER_IBYTES: 1207 return (sc->ibytes); 1208 case IFCOUNTER_OBYTES: 1209 return (sc->obytes); 1210 case IFCOUNTER_IMCASTS: 1211 return (sc->imcasts); 1212 case IFCOUNTER_OMCASTS: 1213 return (sc->omcasts); 1214 case IFCOUNTER_COLLISIONS: 1215 return (0); 1216 case IFCOUNTER_IQDROPS: 1217 return (sc->iqdrops); 1218 case IFCOUNTER_OQDROPS: 1219 return (0); 1220 case IFCOUNTER_IERRORS: 1221 return (sc->ierrors); 1222 default: 1223 return (if_get_counter_default(ifp, cnt)); 1224 } 1225 } /* ixgbe_if_get_counter */ 1226 1227 /************************************************************************ 1228 * ixgbe_if_i2c_req 1229 ************************************************************************/ 1230 static int 1231 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) 1232 { 1233 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1234 struct ixgbe_hw *hw = &sc->hw; 1235 int i; 1236 1237 1238 if (hw->phy.ops.read_i2c_byte == NULL) 1239 return (ENXIO); 1240 for (i = 0; i < req->len; i++) 1241 hw->phy.ops.read_i2c_byte(hw, req->offset + i, 1242 req->dev_addr, &req->data[i]); 1243 return (0); 1244 } /* ixgbe_if_i2c_req */ 1245 1246 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized 1247 * @ctx: iflib context 1248 * @event: event code to check 1249 * 1250 * Defaults to returning true for unknown events. 1251 * 1252 * @returns true if iflib needs to reinit the interface 1253 */ 1254 static bool 1255 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) 1256 { 1257 switch (event) { 1258 case IFLIB_RESTART_VLAN_CONFIG: 1259 return (false); 1260 default: 1261 return (true); 1262 } 1263 } 1264 1265 /************************************************************************ 1266 * ixgbe_add_media_types 1267 ************************************************************************/ 1268 static void 1269 ixgbe_add_media_types(if_ctx_t ctx) 1270 { 1271 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1272 struct ixgbe_hw *hw = &sc->hw; 1273 device_t dev = iflib_get_dev(ctx); 1274 u64 layer; 1275 1276 layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw); 1277 1278 /* Media types with matching FreeBSD media defines */ 1279 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) 1280 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL); 1281 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) 1282 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1283 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) 1284 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 1285 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 1286 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 1287 1288 if (hw->mac.type == ixgbe_mac_X550) { 1289 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL); 1290 ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL); 1291 } 1292 1293 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1294 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 1295 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0, 1296 NULL); 1297 1298 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 1299 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL); 1300 if (hw->phy.multispeed_fiber) 1301 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, 1302 NULL); 1303 } 1304 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 1305 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 1306 if (hw->phy.multispeed_fiber) 1307 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, 1308 NULL); 1309 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 1310 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 1311 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 1312 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 1313 1314 #ifdef IFM_ETH_XTYPE 1315 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 1316 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL); 1317 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) 1318 ifmedia_add( sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); 1319 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 1320 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL); 1321 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) 1322 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL); 1323 #else 1324 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 1325 device_printf(dev, "Media supported: 10GbaseKR\n"); 1326 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); 1327 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 1328 } 1329 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 1330 device_printf(dev, "Media supported: 10GbaseKX4\n"); 1331 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); 1332 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 1333 } 1334 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 1335 device_printf(dev, "Media supported: 1000baseKX\n"); 1336 device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); 1337 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL); 1338 } 1339 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { 1340 device_printf(dev, "Media supported: 2500baseKX\n"); 1341 device_printf(dev, "2500baseKX mapped to 2500baseSX\n"); 1342 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL); 1343 } 1344 #endif 1345 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 1346 device_printf(dev, "Media supported: 1000baseBX\n"); 1347 1348 if (hw->device_id == IXGBE_DEV_ID_82598AT) { 1349 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 1350 0, NULL); 1351 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1352 } 1353 1354 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1355 } /* ixgbe_add_media_types */ 1356 1357 /************************************************************************ 1358 * ixgbe_is_sfp 1359 ************************************************************************/ 1360 static inline bool 1361 ixgbe_is_sfp(struct ixgbe_hw *hw) 1362 { 1363 switch (hw->mac.type) { 1364 case ixgbe_mac_82598EB: 1365 if (hw->phy.type == ixgbe_phy_nl) 1366 return (true); 1367 return (false); 1368 case ixgbe_mac_82599EB: 1369 switch (hw->mac.ops.get_media_type(hw)) { 1370 case ixgbe_media_type_fiber: 1371 case ixgbe_media_type_fiber_qsfp: 1372 return (true); 1373 default: 1374 return (false); 1375 } 1376 case ixgbe_mac_X550EM_x: 1377 case ixgbe_mac_X550EM_a: 1378 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) 1379 return (true); 1380 return (false); 1381 default: 1382 return (false); 1383 } 1384 } /* ixgbe_is_sfp */ 1385 1386 /************************************************************************ 1387 * ixgbe_config_link 1388 ************************************************************************/ 1389 static void 1390 ixgbe_config_link(if_ctx_t ctx) 1391 { 1392 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1393 struct ixgbe_hw *hw = &sc->hw; 1394 u32 autoneg, err = 0; 1395 bool sfp, negotiate; 1396 1397 sfp = ixgbe_is_sfp(hw); 1398 1399 if (sfp) { 1400 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; 1401 iflib_admin_intr_deferred(ctx); 1402 } else { 1403 if (hw->mac.ops.check_link) 1404 err = ixgbe_check_link(hw, &sc->link_speed, 1405 &sc->link_up, false); 1406 if (err) 1407 return; 1408 autoneg = hw->phy.autoneg_advertised; 1409 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 1410 err = hw->mac.ops.get_link_capabilities(hw, &autoneg, 1411 &negotiate); 1412 if (err) 1413 return; 1414 1415 if (hw->mac.type == ixgbe_mac_X550 && 1416 hw->phy.autoneg_advertised == 0) { 1417 /* 1418 * 2.5G and 5G autonegotiation speeds on X550 1419 * are disabled by default due to reported 1420 * interoperability issues with some switches. 1421 * 1422 * The second condition checks if any operations 1423 * involving setting autonegotiation speeds have 1424 * been performed prior to this ixgbe_config_link() 1425 * call. 1426 * 1427 * If hw->phy.autoneg_advertised does not 1428 * equal 0, this means that the user might have 1429 * set autonegotiation speeds via the sysctl 1430 * before bringing the interface up. In this 1431 * case, we should not disable 2.5G and 5G 1432 * since that speeds might be selected by the 1433 * user. 1434 * 1435 * Otherwise (i.e. if hw->phy.autoneg_advertised 1436 * is set to 0), it is the first time we set 1437 * autonegotiation preferences and the default 1438 * set of speeds should exclude 2.5G and 5G. 1439 */ 1440 autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL | 1441 IXGBE_LINK_SPEED_5GB_FULL); 1442 } 1443 1444 if (hw->mac.ops.setup_link) 1445 err = hw->mac.ops.setup_link(hw, autoneg, 1446 sc->link_up); 1447 } 1448 } /* ixgbe_config_link */ 1449 1450 /************************************************************************ 1451 * ixgbe_update_stats_counters - Update board statistics counters. 1452 ************************************************************************/ 1453 static void 1454 ixgbe_update_stats_counters(struct ixgbe_softc *sc) 1455 { 1456 struct ixgbe_hw *hw = &sc->hw; 1457 struct ixgbe_hw_stats *stats = &sc->stats.pf; 1458 u32 missed_rx = 0, bprc, lxon, lxoff, total; 1459 u32 lxoffrxc; 1460 u64 total_missed_rx = 0; 1461 1462 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 1463 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 1464 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 1465 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 1466 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0)); 1467 1468 for (int i = 0; i < 16; i++) { 1469 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 1470 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 1471 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 1472 } 1473 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 1474 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 1475 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 1476 1477 /* Hardware workaround, gprc counts missed packets */ 1478 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 1479 stats->gprc -= missed_rx; 1480 1481 if (hw->mac.type != ixgbe_mac_82598EB) { 1482 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + 1483 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 1484 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + 1485 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 1486 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) + 1487 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 1488 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 1489 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 1490 stats->lxoffrxc += lxoffrxc; 1491 } else { 1492 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 1493 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 1494 stats->lxoffrxc += lxoffrxc; 1495 /* 82598 only has a counter in the high register */ 1496 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 1497 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 1498 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 1499 } 1500 1501 /* 1502 * For watchdog management we need to know if we have been paused 1503 * during the last interval, so capture that here. 1504 */ 1505 if (lxoffrxc) 1506 sc->shared->isc_pause_frames = 1; 1507 1508 /* 1509 * Workaround: mprc hardware is incorrectly counting 1510 * broadcasts, so for now we subtract those. 1511 */ 1512 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 1513 stats->bprc += bprc; 1514 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 1515 if (hw->mac.type == ixgbe_mac_82598EB) 1516 stats->mprc -= bprc; 1517 1518 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 1519 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 1520 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 1521 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 1522 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 1523 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 1524 1525 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 1526 stats->lxontxc += lxon; 1527 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 1528 stats->lxofftxc += lxoff; 1529 total = lxon + lxoff; 1530 1531 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 1532 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 1533 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 1534 stats->gptc -= total; 1535 stats->mptc -= total; 1536 stats->ptc64 -= total; 1537 stats->gotc -= total * ETHER_MIN_LEN; 1538 1539 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 1540 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 1541 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 1542 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 1543 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 1544 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 1545 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 1546 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 1547 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 1548 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 1549 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 1550 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 1551 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 1552 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 1553 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 1554 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 1555 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 1556 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 1557 /* Only read FCOE on 82599 */ 1558 if (hw->mac.type != ixgbe_mac_82598EB) { 1559 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 1560 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 1561 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 1562 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 1563 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 1564 } 1565 1566 /* Fill out the OS statistics structure */ 1567 IXGBE_SET_IPACKETS(sc, stats->gprc); 1568 IXGBE_SET_OPACKETS(sc, stats->gptc); 1569 IXGBE_SET_IBYTES(sc, stats->gorc); 1570 IXGBE_SET_OBYTES(sc, stats->gotc); 1571 IXGBE_SET_IMCASTS(sc, stats->mprc); 1572 IXGBE_SET_OMCASTS(sc, stats->mptc); 1573 IXGBE_SET_COLLISIONS(sc, 0); 1574 IXGBE_SET_IQDROPS(sc, total_missed_rx); 1575 1576 /* 1577 * Aggregate following types of errors as RX errors: 1578 * - CRC error count, 1579 * - illegal byte error count, 1580 * - checksum error count, 1581 * - missed packets count, 1582 * - length error count, 1583 * - undersized packets count, 1584 * - fragmented packets count, 1585 * - oversized packets count, 1586 * - jabber count. 1587 * 1588 * Ignore XEC errors for 82599 to workaround errata about 1589 * UDP frames with zero checksum. 1590 */ 1591 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc + 1592 (hw->mac.type != ixgbe_mac_82599EB ? stats->xec : 0) + 1593 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc + 1594 stats->rjc); 1595 } /* ixgbe_update_stats_counters */ 1596 1597 /************************************************************************ 1598 * ixgbe_add_hw_stats 1599 * 1600 * Add sysctl variables, one per statistic, to the system. 1601 ************************************************************************/ 1602 static void 1603 ixgbe_add_hw_stats(struct ixgbe_softc *sc) 1604 { 1605 device_t dev = iflib_get_dev(sc->ctx); 1606 struct ix_rx_queue *rx_que; 1607 struct ix_tx_queue *tx_que; 1608 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1609 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1610 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1611 struct ixgbe_hw_stats *stats = &sc->stats.pf; 1612 struct sysctl_oid *stat_node, *queue_node; 1613 struct sysctl_oid_list *stat_list, *queue_list; 1614 int i; 1615 1616 #define QUEUE_NAME_LEN 32 1617 char namebuf[QUEUE_NAME_LEN]; 1618 1619 /* Driver Statistics */ 1620 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1621 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets"); 1622 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1623 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); 1624 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1625 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); 1626 1627 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 1628 struct tx_ring *txr = &tx_que->txr; 1629 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1630 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1631 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1632 queue_list = SYSCTL_CHILDREN(queue_node); 1633 1634 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1635 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1636 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1637 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1638 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1639 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1640 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1641 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1642 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1643 CTLFLAG_RD, &txr->total_packets, 1644 "Queue Packets Transmitted"); 1645 } 1646 1647 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 1648 struct rx_ring *rxr = &rx_que->rxr; 1649 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1650 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1651 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1652 queue_list = SYSCTL_CHILDREN(queue_node); 1653 1654 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1655 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 1656 &sc->rx_queues[i], 0, 1657 ixgbe_sysctl_interrupt_rate_handler, "IU", 1658 "Interrupt Rate"); 1659 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1660 CTLFLAG_RD, &(sc->rx_queues[i].irqs), 1661 "irqs on this queue"); 1662 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1663 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1664 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1665 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1666 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1667 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1668 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1669 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1670 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1671 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1672 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1673 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1674 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1675 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets"); 1676 } 1677 1678 /* MAC stats get their own sub node */ 1679 1680 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 1681 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics"); 1682 stat_list = SYSCTL_CHILDREN(stat_node); 1683 1684 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs", 1685 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS); 1686 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 1687 CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); 1688 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", 1689 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); 1690 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", 1691 CTLFLAG_RD, &stats->errbc, "Byte Errors"); 1692 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", 1693 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); 1694 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", 1695 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults"); 1696 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", 1697 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults"); 1698 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", 1699 CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); 1700 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets", 1701 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count"); 1702 1703 /* Flow Control stats */ 1704 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 1705 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted"); 1706 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 1707 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received"); 1708 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 1709 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted"); 1710 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 1711 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received"); 1712 1713 /* Packet Reception Stats */ 1714 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", 1715 CTLFLAG_RD, &stats->tor, "Total Octets Received"); 1716 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 1717 CTLFLAG_RD, &stats->gorc, "Good Octets Received"); 1718 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", 1719 CTLFLAG_RD, &stats->tpr, "Total Packets Received"); 1720 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 1721 CTLFLAG_RD, &stats->gprc, "Good Packets Received"); 1722 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 1723 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); 1724 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", 1725 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); 1726 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 1727 CTLFLAG_RD, &stats->prc64, "64 byte frames received "); 1728 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 1729 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); 1730 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 1731 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); 1732 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 1733 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); 1734 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 1735 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); 1736 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 1737 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received"); 1738 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 1739 CTLFLAG_RD, &stats->ruc, "Receive Undersized"); 1740 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 1741 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received "); 1742 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 1743 CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); 1744 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 1745 CTLFLAG_RD, &stats->rjc, "Received Jabber"); 1746 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 1747 CTLFLAG_RD, &stats->mngprc, "Management Packets Received"); 1748 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 1749 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped"); 1750 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 1751 CTLFLAG_RD, &stats->xec, "Checksum Errors"); 1752 1753 /* Packet Transmission Stats */ 1754 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 1755 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); 1756 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 1757 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); 1758 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 1759 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); 1760 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 1761 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); 1762 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 1763 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); 1764 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 1765 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted"); 1766 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 1767 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted "); 1768 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 1769 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); 1770 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 1771 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); 1772 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 1773 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); 1774 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 1775 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); 1776 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 1777 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); 1778 } /* ixgbe_add_hw_stats */ 1779 1780 /************************************************************************ 1781 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function 1782 * 1783 * Retrieves the TDH value from the hardware 1784 ************************************************************************/ 1785 static int 1786 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 1787 { 1788 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1789 int error; 1790 unsigned int val; 1791 1792 if (!txr) 1793 return (0); 1794 1795 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me)); 1796 error = sysctl_handle_int(oidp, &val, 0, req); 1797 if (error || !req->newptr) 1798 return error; 1799 1800 return (0); 1801 } /* ixgbe_sysctl_tdh_handler */ 1802 1803 /************************************************************************ 1804 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1805 * 1806 * Retrieves the TDT value from the hardware 1807 ************************************************************************/ 1808 static int 1809 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1810 { 1811 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1812 int error; 1813 unsigned int val; 1814 1815 if (!txr) 1816 return (0); 1817 1818 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me)); 1819 error = sysctl_handle_int(oidp, &val, 0, req); 1820 if (error || !req->newptr) 1821 return error; 1822 1823 return (0); 1824 } /* ixgbe_sysctl_tdt_handler */ 1825 1826 /************************************************************************ 1827 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1828 * 1829 * Retrieves the RDH value from the hardware 1830 ************************************************************************/ 1831 static int 1832 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1833 { 1834 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1835 int error; 1836 unsigned int val; 1837 1838 if (!rxr) 1839 return (0); 1840 1841 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me)); 1842 error = sysctl_handle_int(oidp, &val, 0, req); 1843 if (error || !req->newptr) 1844 return error; 1845 1846 return (0); 1847 } /* ixgbe_sysctl_rdh_handler */ 1848 1849 /************************************************************************ 1850 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1851 * 1852 * Retrieves the RDT value from the hardware 1853 ************************************************************************/ 1854 static int 1855 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1856 { 1857 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1858 int error; 1859 unsigned int val; 1860 1861 if (!rxr) 1862 return (0); 1863 1864 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me)); 1865 error = sysctl_handle_int(oidp, &val, 0, req); 1866 if (error || !req->newptr) 1867 return error; 1868 1869 return (0); 1870 } /* ixgbe_sysctl_rdt_handler */ 1871 1872 /************************************************************************ 1873 * ixgbe_if_vlan_register 1874 * 1875 * Run via vlan config EVENT, it enables us to use the 1876 * HW Filter table since we can get the vlan id. This 1877 * just creates the entry in the soft version of the 1878 * VFTA, init will repopulate the real table. 1879 ************************************************************************/ 1880 static void 1881 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1882 { 1883 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1884 u16 index, bit; 1885 1886 index = (vtag >> 5) & 0x7F; 1887 bit = vtag & 0x1F; 1888 sc->shadow_vfta[index] |= (1 << bit); 1889 ++sc->num_vlans; 1890 ixgbe_setup_vlan_hw_support(ctx); 1891 } /* ixgbe_if_vlan_register */ 1892 1893 /************************************************************************ 1894 * ixgbe_if_vlan_unregister 1895 * 1896 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 1897 ************************************************************************/ 1898 static void 1899 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) 1900 { 1901 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1902 u16 index, bit; 1903 1904 index = (vtag >> 5) & 0x7F; 1905 bit = vtag & 0x1F; 1906 sc->shadow_vfta[index] &= ~(1 << bit); 1907 --sc->num_vlans; 1908 /* Re-init to load the changes */ 1909 ixgbe_setup_vlan_hw_support(ctx); 1910 } /* ixgbe_if_vlan_unregister */ 1911 1912 /************************************************************************ 1913 * ixgbe_setup_vlan_hw_support 1914 ************************************************************************/ 1915 static void 1916 ixgbe_setup_vlan_hw_support(if_ctx_t ctx) 1917 { 1918 struct ifnet *ifp = iflib_get_ifp(ctx); 1919 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1920 struct ixgbe_hw *hw = &sc->hw; 1921 struct rx_ring *rxr; 1922 int i; 1923 u32 ctrl; 1924 1925 1926 /* 1927 * We get here thru init_locked, meaning 1928 * a soft reset, this has already cleared 1929 * the VFTA and other state, so if there 1930 * have been no vlan's registered do nothing. 1931 */ 1932 if (sc->num_vlans == 0 || (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) { 1933 /* Clear the vlan hw flag */ 1934 for (i = 0; i < sc->num_rx_queues; i++) { 1935 rxr = &sc->rx_queues[i].rxr; 1936 /* On 82599 the VLAN enable is per/queue in RXDCTL */ 1937 if (hw->mac.type != ixgbe_mac_82598EB) { 1938 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 1939 ctrl &= ~IXGBE_RXDCTL_VME; 1940 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 1941 } 1942 rxr->vtag_strip = false; 1943 } 1944 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1945 /* Enable the Filter Table if enabled */ 1946 ctrl |= IXGBE_VLNCTRL_CFIEN; 1947 ctrl &= ~IXGBE_VLNCTRL_VFE; 1948 if (hw->mac.type == ixgbe_mac_82598EB) 1949 ctrl &= ~IXGBE_VLNCTRL_VME; 1950 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1951 return; 1952 } 1953 1954 /* Setup the queues for vlans */ 1955 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1956 for (i = 0; i < sc->num_rx_queues; i++) { 1957 rxr = &sc->rx_queues[i].rxr; 1958 /* On 82599 the VLAN enable is per/queue in RXDCTL */ 1959 if (hw->mac.type != ixgbe_mac_82598EB) { 1960 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 1961 ctrl |= IXGBE_RXDCTL_VME; 1962 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 1963 } 1964 rxr->vtag_strip = true; 1965 } 1966 } 1967 1968 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 1969 return; 1970 /* 1971 * A soft reset zero's out the VFTA, so 1972 * we need to repopulate it now. 1973 */ 1974 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1975 if (sc->shadow_vfta[i] != 0) 1976 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 1977 sc->shadow_vfta[i]); 1978 1979 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1980 /* Enable the Filter Table if enabled */ 1981 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 1982 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1983 ctrl |= IXGBE_VLNCTRL_VFE; 1984 } 1985 if (hw->mac.type == ixgbe_mac_82598EB) 1986 ctrl |= IXGBE_VLNCTRL_VME; 1987 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1988 } /* ixgbe_setup_vlan_hw_support */ 1989 1990 /************************************************************************ 1991 * ixgbe_get_slot_info 1992 * 1993 * Get the width and transaction speed of 1994 * the slot this adapter is plugged into. 1995 ************************************************************************/ 1996 static void 1997 ixgbe_get_slot_info(struct ixgbe_softc *sc) 1998 { 1999 device_t dev = iflib_get_dev(sc->ctx); 2000 struct ixgbe_hw *hw = &sc->hw; 2001 int bus_info_valid = true; 2002 u32 offset; 2003 u16 link; 2004 2005 /* Some devices are behind an internal bridge */ 2006 switch (hw->device_id) { 2007 case IXGBE_DEV_ID_82599_SFP_SF_QP: 2008 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 2009 goto get_parent_info; 2010 default: 2011 break; 2012 } 2013 2014 ixgbe_get_bus_info(hw); 2015 2016 /* 2017 * Some devices don't use PCI-E, but there is no need 2018 * to display "Unknown" for bus speed and width. 2019 */ 2020 switch (hw->mac.type) { 2021 case ixgbe_mac_X550EM_x: 2022 case ixgbe_mac_X550EM_a: 2023 return; 2024 default: 2025 goto display; 2026 } 2027 2028 get_parent_info: 2029 /* 2030 * For the Quad port adapter we need to parse back 2031 * up the PCI tree to find the speed of the expansion 2032 * slot into which this adapter is plugged. A bit more work. 2033 */ 2034 dev = device_get_parent(device_get_parent(dev)); 2035 #ifdef IXGBE_DEBUG 2036 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 2037 pci_get_slot(dev), pci_get_function(dev)); 2038 #endif 2039 dev = device_get_parent(device_get_parent(dev)); 2040 #ifdef IXGBE_DEBUG 2041 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 2042 pci_get_slot(dev), pci_get_function(dev)); 2043 #endif 2044 /* Now get the PCI Express Capabilities offset */ 2045 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 2046 /* 2047 * Hmm...can't get PCI-Express capabilities. 2048 * Falling back to default method. 2049 */ 2050 bus_info_valid = false; 2051 ixgbe_get_bus_info(hw); 2052 goto display; 2053 } 2054 /* ...and read the Link Status Register */ 2055 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2056 ixgbe_set_pci_config_data_generic(hw, link); 2057 2058 display: 2059 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 2060 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 2061 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 2062 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 2063 "Unknown"), 2064 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 2065 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 2066 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 2067 "Unknown")); 2068 2069 if (bus_info_valid) { 2070 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 2071 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 2072 (hw->bus.speed == ixgbe_bus_speed_2500))) { 2073 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2074 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 2075 } 2076 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 2077 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 2078 (hw->bus.speed < ixgbe_bus_speed_8000))) { 2079 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2080 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 2081 } 2082 } else 2083 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n"); 2084 2085 return; 2086 } /* ixgbe_get_slot_info */ 2087 2088 /************************************************************************ 2089 * ixgbe_if_msix_intr_assign 2090 * 2091 * Setup MSI-X Interrupt resources and handlers 2092 ************************************************************************/ 2093 static int 2094 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 2095 { 2096 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2097 struct ix_rx_queue *rx_que = sc->rx_queues; 2098 struct ix_tx_queue *tx_que; 2099 int error, rid, vector = 0; 2100 char buf[16]; 2101 2102 /* Admin Que is vector 0*/ 2103 rid = vector + 1; 2104 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) { 2105 rid = vector + 1; 2106 2107 snprintf(buf, sizeof(buf), "rxq%d", i); 2108 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 2109 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 2110 2111 if (error) { 2112 device_printf(iflib_get_dev(ctx), 2113 "Failed to allocate que int %d err: %d", i, error); 2114 sc->num_rx_queues = i + 1; 2115 goto fail; 2116 } 2117 2118 rx_que->msix = vector; 2119 } 2120 for (int i = 0; i < sc->num_tx_queues; i++) { 2121 snprintf(buf, sizeof(buf), "txq%d", i); 2122 tx_que = &sc->tx_queues[i]; 2123 tx_que->msix = i % sc->num_rx_queues; 2124 iflib_softirq_alloc_generic(ctx, 2125 &sc->rx_queues[tx_que->msix].que_irq, 2126 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2127 } 2128 rid = vector + 1; 2129 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, 2130 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq"); 2131 if (error) { 2132 device_printf(iflib_get_dev(ctx), 2133 "Failed to register admin handler"); 2134 return (error); 2135 } 2136 2137 sc->vector = vector; 2138 2139 return (0); 2140 fail: 2141 iflib_irq_free(ctx, &sc->irq); 2142 rx_que = sc->rx_queues; 2143 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) 2144 iflib_irq_free(ctx, &rx_que->que_irq); 2145 2146 return (error); 2147 } /* ixgbe_if_msix_intr_assign */ 2148 2149 static inline void 2150 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que) 2151 { 2152 uint32_t newitr = 0; 2153 struct rx_ring *rxr = &que->rxr; 2154 2155 /* 2156 * Do Adaptive Interrupt Moderation: 2157 * - Write out last calculated setting 2158 * - Calculate based on average size over 2159 * the last interval. 2160 */ 2161 if (que->eitr_setting) { 2162 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix), 2163 que->eitr_setting); 2164 } 2165 2166 que->eitr_setting = 0; 2167 /* Idle, do nothing */ 2168 if (rxr->bytes == 0) { 2169 return; 2170 } 2171 2172 if ((rxr->bytes) && (rxr->packets)) { 2173 newitr = (rxr->bytes / rxr->packets); 2174 } 2175 2176 newitr += 24; /* account for hardware frame, crc */ 2177 /* set an upper boundary */ 2178 newitr = min(newitr, 3000); 2179 2180 /* Be nice to the mid range */ 2181 if ((newitr > 300) && (newitr < 1200)) { 2182 newitr = (newitr / 3); 2183 } else { 2184 newitr = (newitr / 2); 2185 } 2186 2187 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 2188 newitr |= newitr << 16; 2189 } else { 2190 newitr |= IXGBE_EITR_CNT_WDIS; 2191 } 2192 2193 /* save for next interrupt */ 2194 que->eitr_setting = newitr; 2195 2196 /* Reset state */ 2197 rxr->bytes = 0; 2198 rxr->packets = 0; 2199 2200 return; 2201 } 2202 2203 /********************************************************************* 2204 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2205 **********************************************************************/ 2206 static int 2207 ixgbe_msix_que(void *arg) 2208 { 2209 struct ix_rx_queue *que = arg; 2210 struct ixgbe_softc *sc = que->sc; 2211 struct ifnet *ifp = iflib_get_ifp(que->sc->ctx); 2212 2213 /* Protect against spurious interrupts */ 2214 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2215 return (FILTER_HANDLED); 2216 2217 ixgbe_disable_queue(sc, que->msix); 2218 ++que->irqs; 2219 2220 /* Check for AIM */ 2221 if (sc->enable_aim) { 2222 ixgbe_perform_aim(sc, que); 2223 } 2224 2225 return (FILTER_SCHEDULE_THREAD); 2226 } /* ixgbe_msix_que */ 2227 2228 /************************************************************************ 2229 * ixgbe_media_status - Media Ioctl callback 2230 * 2231 * Called whenever the user queries the status of 2232 * the interface using ifconfig. 2233 ************************************************************************/ 2234 static void 2235 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2236 { 2237 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2238 struct ixgbe_hw *hw = &sc->hw; 2239 int layer; 2240 2241 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2242 2243 ifmr->ifm_status = IFM_AVALID; 2244 ifmr->ifm_active = IFM_ETHER; 2245 2246 if (!sc->link_active) 2247 return; 2248 2249 ifmr->ifm_status |= IFM_ACTIVE; 2250 layer = sc->phy_layer; 2251 2252 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2253 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2254 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2255 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2256 switch (sc->link_speed) { 2257 case IXGBE_LINK_SPEED_10GB_FULL: 2258 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2259 break; 2260 case IXGBE_LINK_SPEED_1GB_FULL: 2261 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2262 break; 2263 case IXGBE_LINK_SPEED_100_FULL: 2264 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2265 break; 2266 case IXGBE_LINK_SPEED_10_FULL: 2267 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2268 break; 2269 } 2270 if (hw->mac.type == ixgbe_mac_X550) 2271 switch (sc->link_speed) { 2272 case IXGBE_LINK_SPEED_5GB_FULL: 2273 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 2274 break; 2275 case IXGBE_LINK_SPEED_2_5GB_FULL: 2276 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 2277 break; 2278 } 2279 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2280 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2281 switch (sc->link_speed) { 2282 case IXGBE_LINK_SPEED_10GB_FULL: 2283 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2284 break; 2285 } 2286 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2287 switch (sc->link_speed) { 2288 case IXGBE_LINK_SPEED_10GB_FULL: 2289 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2290 break; 2291 case IXGBE_LINK_SPEED_1GB_FULL: 2292 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2293 break; 2294 } 2295 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2296 switch (sc->link_speed) { 2297 case IXGBE_LINK_SPEED_10GB_FULL: 2298 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2299 break; 2300 case IXGBE_LINK_SPEED_1GB_FULL: 2301 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2302 break; 2303 } 2304 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2305 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2306 switch (sc->link_speed) { 2307 case IXGBE_LINK_SPEED_10GB_FULL: 2308 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2309 break; 2310 case IXGBE_LINK_SPEED_1GB_FULL: 2311 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2312 break; 2313 } 2314 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2315 switch (sc->link_speed) { 2316 case IXGBE_LINK_SPEED_10GB_FULL: 2317 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2318 break; 2319 } 2320 /* 2321 * XXX: These need to use the proper media types once 2322 * they're added. 2323 */ 2324 #ifndef IFM_ETH_XTYPE 2325 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2326 switch (sc->link_speed) { 2327 case IXGBE_LINK_SPEED_10GB_FULL: 2328 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2329 break; 2330 case IXGBE_LINK_SPEED_2_5GB_FULL: 2331 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2332 break; 2333 case IXGBE_LINK_SPEED_1GB_FULL: 2334 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2335 break; 2336 } 2337 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2338 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2339 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2340 switch (sc->link_speed) { 2341 case IXGBE_LINK_SPEED_10GB_FULL: 2342 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2343 break; 2344 case IXGBE_LINK_SPEED_2_5GB_FULL: 2345 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2346 break; 2347 case IXGBE_LINK_SPEED_1GB_FULL: 2348 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2349 break; 2350 } 2351 #else 2352 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2353 switch (sc->link_speed) { 2354 case IXGBE_LINK_SPEED_10GB_FULL: 2355 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 2356 break; 2357 case IXGBE_LINK_SPEED_2_5GB_FULL: 2358 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2359 break; 2360 case IXGBE_LINK_SPEED_1GB_FULL: 2361 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2362 break; 2363 } 2364 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2365 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2366 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2367 switch (sc->link_speed) { 2368 case IXGBE_LINK_SPEED_10GB_FULL: 2369 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 2370 break; 2371 case IXGBE_LINK_SPEED_2_5GB_FULL: 2372 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2373 break; 2374 case IXGBE_LINK_SPEED_1GB_FULL: 2375 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2376 break; 2377 } 2378 #endif 2379 2380 /* If nothing is recognized... */ 2381 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 2382 ifmr->ifm_active |= IFM_UNKNOWN; 2383 2384 /* Display current flow control setting used on link */ 2385 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 2386 hw->fc.current_mode == ixgbe_fc_full) 2387 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2388 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 2389 hw->fc.current_mode == ixgbe_fc_full) 2390 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2391 } /* ixgbe_media_status */ 2392 2393 /************************************************************************ 2394 * ixgbe_media_change - Media Ioctl callback 2395 * 2396 * Called when the user changes speed/duplex using 2397 * media/mediopt option with ifconfig. 2398 ************************************************************************/ 2399 static int 2400 ixgbe_if_media_change(if_ctx_t ctx) 2401 { 2402 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2403 struct ifmedia *ifm = iflib_get_media(ctx); 2404 struct ixgbe_hw *hw = &sc->hw; 2405 ixgbe_link_speed speed = 0; 2406 2407 INIT_DEBUGOUT("ixgbe_if_media_change: begin"); 2408 2409 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2410 return (EINVAL); 2411 2412 if (hw->phy.media_type == ixgbe_media_type_backplane) 2413 return (EPERM); 2414 2415 /* 2416 * We don't actually need to check against the supported 2417 * media types of the adapter; ifmedia will take care of 2418 * that for us. 2419 */ 2420 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2421 case IFM_AUTO: 2422 case IFM_10G_T: 2423 speed |= IXGBE_LINK_SPEED_100_FULL; 2424 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2425 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2426 break; 2427 case IFM_10G_LRM: 2428 case IFM_10G_LR: 2429 #ifndef IFM_ETH_XTYPE 2430 case IFM_10G_SR: /* KR, too */ 2431 case IFM_10G_CX4: /* KX4 */ 2432 #else 2433 case IFM_10G_KR: 2434 case IFM_10G_KX4: 2435 #endif 2436 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2437 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2438 break; 2439 #ifndef IFM_ETH_XTYPE 2440 case IFM_1000_CX: /* KX */ 2441 #else 2442 case IFM_1000_KX: 2443 #endif 2444 case IFM_1000_LX: 2445 case IFM_1000_SX: 2446 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2447 break; 2448 case IFM_1000_T: 2449 speed |= IXGBE_LINK_SPEED_100_FULL; 2450 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2451 break; 2452 case IFM_10G_TWINAX: 2453 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2454 break; 2455 case IFM_5000_T: 2456 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2457 break; 2458 case IFM_2500_T: 2459 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2460 break; 2461 case IFM_100_TX: 2462 speed |= IXGBE_LINK_SPEED_100_FULL; 2463 break; 2464 case IFM_10_T: 2465 speed |= IXGBE_LINK_SPEED_10_FULL; 2466 break; 2467 default: 2468 goto invalid; 2469 } 2470 2471 hw->mac.autotry_restart = true; 2472 hw->mac.ops.setup_link(hw, speed, true); 2473 sc->advertise = 2474 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) | 2475 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) | 2476 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | 2477 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) | 2478 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) | 2479 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0); 2480 2481 return (0); 2482 2483 invalid: 2484 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2485 2486 return (EINVAL); 2487 } /* ixgbe_if_media_change */ 2488 2489 /************************************************************************ 2490 * ixgbe_set_promisc 2491 ************************************************************************/ 2492 static int 2493 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2494 { 2495 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2496 struct ifnet *ifp = iflib_get_ifp(ctx); 2497 u32 rctl; 2498 int mcnt = 0; 2499 2500 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); 2501 rctl &= (~IXGBE_FCTRL_UPE); 2502 if (ifp->if_flags & IFF_ALLMULTI) 2503 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2504 else { 2505 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); 2506 } 2507 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2508 rctl &= (~IXGBE_FCTRL_MPE); 2509 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2510 2511 if (ifp->if_flags & IFF_PROMISC) { 2512 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2513 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2514 } else if (ifp->if_flags & IFF_ALLMULTI) { 2515 rctl |= IXGBE_FCTRL_MPE; 2516 rctl &= ~IXGBE_FCTRL_UPE; 2517 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2518 } 2519 return (0); 2520 } /* ixgbe_if_promisc_set */ 2521 2522 /************************************************************************ 2523 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2524 ************************************************************************/ 2525 static int 2526 ixgbe_msix_link(void *arg) 2527 { 2528 struct ixgbe_softc *sc = arg; 2529 struct ixgbe_hw *hw = &sc->hw; 2530 u32 eicr, eicr_mask; 2531 s32 retval; 2532 2533 ++sc->link_irq; 2534 2535 /* Pause other interrupts */ 2536 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2537 2538 /* First get the cause */ 2539 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2540 /* Be sure the queue bits are not cleared */ 2541 eicr &= ~IXGBE_EICR_RTX_QUEUE; 2542 /* Clear interrupt with write */ 2543 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2544 2545 /* Link status change */ 2546 if (eicr & IXGBE_EICR_LSC) { 2547 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2548 sc->task_requests |= IXGBE_REQUEST_TASK_LSC; 2549 } 2550 2551 if (sc->hw.mac.type != ixgbe_mac_82598EB) { 2552 if ((sc->feat_en & IXGBE_FEATURE_FDIR) && 2553 (eicr & IXGBE_EICR_FLOW_DIR)) { 2554 /* This is probably overkill :) */ 2555 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1)) 2556 return (FILTER_HANDLED); 2557 /* Disable the interrupt */ 2558 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); 2559 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR; 2560 } else 2561 if (eicr & IXGBE_EICR_ECC) { 2562 device_printf(iflib_get_dev(sc->ctx), 2563 "Received ECC Err, initiating reset\n"); 2564 hw->mac.flags |= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 2565 ixgbe_reset_hw(hw); 2566 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2567 } 2568 2569 /* Check for over temp condition */ 2570 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 2571 switch (sc->hw.mac.type) { 2572 case ixgbe_mac_X550EM_a: 2573 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 2574 break; 2575 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 2576 IXGBE_EICR_GPI_SDP0_X550EM_a); 2577 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2578 IXGBE_EICR_GPI_SDP0_X550EM_a); 2579 retval = hw->phy.ops.check_overtemp(hw); 2580 if (retval != IXGBE_ERR_OVERTEMP) 2581 break; 2582 device_printf(iflib_get_dev(sc->ctx), 2583 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2584 device_printf(iflib_get_dev(sc->ctx), 2585 "System shutdown required!\n"); 2586 break; 2587 default: 2588 if (!(eicr & IXGBE_EICR_TS)) 2589 break; 2590 retval = hw->phy.ops.check_overtemp(hw); 2591 if (retval != IXGBE_ERR_OVERTEMP) 2592 break; 2593 device_printf(iflib_get_dev(sc->ctx), 2594 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2595 device_printf(iflib_get_dev(sc->ctx), 2596 "System shutdown required!\n"); 2597 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 2598 break; 2599 } 2600 } 2601 2602 /* Check for VF message */ 2603 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) && 2604 (eicr & IXGBE_EICR_MAILBOX)) 2605 sc->task_requests |= IXGBE_REQUEST_TASK_MBX; 2606 } 2607 2608 if (ixgbe_is_sfp(hw)) { 2609 /* Pluggable optics-related interrupt */ 2610 if (hw->mac.type >= ixgbe_mac_X540) 2611 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 2612 else 2613 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 2614 2615 if (eicr & eicr_mask) { 2616 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2617 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; 2618 } 2619 2620 if ((hw->mac.type == ixgbe_mac_82599EB) && 2621 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 2622 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2623 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2624 sc->task_requests |= IXGBE_REQUEST_TASK_MSF; 2625 } 2626 } 2627 2628 /* Check for fan failure */ 2629 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) { 2630 ixgbe_check_fan_failure(sc, eicr, true); 2631 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2632 } 2633 2634 /* External PHY interrupt */ 2635 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 2636 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2637 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 2638 sc->task_requests |= IXGBE_REQUEST_TASK_PHY; 2639 } 2640 2641 return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED; 2642 } /* ixgbe_msix_link */ 2643 2644 /************************************************************************ 2645 * ixgbe_sysctl_interrupt_rate_handler 2646 ************************************************************************/ 2647 static int 2648 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2649 { 2650 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2651 int error; 2652 unsigned int reg, usec, rate; 2653 2654 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix)); 2655 usec = ((reg & 0x0FF8) >> 3); 2656 if (usec > 0) 2657 rate = 500000 / usec; 2658 else 2659 rate = 0; 2660 error = sysctl_handle_int(oidp, &rate, 0, req); 2661 if (error || !req->newptr) 2662 return error; 2663 reg &= ~0xfff; /* default, no limitation */ 2664 ixgbe_max_interrupt_rate = 0; 2665 if (rate > 0 && rate < 500000) { 2666 if (rate < 1000) 2667 rate = 1000; 2668 ixgbe_max_interrupt_rate = rate; 2669 reg |= ((4000000/rate) & 0xff8); 2670 } 2671 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg); 2672 2673 return (0); 2674 } /* ixgbe_sysctl_interrupt_rate_handler */ 2675 2676 /************************************************************************ 2677 * ixgbe_add_device_sysctls 2678 ************************************************************************/ 2679 static void 2680 ixgbe_add_device_sysctls(if_ctx_t ctx) 2681 { 2682 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2683 device_t dev = iflib_get_dev(ctx); 2684 struct ixgbe_hw *hw = &sc->hw; 2685 struct sysctl_oid_list *child; 2686 struct sysctl_ctx_list *ctx_list; 2687 2688 ctx_list = device_get_sysctl_ctx(dev); 2689 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2690 2691 /* Sysctls for all devices */ 2692 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2693 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2694 sc, 0, ixgbe_sysctl_flowcntl, "I", 2695 IXGBE_SYSCTL_DESC_SET_FC); 2696 2697 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2698 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2699 sc, 0, ixgbe_sysctl_advertise, "I", 2700 IXGBE_SYSCTL_DESC_ADV_SPEED); 2701 2702 sc->enable_aim = ixgbe_enable_aim; 2703 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW, 2704 &sc->enable_aim, 0, "Interrupt Moderation"); 2705 2706 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version", 2707 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2708 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions"); 2709 2710 #ifdef IXGBE_DEBUG 2711 /* testing sysctls (for all devices) */ 2712 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2713 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2714 sc, 0, ixgbe_sysctl_power_state, 2715 "I", "PCI Power State"); 2716 2717 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2718 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2719 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2720 #endif 2721 /* for X550 series devices */ 2722 if (hw->mac.type >= ixgbe_mac_X550) 2723 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2724 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2725 sc, 0, ixgbe_sysctl_dmac, 2726 "I", "DMA Coalesce"); 2727 2728 /* for WoL-capable devices */ 2729 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2730 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2731 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2732 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2733 2734 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 2735 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2736 sc, 0, ixgbe_sysctl_wufc, 2737 "I", "Enable/Disable Wake Up Filters"); 2738 } 2739 2740 /* for X552/X557-AT devices */ 2741 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2742 struct sysctl_oid *phy_node; 2743 struct sysctl_oid_list *phy_list; 2744 2745 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2746 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls"); 2747 phy_list = SYSCTL_CHILDREN(phy_node); 2748 2749 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2750 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2751 sc, 0, ixgbe_sysctl_phy_temp, 2752 "I", "Current External PHY Temperature (Celsius)"); 2753 2754 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2755 "overtemp_occurred", 2756 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2757 ixgbe_sysctl_phy_overtemp_occurred, "I", 2758 "External PHY High Temperature Event Occurred"); 2759 } 2760 2761 if (sc->feat_cap & IXGBE_FEATURE_EEE) { 2762 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2763 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2764 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2765 } 2766 } /* ixgbe_add_device_sysctls */ 2767 2768 /************************************************************************ 2769 * ixgbe_allocate_pci_resources 2770 ************************************************************************/ 2771 static int 2772 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2773 { 2774 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2775 device_t dev = iflib_get_dev(ctx); 2776 int rid; 2777 2778 rid = PCIR_BAR(0); 2779 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2780 RF_ACTIVE); 2781 2782 if (!(sc->pci_mem)) { 2783 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2784 return (ENXIO); 2785 } 2786 2787 /* Save bus_space values for READ/WRITE_REG macros */ 2788 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); 2789 sc->osdep.mem_bus_space_handle = 2790 rman_get_bushandle(sc->pci_mem); 2791 /* Set hw values for shared code */ 2792 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; 2793 2794 return (0); 2795 } /* ixgbe_allocate_pci_resources */ 2796 2797 /************************************************************************ 2798 * ixgbe_detach - Device removal routine 2799 * 2800 * Called when the driver is being removed. 2801 * Stops the adapter and deallocates all the resources 2802 * that were allocated for driver operation. 2803 * 2804 * return 0 on success, positive on failure 2805 ************************************************************************/ 2806 static int 2807 ixgbe_if_detach(if_ctx_t ctx) 2808 { 2809 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2810 device_t dev = iflib_get_dev(ctx); 2811 u32 ctrl_ext; 2812 2813 INIT_DEBUGOUT("ixgbe_detach: begin"); 2814 2815 if (ixgbe_pci_iov_detach(dev) != 0) { 2816 device_printf(dev, "SR-IOV in use; detach first.\n"); 2817 return (EBUSY); 2818 } 2819 2820 ixgbe_setup_low_power_mode(ctx); 2821 2822 /* let hardware know driver is unloading */ 2823 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); 2824 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2825 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); 2826 2827 ixgbe_free_pci_resources(ctx); 2828 free(sc->mta, M_IXGBE); 2829 2830 return (0); 2831 } /* ixgbe_if_detach */ 2832 2833 /************************************************************************ 2834 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2835 * 2836 * Prepare the adapter/port for LPLU and/or WoL 2837 ************************************************************************/ 2838 static int 2839 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2840 { 2841 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2842 struct ixgbe_hw *hw = &sc->hw; 2843 device_t dev = iflib_get_dev(ctx); 2844 s32 error = 0; 2845 2846 if (!hw->wol_enabled) 2847 ixgbe_set_phy_power(hw, false); 2848 2849 /* Limit power management flow to X550EM baseT */ 2850 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2851 hw->phy.ops.enter_lplu) { 2852 /* Turn off support for APM wakeup. (Using ACPI instead) */ 2853 IXGBE_WRITE_REG(hw, IXGBE_GRC, 2854 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 2855 2856 /* 2857 * Clear Wake Up Status register to prevent any previous wakeup 2858 * events from waking us up immediately after we suspend. 2859 */ 2860 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2861 2862 /* 2863 * Program the Wakeup Filter Control register with user filter 2864 * settings 2865 */ 2866 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc); 2867 2868 /* Enable wakeups and power management in Wakeup Control */ 2869 IXGBE_WRITE_REG(hw, IXGBE_WUC, 2870 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 2871 2872 /* X550EM baseT adapters need a special LPLU flow */ 2873 hw->phy.reset_disable = true; 2874 ixgbe_if_stop(ctx); 2875 error = hw->phy.ops.enter_lplu(hw); 2876 if (error) 2877 device_printf(dev, "Error entering LPLU: %d\n", error); 2878 hw->phy.reset_disable = false; 2879 } else { 2880 /* Just stop for other adapters */ 2881 ixgbe_if_stop(ctx); 2882 } 2883 2884 return error; 2885 } /* ixgbe_setup_low_power_mode */ 2886 2887 /************************************************************************ 2888 * ixgbe_shutdown - Shutdown entry point 2889 ************************************************************************/ 2890 static int 2891 ixgbe_if_shutdown(if_ctx_t ctx) 2892 { 2893 int error = 0; 2894 2895 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 2896 2897 error = ixgbe_setup_low_power_mode(ctx); 2898 2899 return (error); 2900 } /* ixgbe_if_shutdown */ 2901 2902 /************************************************************************ 2903 * ixgbe_suspend 2904 * 2905 * From D0 to D3 2906 ************************************************************************/ 2907 static int 2908 ixgbe_if_suspend(if_ctx_t ctx) 2909 { 2910 int error = 0; 2911 2912 INIT_DEBUGOUT("ixgbe_suspend: begin"); 2913 2914 error = ixgbe_setup_low_power_mode(ctx); 2915 2916 return (error); 2917 } /* ixgbe_if_suspend */ 2918 2919 /************************************************************************ 2920 * ixgbe_resume 2921 * 2922 * From D3 to D0 2923 ************************************************************************/ 2924 static int 2925 ixgbe_if_resume(if_ctx_t ctx) 2926 { 2927 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2928 device_t dev = iflib_get_dev(ctx); 2929 struct ifnet *ifp = iflib_get_ifp(ctx); 2930 struct ixgbe_hw *hw = &sc->hw; 2931 u32 wus; 2932 2933 INIT_DEBUGOUT("ixgbe_resume: begin"); 2934 2935 /* Read & clear WUS register */ 2936 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2937 if (wus) 2938 device_printf(dev, "Woken up by (WUS): %#010x\n", 2939 IXGBE_READ_REG(hw, IXGBE_WUS)); 2940 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2941 /* And clear WUFC until next low-power transition */ 2942 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2943 2944 /* 2945 * Required after D3->D0 transition; 2946 * will re-advertise all previous advertised speeds 2947 */ 2948 if (ifp->if_flags & IFF_UP) 2949 ixgbe_if_init(ctx); 2950 2951 return (0); 2952 } /* ixgbe_if_resume */ 2953 2954 /************************************************************************ 2955 * ixgbe_if_mtu_set - Ioctl mtu entry point 2956 * 2957 * Return 0 on success, EINVAL on failure 2958 ************************************************************************/ 2959 static int 2960 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 2961 { 2962 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2963 int error = 0; 2964 2965 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); 2966 2967 if (mtu > IXGBE_MAX_MTU) { 2968 error = EINVAL; 2969 } else { 2970 sc->max_frame_size = mtu + IXGBE_MTU_HDR; 2971 } 2972 2973 return error; 2974 } /* ixgbe_if_mtu_set */ 2975 2976 /************************************************************************ 2977 * ixgbe_if_crcstrip_set 2978 ************************************************************************/ 2979 static void 2980 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip) 2981 { 2982 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2983 struct ixgbe_hw *hw = &sc->hw; 2984 /* crc stripping is set in two places: 2985 * IXGBE_HLREG0 (modified on init_locked and hw reset) 2986 * IXGBE_RDRXCTL (set by the original driver in 2987 * ixgbe_setup_hw_rsc() called in init_locked. 2988 * We disable the setting when netmap is compiled in). 2989 * We update the values here, but also in ixgbe.c because 2990 * init_locked sometimes is called outside our control. 2991 */ 2992 uint32_t hl, rxc; 2993 2994 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2995 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2996 #ifdef NETMAP 2997 if (netmap_verbose) 2998 D("%s read HLREG 0x%x rxc 0x%x", 2999 onoff ? "enter" : "exit", hl, rxc); 3000 #endif 3001 /* hw requirements ... */ 3002 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 3003 rxc |= IXGBE_RDRXCTL_RSCACKC; 3004 if (onoff && !crcstrip) { 3005 /* keep the crc. Fast rx */ 3006 hl &= ~IXGBE_HLREG0_RXCRCSTRP; 3007 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; 3008 } else { 3009 /* reset default mode */ 3010 hl |= IXGBE_HLREG0_RXCRCSTRP; 3011 rxc |= IXGBE_RDRXCTL_CRCSTRIP; 3012 } 3013 #ifdef NETMAP 3014 if (netmap_verbose) 3015 D("%s write HLREG 0x%x rxc 0x%x", 3016 onoff ? "enter" : "exit", hl, rxc); 3017 #endif 3018 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); 3019 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); 3020 } /* ixgbe_if_crcstrip_set */ 3021 3022 /********************************************************************* 3023 * ixgbe_if_init - Init entry point 3024 * 3025 * Used in two ways: It is used by the stack as an init 3026 * entry point in network interface structure. It is also 3027 * used by the driver as a hw/sw initialization routine to 3028 * get to a consistent state. 3029 * 3030 * Return 0 on success, positive on failure 3031 **********************************************************************/ 3032 void 3033 ixgbe_if_init(if_ctx_t ctx) 3034 { 3035 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3036 struct ifnet *ifp = iflib_get_ifp(ctx); 3037 device_t dev = iflib_get_dev(ctx); 3038 struct ixgbe_hw *hw = &sc->hw; 3039 struct ix_rx_queue *rx_que; 3040 struct ix_tx_queue *tx_que; 3041 u32 txdctl, mhadd; 3042 u32 rxdctl, rxctrl; 3043 u32 ctrl_ext; 3044 3045 int i, j, err; 3046 3047 INIT_DEBUGOUT("ixgbe_if_init: begin"); 3048 3049 /* Queue indices may change with IOV mode */ 3050 ixgbe_align_all_queue_indices(sc); 3051 3052 /* reprogram the RAR[0] in case user changed it. */ 3053 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV); 3054 3055 /* Get the latest mac address, User can use a LAA */ 3056 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3057 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1); 3058 hw->addr_ctrl.rar_used_count = 1; 3059 3060 ixgbe_init_hw(hw); 3061 3062 ixgbe_initialize_iov(sc); 3063 3064 ixgbe_initialize_transmit_units(ctx); 3065 3066 /* Setup Multicast table */ 3067 ixgbe_if_multi_set(ctx); 3068 3069 /* Determine the correct mbuf pool, based on frame size */ 3070 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 3071 3072 /* Configure RX settings */ 3073 ixgbe_initialize_receive_units(ctx); 3074 3075 /* 3076 * Initialize variable holding task enqueue requests 3077 * from MSI-X interrupts 3078 */ 3079 sc->task_requests = 0; 3080 3081 /* Enable SDP & MSI-X interrupts based on adapter */ 3082 ixgbe_config_gpie(sc); 3083 3084 /* Set MTU size */ 3085 if (ifp->if_mtu > ETHERMTU) { 3086 /* aka IXGBE_MAXFRS on 82599 and newer */ 3087 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3088 mhadd &= ~IXGBE_MHADD_MFS_MASK; 3089 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 3090 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 3091 } 3092 3093 /* Now enable all the queues */ 3094 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 3095 struct tx_ring *txr = &tx_que->txr; 3096 3097 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 3098 txdctl |= IXGBE_TXDCTL_ENABLE; 3099 /* Set WTHRESH to 8, burst writeback */ 3100 txdctl |= (8 << 16); 3101 /* 3102 * When the internal queue falls below PTHRESH (32), 3103 * start prefetching as long as there are at least 3104 * HTHRESH (1) buffers ready. The values are taken 3105 * from the Intel linux driver 3.8.21. 3106 * Prefetching enables tx line rate even with 1 queue. 3107 */ 3108 txdctl |= (32 << 0) | (1 << 8); 3109 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 3110 } 3111 3112 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 3113 struct rx_ring *rxr = &rx_que->rxr; 3114 3115 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3116 if (hw->mac.type == ixgbe_mac_82598EB) { 3117 /* 3118 * PTHRESH = 21 3119 * HTHRESH = 4 3120 * WTHRESH = 8 3121 */ 3122 rxdctl &= ~0x3FFFFF; 3123 rxdctl |= 0x080420; 3124 } 3125 rxdctl |= IXGBE_RXDCTL_ENABLE; 3126 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 3127 for (j = 0; j < 10; j++) { 3128 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 3129 IXGBE_RXDCTL_ENABLE) 3130 break; 3131 else 3132 msec_delay(1); 3133 } 3134 wmb(); 3135 } 3136 3137 /* Enable Receive engine */ 3138 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3139 if (hw->mac.type == ixgbe_mac_82598EB) 3140 rxctrl |= IXGBE_RXCTRL_DMBYPS; 3141 rxctrl |= IXGBE_RXCTRL_RXEN; 3142 ixgbe_enable_rx_dma(hw, rxctrl); 3143 3144 /* Set up MSI/MSI-X routing */ 3145 if (ixgbe_enable_msix) { 3146 ixgbe_configure_ivars(sc); 3147 /* Set up auto-mask */ 3148 if (hw->mac.type == ixgbe_mac_82598EB) 3149 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3150 else { 3151 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3152 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3153 } 3154 } else { /* Simple settings for Legacy/MSI */ 3155 ixgbe_set_ivar(sc, 0, 0, 0); 3156 ixgbe_set_ivar(sc, 0, 0, 1); 3157 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3158 } 3159 3160 ixgbe_init_fdir(sc); 3161 3162 /* 3163 * Check on any SFP devices that 3164 * need to be kick-started 3165 */ 3166 if (hw->phy.type == ixgbe_phy_none) { 3167 err = hw->phy.ops.identify(hw); 3168 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3169 device_printf(dev, 3170 "Unsupported SFP+ module type was detected.\n"); 3171 return; 3172 } 3173 } 3174 3175 /* Set moderation on the Link interrupt */ 3176 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR); 3177 3178 /* Enable power to the phy. */ 3179 ixgbe_set_phy_power(hw, true); 3180 3181 /* Config/Enable Link */ 3182 ixgbe_config_link(ctx); 3183 3184 /* Hardware Packet Buffer & Flow Control setup */ 3185 ixgbe_config_delay_values(sc); 3186 3187 /* Initialize the FC settings */ 3188 ixgbe_start_hw(hw); 3189 3190 /* Set up VLAN support and filter */ 3191 ixgbe_setup_vlan_hw_support(ctx); 3192 3193 /* Setup DMA Coalescing */ 3194 ixgbe_config_dmac(sc); 3195 3196 /* And now turn on interrupts */ 3197 ixgbe_if_enable_intr(ctx); 3198 3199 /* Enable the use of the MBX by the VF's */ 3200 if (sc->feat_en & IXGBE_FEATURE_SRIOV) { 3201 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3202 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 3203 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3204 } 3205 3206 } /* ixgbe_init_locked */ 3207 3208 /************************************************************************ 3209 * ixgbe_set_ivar 3210 * 3211 * Setup the correct IVAR register for a particular MSI-X interrupt 3212 * (yes this is all very magic and confusing :) 3213 * - entry is the register array entry 3214 * - vector is the MSI-X vector for this queue 3215 * - type is RX/TX/MISC 3216 ************************************************************************/ 3217 static void 3218 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type) 3219 { 3220 struct ixgbe_hw *hw = &sc->hw; 3221 u32 ivar, index; 3222 3223 vector |= IXGBE_IVAR_ALLOC_VAL; 3224 3225 switch (hw->mac.type) { 3226 case ixgbe_mac_82598EB: 3227 if (type == -1) 3228 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3229 else 3230 entry += (type * 64); 3231 index = (entry >> 2) & 0x1F; 3232 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3233 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3234 ivar |= (vector << (8 * (entry & 0x3))); 3235 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar); 3236 break; 3237 case ixgbe_mac_82599EB: 3238 case ixgbe_mac_X540: 3239 case ixgbe_mac_X550: 3240 case ixgbe_mac_X550EM_x: 3241 case ixgbe_mac_X550EM_a: 3242 if (type == -1) { /* MISC IVAR */ 3243 index = (entry & 1) * 8; 3244 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3245 ivar &= ~(0xFF << index); 3246 ivar |= (vector << index); 3247 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3248 } else { /* RX/TX IVARS */ 3249 index = (16 * (entry & 1)) + (8 * type); 3250 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3251 ivar &= ~(0xFF << index); 3252 ivar |= (vector << index); 3253 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3254 } 3255 default: 3256 break; 3257 } 3258 } /* ixgbe_set_ivar */ 3259 3260 /************************************************************************ 3261 * ixgbe_configure_ivars 3262 ************************************************************************/ 3263 static void 3264 ixgbe_configure_ivars(struct ixgbe_softc *sc) 3265 { 3266 struct ix_rx_queue *rx_que = sc->rx_queues; 3267 struct ix_tx_queue *tx_que = sc->tx_queues; 3268 u32 newitr; 3269 3270 if (ixgbe_max_interrupt_rate > 0) 3271 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3272 else { 3273 /* 3274 * Disable DMA coalescing if interrupt moderation is 3275 * disabled. 3276 */ 3277 sc->dmac = 0; 3278 newitr = 0; 3279 } 3280 3281 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) { 3282 struct rx_ring *rxr = &rx_que->rxr; 3283 3284 /* First the RX queue entry */ 3285 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0); 3286 3287 /* Set an Initial EITR value */ 3288 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr); 3289 } 3290 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) { 3291 struct tx_ring *txr = &tx_que->txr; 3292 3293 /* ... and the TX */ 3294 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1); 3295 } 3296 /* For the Link interrupt */ 3297 ixgbe_set_ivar(sc, 1, sc->vector, -1); 3298 } /* ixgbe_configure_ivars */ 3299 3300 /************************************************************************ 3301 * ixgbe_config_gpie 3302 ************************************************************************/ 3303 static void 3304 ixgbe_config_gpie(struct ixgbe_softc *sc) 3305 { 3306 struct ixgbe_hw *hw = &sc->hw; 3307 u32 gpie; 3308 3309 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3310 3311 if (sc->intr_type == IFLIB_INTR_MSIX) { 3312 /* Enable Enhanced MSI-X mode */ 3313 gpie |= IXGBE_GPIE_MSIX_MODE 3314 | IXGBE_GPIE_EIAME 3315 | IXGBE_GPIE_PBA_SUPPORT 3316 | IXGBE_GPIE_OCD; 3317 } 3318 3319 /* Fan Failure Interrupt */ 3320 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) 3321 gpie |= IXGBE_SDP1_GPIEN; 3322 3323 /* Thermal Sensor Interrupt */ 3324 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 3325 gpie |= IXGBE_SDP0_GPIEN_X540; 3326 3327 /* Link detection */ 3328 switch (hw->mac.type) { 3329 case ixgbe_mac_82599EB: 3330 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 3331 break; 3332 case ixgbe_mac_X550EM_x: 3333 case ixgbe_mac_X550EM_a: 3334 gpie |= IXGBE_SDP0_GPIEN_X540; 3335 break; 3336 default: 3337 break; 3338 } 3339 3340 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3341 3342 } /* ixgbe_config_gpie */ 3343 3344 /************************************************************************ 3345 * ixgbe_config_delay_values 3346 * 3347 * Requires sc->max_frame_size to be set. 3348 ************************************************************************/ 3349 static void 3350 ixgbe_config_delay_values(struct ixgbe_softc *sc) 3351 { 3352 struct ixgbe_hw *hw = &sc->hw; 3353 u32 rxpb, frame, size, tmp; 3354 3355 frame = sc->max_frame_size; 3356 3357 /* Calculate High Water */ 3358 switch (hw->mac.type) { 3359 case ixgbe_mac_X540: 3360 case ixgbe_mac_X550: 3361 case ixgbe_mac_X550EM_x: 3362 case ixgbe_mac_X550EM_a: 3363 tmp = IXGBE_DV_X540(frame, frame); 3364 break; 3365 default: 3366 tmp = IXGBE_DV(frame, frame); 3367 break; 3368 } 3369 size = IXGBE_BT2KB(tmp); 3370 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 3371 hw->fc.high_water[0] = rxpb - size; 3372 3373 /* Now calculate Low Water */ 3374 switch (hw->mac.type) { 3375 case ixgbe_mac_X540: 3376 case ixgbe_mac_X550: 3377 case ixgbe_mac_X550EM_x: 3378 case ixgbe_mac_X550EM_a: 3379 tmp = IXGBE_LOW_DV_X540(frame); 3380 break; 3381 default: 3382 tmp = IXGBE_LOW_DV(frame); 3383 break; 3384 } 3385 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 3386 3387 hw->fc.pause_time = IXGBE_FC_PAUSE; 3388 hw->fc.send_xon = true; 3389 } /* ixgbe_config_delay_values */ 3390 3391 /************************************************************************ 3392 * ixgbe_set_multi - Multicast Update 3393 * 3394 * Called whenever multicast address list is updated. 3395 ************************************************************************/ 3396 static u_int 3397 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx) 3398 { 3399 struct ixgbe_softc *sc = arg; 3400 struct ixgbe_mc_addr *mta = sc->mta; 3401 3402 if (idx == MAX_NUM_MULTICAST_ADDRESSES) 3403 return (0); 3404 bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3405 mta[idx].vmdq = sc->pool; 3406 3407 return (1); 3408 } /* ixgbe_mc_filter_apply */ 3409 3410 static void 3411 ixgbe_if_multi_set(if_ctx_t ctx) 3412 { 3413 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3414 struct ixgbe_mc_addr *mta; 3415 struct ifnet *ifp = iflib_get_ifp(ctx); 3416 u8 *update_ptr; 3417 u32 fctrl; 3418 u_int mcnt; 3419 3420 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin"); 3421 3422 mta = sc->mta; 3423 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 3424 3425 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, sc); 3426 3427 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { 3428 update_ptr = (u8 *)mta; 3429 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt, 3430 ixgbe_mc_array_itr, true); 3431 } 3432 3433 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); 3434 3435 if (ifp->if_flags & IFF_PROMISC) 3436 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3437 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 3438 ifp->if_flags & IFF_ALLMULTI) { 3439 fctrl |= IXGBE_FCTRL_MPE; 3440 fctrl &= ~IXGBE_FCTRL_UPE; 3441 } else 3442 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3443 3444 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl); 3445 } /* ixgbe_if_multi_set */ 3446 3447 /************************************************************************ 3448 * ixgbe_mc_array_itr 3449 * 3450 * An iterator function needed by the multicast shared code. 3451 * It feeds the shared code routine the addresses in the 3452 * array of ixgbe_set_multi() one by one. 3453 ************************************************************************/ 3454 static u8 * 3455 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 3456 { 3457 struct ixgbe_mc_addr *mta; 3458 3459 mta = (struct ixgbe_mc_addr *)*update_ptr; 3460 *vmdq = mta->vmdq; 3461 3462 *update_ptr = (u8*)(mta + 1); 3463 3464 return (mta->addr); 3465 } /* ixgbe_mc_array_itr */ 3466 3467 /************************************************************************ 3468 * ixgbe_local_timer - Timer routine 3469 * 3470 * Checks for link status, updates statistics, 3471 * and runs the watchdog check. 3472 ************************************************************************/ 3473 static void 3474 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid) 3475 { 3476 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3477 3478 if (qid != 0) 3479 return; 3480 3481 /* Check for pluggable optics */ 3482 if (sc->sfp_probe) 3483 if (!ixgbe_sfp_probe(ctx)) 3484 return; /* Nothing to do */ 3485 3486 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0); 3487 3488 /* Fire off the adminq task */ 3489 iflib_admin_intr_deferred(ctx); 3490 3491 } /* ixgbe_if_timer */ 3492 3493 /************************************************************************ 3494 * ixgbe_sfp_probe 3495 * 3496 * Determine if a port had optics inserted. 3497 ************************************************************************/ 3498 static bool 3499 ixgbe_sfp_probe(if_ctx_t ctx) 3500 { 3501 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3502 struct ixgbe_hw *hw = &sc->hw; 3503 device_t dev = iflib_get_dev(ctx); 3504 bool result = false; 3505 3506 if ((hw->phy.type == ixgbe_phy_nl) && 3507 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3508 s32 ret = hw->phy.ops.identify_sfp(hw); 3509 if (ret) 3510 goto out; 3511 ret = hw->phy.ops.reset(hw); 3512 sc->sfp_probe = false; 3513 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3514 device_printf(dev, "Unsupported SFP+ module detected!"); 3515 device_printf(dev, 3516 "Reload driver with supported module.\n"); 3517 goto out; 3518 } else 3519 device_printf(dev, "SFP+ module detected!\n"); 3520 /* We now have supported optics */ 3521 result = true; 3522 } 3523 out: 3524 3525 return (result); 3526 } /* ixgbe_sfp_probe */ 3527 3528 /************************************************************************ 3529 * ixgbe_handle_mod - Tasklet for SFP module interrupts 3530 ************************************************************************/ 3531 static void 3532 ixgbe_handle_mod(void *context) 3533 { 3534 if_ctx_t ctx = context; 3535 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3536 struct ixgbe_hw *hw = &sc->hw; 3537 device_t dev = iflib_get_dev(ctx); 3538 u32 err, cage_full = 0; 3539 3540 if (sc->hw.need_crosstalk_fix) { 3541 switch (hw->mac.type) { 3542 case ixgbe_mac_82599EB: 3543 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3544 IXGBE_ESDP_SDP2; 3545 break; 3546 case ixgbe_mac_X550EM_x: 3547 case ixgbe_mac_X550EM_a: 3548 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3549 IXGBE_ESDP_SDP0; 3550 break; 3551 default: 3552 break; 3553 } 3554 3555 if (!cage_full) 3556 goto handle_mod_out; 3557 } 3558 3559 err = hw->phy.ops.identify_sfp(hw); 3560 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3561 device_printf(dev, 3562 "Unsupported SFP+ module type was detected.\n"); 3563 goto handle_mod_out; 3564 } 3565 3566 if (hw->mac.type == ixgbe_mac_82598EB) 3567 err = hw->phy.ops.reset(hw); 3568 else 3569 err = hw->mac.ops.setup_sfp(hw); 3570 3571 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3572 device_printf(dev, 3573 "Setup failure - unsupported SFP+ module type.\n"); 3574 goto handle_mod_out; 3575 } 3576 sc->task_requests |= IXGBE_REQUEST_TASK_MSF; 3577 return; 3578 3579 handle_mod_out: 3580 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF); 3581 } /* ixgbe_handle_mod */ 3582 3583 3584 /************************************************************************ 3585 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts 3586 ************************************************************************/ 3587 static void 3588 ixgbe_handle_msf(void *context) 3589 { 3590 if_ctx_t ctx = context; 3591 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3592 struct ixgbe_hw *hw = &sc->hw; 3593 u32 autoneg; 3594 bool negotiate; 3595 3596 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 3597 sc->phy_layer = ixgbe_get_supported_physical_layer(hw); 3598 3599 autoneg = hw->phy.autoneg_advertised; 3600 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3601 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3602 if (hw->mac.ops.setup_link) 3603 hw->mac.ops.setup_link(hw, autoneg, true); 3604 3605 /* Adjust media types shown in ifconfig */ 3606 ifmedia_removeall(sc->media); 3607 ixgbe_add_media_types(sc->ctx); 3608 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); 3609 } /* ixgbe_handle_msf */ 3610 3611 /************************************************************************ 3612 * ixgbe_handle_phy - Tasklet for external PHY interrupts 3613 ************************************************************************/ 3614 static void 3615 ixgbe_handle_phy(void *context) 3616 { 3617 if_ctx_t ctx = context; 3618 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3619 struct ixgbe_hw *hw = &sc->hw; 3620 int error; 3621 3622 error = hw->phy.ops.handle_lasi(hw); 3623 if (error == IXGBE_ERR_OVERTEMP) 3624 device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n"); 3625 else if (error) 3626 device_printf(sc->dev, 3627 "Error handling LASI interrupt: %d\n", error); 3628 } /* ixgbe_handle_phy */ 3629 3630 /************************************************************************ 3631 * ixgbe_if_stop - Stop the hardware 3632 * 3633 * Disables all traffic on the adapter by issuing a 3634 * global reset on the MAC and deallocates TX/RX buffers. 3635 ************************************************************************/ 3636 static void 3637 ixgbe_if_stop(if_ctx_t ctx) 3638 { 3639 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3640 struct ixgbe_hw *hw = &sc->hw; 3641 3642 INIT_DEBUGOUT("ixgbe_if_stop: begin\n"); 3643 3644 ixgbe_reset_hw(hw); 3645 hw->adapter_stopped = false; 3646 ixgbe_stop_adapter(hw); 3647 if (hw->mac.type == ixgbe_mac_82599EB) 3648 ixgbe_stop_mac_link_on_d3_82599(hw); 3649 /* Turn off the laser - noop with no optics */ 3650 ixgbe_disable_tx_laser(hw); 3651 3652 /* Update the stack */ 3653 sc->link_up = false; 3654 ixgbe_if_update_admin_status(ctx); 3655 3656 /* reprogram the RAR[0] in case user changed it. */ 3657 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV); 3658 3659 return; 3660 } /* ixgbe_if_stop */ 3661 3662 /************************************************************************ 3663 * ixgbe_update_link_status - Update OS on link state 3664 * 3665 * Note: Only updates the OS on the cached link state. 3666 * The real check of the hardware only happens with 3667 * a link interrupt. 3668 ************************************************************************/ 3669 static void 3670 ixgbe_if_update_admin_status(if_ctx_t ctx) 3671 { 3672 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3673 device_t dev = iflib_get_dev(ctx); 3674 3675 if (sc->link_up) { 3676 if (sc->link_active == false) { 3677 if (bootverbose) 3678 device_printf(dev, "Link is up %d Gbps %s \n", 3679 ((sc->link_speed == 128) ? 10 : 1), 3680 "Full Duplex"); 3681 sc->link_active = true; 3682 /* Update any Flow Control changes */ 3683 ixgbe_fc_enable(&sc->hw); 3684 /* Update DMA coalescing config */ 3685 ixgbe_config_dmac(sc); 3686 /* should actually be negotiated value */ 3687 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); 3688 3689 if (sc->feat_en & IXGBE_FEATURE_SRIOV) 3690 ixgbe_ping_all_vfs(sc); 3691 } 3692 } else { /* Link down */ 3693 if (sc->link_active == true) { 3694 if (bootverbose) 3695 device_printf(dev, "Link is Down\n"); 3696 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 3697 sc->link_active = false; 3698 if (sc->feat_en & IXGBE_FEATURE_SRIOV) 3699 ixgbe_ping_all_vfs(sc); 3700 } 3701 } 3702 3703 /* Handle task requests from msix_link() */ 3704 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD) 3705 ixgbe_handle_mod(ctx); 3706 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF) 3707 ixgbe_handle_msf(ctx); 3708 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX) 3709 ixgbe_handle_mbx(ctx); 3710 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR) 3711 ixgbe_reinit_fdir(ctx); 3712 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY) 3713 ixgbe_handle_phy(ctx); 3714 sc->task_requests = 0; 3715 3716 ixgbe_update_stats_counters(sc); 3717 } /* ixgbe_if_update_admin_status */ 3718 3719 /************************************************************************ 3720 * ixgbe_config_dmac - Configure DMA Coalescing 3721 ************************************************************************/ 3722 static void 3723 ixgbe_config_dmac(struct ixgbe_softc *sc) 3724 { 3725 struct ixgbe_hw *hw = &sc->hw; 3726 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 3727 3728 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) 3729 return; 3730 3731 if (dcfg->watchdog_timer ^ sc->dmac || 3732 dcfg->link_speed ^ sc->link_speed) { 3733 dcfg->watchdog_timer = sc->dmac; 3734 dcfg->fcoe_en = false; 3735 dcfg->link_speed = sc->link_speed; 3736 dcfg->num_tcs = 1; 3737 3738 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 3739 dcfg->watchdog_timer, dcfg->link_speed); 3740 3741 hw->mac.ops.dmac_config(hw); 3742 } 3743 } /* ixgbe_config_dmac */ 3744 3745 /************************************************************************ 3746 * ixgbe_if_enable_intr 3747 ************************************************************************/ 3748 void 3749 ixgbe_if_enable_intr(if_ctx_t ctx) 3750 { 3751 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3752 struct ixgbe_hw *hw = &sc->hw; 3753 struct ix_rx_queue *que = sc->rx_queues; 3754 u32 mask, fwsm; 3755 3756 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 3757 3758 switch (sc->hw.mac.type) { 3759 case ixgbe_mac_82599EB: 3760 mask |= IXGBE_EIMS_ECC; 3761 /* Temperature sensor on some scs */ 3762 mask |= IXGBE_EIMS_GPI_SDP0; 3763 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 3764 mask |= IXGBE_EIMS_GPI_SDP1; 3765 mask |= IXGBE_EIMS_GPI_SDP2; 3766 break; 3767 case ixgbe_mac_X540: 3768 /* Detect if Thermal Sensor is enabled */ 3769 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 3770 if (fwsm & IXGBE_FWSM_TS_ENABLED) 3771 mask |= IXGBE_EIMS_TS; 3772 mask |= IXGBE_EIMS_ECC; 3773 break; 3774 case ixgbe_mac_X550: 3775 /* MAC thermal sensor is automatically enabled */ 3776 mask |= IXGBE_EIMS_TS; 3777 mask |= IXGBE_EIMS_ECC; 3778 break; 3779 case ixgbe_mac_X550EM_x: 3780 case ixgbe_mac_X550EM_a: 3781 /* Some devices use SDP0 for important information */ 3782 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3783 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 3784 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 3785 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) 3786 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 3787 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 3788 mask |= IXGBE_EICR_GPI_SDP0_X540; 3789 mask |= IXGBE_EIMS_ECC; 3790 break; 3791 default: 3792 break; 3793 } 3794 3795 /* Enable Fan Failure detection */ 3796 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) 3797 mask |= IXGBE_EIMS_GPI_SDP1; 3798 /* Enable SR-IOV */ 3799 if (sc->feat_en & IXGBE_FEATURE_SRIOV) 3800 mask |= IXGBE_EIMS_MAILBOX; 3801 /* Enable Flow Director */ 3802 if (sc->feat_en & IXGBE_FEATURE_FDIR) 3803 mask |= IXGBE_EIMS_FLOW_DIR; 3804 3805 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3806 3807 /* With MSI-X we use auto clear */ 3808 if (sc->intr_type == IFLIB_INTR_MSIX) { 3809 mask = IXGBE_EIMS_ENABLE_MASK; 3810 /* Don't autoclear Link */ 3811 mask &= ~IXGBE_EIMS_OTHER; 3812 mask &= ~IXGBE_EIMS_LSC; 3813 if (sc->feat_cap & IXGBE_FEATURE_SRIOV) 3814 mask &= ~IXGBE_EIMS_MAILBOX; 3815 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 3816 } 3817 3818 /* 3819 * Now enable all queues, this is done separately to 3820 * allow for handling the extended (beyond 32) MSI-X 3821 * vectors that can be used by 82599 3822 */ 3823 for (int i = 0; i < sc->num_rx_queues; i++, que++) 3824 ixgbe_enable_queue(sc, que->msix); 3825 3826 IXGBE_WRITE_FLUSH(hw); 3827 3828 } /* ixgbe_if_enable_intr */ 3829 3830 /************************************************************************ 3831 * ixgbe_disable_intr 3832 ************************************************************************/ 3833 static void 3834 ixgbe_if_disable_intr(if_ctx_t ctx) 3835 { 3836 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3837 3838 if (sc->intr_type == IFLIB_INTR_MSIX) 3839 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0); 3840 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 3841 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0); 3842 } else { 3843 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000); 3844 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0); 3845 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0); 3846 } 3847 IXGBE_WRITE_FLUSH(&sc->hw); 3848 3849 } /* ixgbe_if_disable_intr */ 3850 3851 /************************************************************************ 3852 * ixgbe_link_intr_enable 3853 ************************************************************************/ 3854 static void 3855 ixgbe_link_intr_enable(if_ctx_t ctx) 3856 { 3857 struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw; 3858 3859 /* Re-enable other interrupts */ 3860 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 3861 } /* ixgbe_link_intr_enable */ 3862 3863 /************************************************************************ 3864 * ixgbe_if_rx_queue_intr_enable 3865 ************************************************************************/ 3866 static int 3867 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 3868 { 3869 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3870 struct ix_rx_queue *que = &sc->rx_queues[rxqid]; 3871 3872 ixgbe_enable_queue(sc, que->msix); 3873 3874 return (0); 3875 } /* ixgbe_if_rx_queue_intr_enable */ 3876 3877 /************************************************************************ 3878 * ixgbe_enable_queue 3879 ************************************************************************/ 3880 static void 3881 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector) 3882 { 3883 struct ixgbe_hw *hw = &sc->hw; 3884 u64 queue = 1ULL << vector; 3885 u32 mask; 3886 3887 if (hw->mac.type == ixgbe_mac_82598EB) { 3888 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3889 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3890 } else { 3891 mask = (queue & 0xFFFFFFFF); 3892 if (mask) 3893 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 3894 mask = (queue >> 32); 3895 if (mask) 3896 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 3897 } 3898 } /* ixgbe_enable_queue */ 3899 3900 /************************************************************************ 3901 * ixgbe_disable_queue 3902 ************************************************************************/ 3903 static void 3904 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector) 3905 { 3906 struct ixgbe_hw *hw = &sc->hw; 3907 u64 queue = 1ULL << vector; 3908 u32 mask; 3909 3910 if (hw->mac.type == ixgbe_mac_82598EB) { 3911 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3912 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 3913 } else { 3914 mask = (queue & 0xFFFFFFFF); 3915 if (mask) 3916 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 3917 mask = (queue >> 32); 3918 if (mask) 3919 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 3920 } 3921 } /* ixgbe_disable_queue */ 3922 3923 /************************************************************************ 3924 * ixgbe_intr - Legacy Interrupt Service Routine 3925 ************************************************************************/ 3926 int 3927 ixgbe_intr(void *arg) 3928 { 3929 struct ixgbe_softc *sc = arg; 3930 struct ix_rx_queue *que = sc->rx_queues; 3931 struct ixgbe_hw *hw = &sc->hw; 3932 if_ctx_t ctx = sc->ctx; 3933 u32 eicr, eicr_mask; 3934 3935 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3936 3937 ++que->irqs; 3938 if (eicr == 0) { 3939 ixgbe_if_enable_intr(ctx); 3940 return (FILTER_HANDLED); 3941 } 3942 3943 /* Check for fan failure */ 3944 if ((hw->device_id == IXGBE_DEV_ID_82598AT) && 3945 (eicr & IXGBE_EICR_GPI_SDP1)) { 3946 device_printf(sc->dev, 3947 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 3948 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3949 } 3950 3951 /* Link status change */ 3952 if (eicr & IXGBE_EICR_LSC) { 3953 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 3954 iflib_admin_intr_deferred(ctx); 3955 } 3956 3957 if (ixgbe_is_sfp(hw)) { 3958 /* Pluggable optics-related interrupt */ 3959 if (hw->mac.type >= ixgbe_mac_X540) 3960 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 3961 else 3962 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 3963 3964 if (eicr & eicr_mask) { 3965 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 3966 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; 3967 } 3968 3969 if ((hw->mac.type == ixgbe_mac_82599EB) && 3970 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 3971 IXGBE_WRITE_REG(hw, IXGBE_EICR, 3972 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3973 sc->task_requests |= IXGBE_REQUEST_TASK_MSF; 3974 } 3975 } 3976 3977 /* External PHY interrupt */ 3978 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 3979 (eicr & IXGBE_EICR_GPI_SDP0_X540)) 3980 sc->task_requests |= IXGBE_REQUEST_TASK_PHY; 3981 3982 return (FILTER_SCHEDULE_THREAD); 3983 } /* ixgbe_intr */ 3984 3985 /************************************************************************ 3986 * ixgbe_free_pci_resources 3987 ************************************************************************/ 3988 static void 3989 ixgbe_free_pci_resources(if_ctx_t ctx) 3990 { 3991 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3992 struct ix_rx_queue *que = sc->rx_queues; 3993 device_t dev = iflib_get_dev(ctx); 3994 3995 /* Release all MSI-X queue resources */ 3996 if (sc->intr_type == IFLIB_INTR_MSIX) 3997 iflib_irq_free(ctx, &sc->irq); 3998 3999 if (que != NULL) { 4000 for (int i = 0; i < sc->num_rx_queues; i++, que++) { 4001 iflib_irq_free(ctx, &que->que_irq); 4002 } 4003 } 4004 4005 if (sc->pci_mem != NULL) 4006 bus_release_resource(dev, SYS_RES_MEMORY, 4007 rman_get_rid(sc->pci_mem), sc->pci_mem); 4008 } /* ixgbe_free_pci_resources */ 4009 4010 /************************************************************************ 4011 * ixgbe_sysctl_flowcntl 4012 * 4013 * SYSCTL wrapper around setting Flow Control 4014 ************************************************************************/ 4015 static int 4016 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS) 4017 { 4018 struct ixgbe_softc *sc; 4019 int error, fc; 4020 4021 sc = (struct ixgbe_softc *)arg1; 4022 fc = sc->hw.fc.current_mode; 4023 4024 error = sysctl_handle_int(oidp, &fc, 0, req); 4025 if ((error) || (req->newptr == NULL)) 4026 return (error); 4027 4028 /* Don't bother if it's not changed */ 4029 if (fc == sc->hw.fc.current_mode) 4030 return (0); 4031 4032 return ixgbe_set_flowcntl(sc, fc); 4033 } /* ixgbe_sysctl_flowcntl */ 4034 4035 /************************************************************************ 4036 * ixgbe_set_flowcntl - Set flow control 4037 * 4038 * Flow control values: 4039 * 0 - off 4040 * 1 - rx pause 4041 * 2 - tx pause 4042 * 3 - full 4043 ************************************************************************/ 4044 static int 4045 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc) 4046 { 4047 switch (fc) { 4048 case ixgbe_fc_rx_pause: 4049 case ixgbe_fc_tx_pause: 4050 case ixgbe_fc_full: 4051 sc->hw.fc.requested_mode = fc; 4052 if (sc->num_rx_queues > 1) 4053 ixgbe_disable_rx_drop(sc); 4054 break; 4055 case ixgbe_fc_none: 4056 sc->hw.fc.requested_mode = ixgbe_fc_none; 4057 if (sc->num_rx_queues > 1) 4058 ixgbe_enable_rx_drop(sc); 4059 break; 4060 default: 4061 return (EINVAL); 4062 } 4063 4064 /* Don't autoneg if forcing a value */ 4065 sc->hw.fc.disable_fc_autoneg = true; 4066 ixgbe_fc_enable(&sc->hw); 4067 4068 return (0); 4069 } /* ixgbe_set_flowcntl */ 4070 4071 /************************************************************************ 4072 * ixgbe_enable_rx_drop 4073 * 4074 * Enable the hardware to drop packets when the buffer is 4075 * full. This is useful with multiqueue, so that no single 4076 * queue being full stalls the entire RX engine. We only 4077 * enable this when Multiqueue is enabled AND Flow Control 4078 * is disabled. 4079 ************************************************************************/ 4080 static void 4081 ixgbe_enable_rx_drop(struct ixgbe_softc *sc) 4082 { 4083 struct ixgbe_hw *hw = &sc->hw; 4084 struct rx_ring *rxr; 4085 u32 srrctl; 4086 4087 for (int i = 0; i < sc->num_rx_queues; i++) { 4088 rxr = &sc->rx_queues[i].rxr; 4089 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 4090 srrctl |= IXGBE_SRRCTL_DROP_EN; 4091 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 4092 } 4093 4094 /* enable drop for each vf */ 4095 for (int i = 0; i < sc->num_vfs; i++) { 4096 IXGBE_WRITE_REG(hw, IXGBE_QDE, 4097 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | 4098 IXGBE_QDE_ENABLE)); 4099 } 4100 } /* ixgbe_enable_rx_drop */ 4101 4102 /************************************************************************ 4103 * ixgbe_disable_rx_drop 4104 ************************************************************************/ 4105 static void 4106 ixgbe_disable_rx_drop(struct ixgbe_softc *sc) 4107 { 4108 struct ixgbe_hw *hw = &sc->hw; 4109 struct rx_ring *rxr; 4110 u32 srrctl; 4111 4112 for (int i = 0; i < sc->num_rx_queues; i++) { 4113 rxr = &sc->rx_queues[i].rxr; 4114 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 4115 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 4116 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 4117 } 4118 4119 /* disable drop for each vf */ 4120 for (int i = 0; i < sc->num_vfs; i++) { 4121 IXGBE_WRITE_REG(hw, IXGBE_QDE, 4122 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); 4123 } 4124 } /* ixgbe_disable_rx_drop */ 4125 4126 /************************************************************************ 4127 * ixgbe_sysctl_advertise 4128 * 4129 * SYSCTL wrapper around setting advertised speed 4130 ************************************************************************/ 4131 static int 4132 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS) 4133 { 4134 struct ixgbe_softc *sc; 4135 int error, advertise; 4136 4137 sc = (struct ixgbe_softc *)arg1; 4138 advertise = sc->advertise; 4139 4140 error = sysctl_handle_int(oidp, &advertise, 0, req); 4141 if ((error) || (req->newptr == NULL)) 4142 return (error); 4143 4144 return ixgbe_set_advertise(sc, advertise); 4145 } /* ixgbe_sysctl_advertise */ 4146 4147 /************************************************************************ 4148 * ixgbe_set_advertise - Control advertised link speed 4149 * 4150 * Flags: 4151 * 0x1 - advertise 100 Mb 4152 * 0x2 - advertise 1G 4153 * 0x4 - advertise 10G 4154 * 0x8 - advertise 10 Mb (yes, Mb) 4155 * 0x10 - advertise 2.5G (disabled by default) 4156 * 0x20 - advertise 5G (disabled by default) 4157 * 4158 ************************************************************************/ 4159 static int 4160 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise) 4161 { 4162 device_t dev = iflib_get_dev(sc->ctx); 4163 struct ixgbe_hw *hw; 4164 ixgbe_link_speed speed = 0; 4165 ixgbe_link_speed link_caps = 0; 4166 s32 err = IXGBE_NOT_IMPLEMENTED; 4167 bool negotiate = false; 4168 4169 /* Checks to validate new value */ 4170 if (sc->advertise == advertise) /* no change */ 4171 return (0); 4172 4173 hw = &sc->hw; 4174 4175 /* No speed changes for backplane media */ 4176 if (hw->phy.media_type == ixgbe_media_type_backplane) 4177 return (ENODEV); 4178 4179 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 4180 (hw->phy.multispeed_fiber))) { 4181 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n"); 4182 return (EINVAL); 4183 } 4184 4185 if (advertise < 0x1 || advertise > 0x3F) { 4186 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0x3F\n"); 4187 return (EINVAL); 4188 } 4189 4190 if (hw->mac.ops.get_link_capabilities) { 4191 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 4192 &negotiate); 4193 if (err != IXGBE_SUCCESS) { 4194 device_printf(dev, "Unable to determine supported advertise speeds\n"); 4195 return (ENODEV); 4196 } 4197 } 4198 4199 /* Set new value and report new advertised mode */ 4200 if (advertise & 0x1) { 4201 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) { 4202 device_printf(dev, "Interface does not support 100Mb advertised speed\n"); 4203 return (EINVAL); 4204 } 4205 speed |= IXGBE_LINK_SPEED_100_FULL; 4206 } 4207 if (advertise & 0x2) { 4208 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { 4209 device_printf(dev, "Interface does not support 1Gb advertised speed\n"); 4210 return (EINVAL); 4211 } 4212 speed |= IXGBE_LINK_SPEED_1GB_FULL; 4213 } 4214 if (advertise & 0x4) { 4215 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { 4216 device_printf(dev, "Interface does not support 10Gb advertised speed\n"); 4217 return (EINVAL); 4218 } 4219 speed |= IXGBE_LINK_SPEED_10GB_FULL; 4220 } 4221 if (advertise & 0x8) { 4222 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { 4223 device_printf(dev, "Interface does not support 10Mb advertised speed\n"); 4224 return (EINVAL); 4225 } 4226 speed |= IXGBE_LINK_SPEED_10_FULL; 4227 } 4228 if (advertise & 0x10) { 4229 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) { 4230 device_printf(dev, "Interface does not support 2.5G advertised speed\n"); 4231 return (EINVAL); 4232 } 4233 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 4234 } 4235 if (advertise & 0x20) { 4236 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) { 4237 device_printf(dev, "Interface does not support 5G advertised speed\n"); 4238 return (EINVAL); 4239 } 4240 speed |= IXGBE_LINK_SPEED_5GB_FULL; 4241 } 4242 4243 hw->mac.autotry_restart = true; 4244 hw->mac.ops.setup_link(hw, speed, true); 4245 sc->advertise = advertise; 4246 4247 return (0); 4248 } /* ixgbe_set_advertise */ 4249 4250 /************************************************************************ 4251 * ixgbe_get_default_advertise - Get default advertised speed settings 4252 * 4253 * Formatted for sysctl usage. 4254 * Flags: 4255 * 0x1 - advertise 100 Mb 4256 * 0x2 - advertise 1G 4257 * 0x4 - advertise 10G 4258 * 0x8 - advertise 10 Mb (yes, Mb) 4259 * 0x10 - advertise 2.5G (disabled by default) 4260 * 0x20 - advertise 5G (disabled by default) 4261 ************************************************************************/ 4262 static int 4263 ixgbe_get_default_advertise(struct ixgbe_softc *sc) 4264 { 4265 struct ixgbe_hw *hw = &sc->hw; 4266 int speed; 4267 ixgbe_link_speed link_caps = 0; 4268 s32 err; 4269 bool negotiate = false; 4270 4271 /* 4272 * Advertised speed means nothing unless it's copper or 4273 * multi-speed fiber 4274 */ 4275 if (!(hw->phy.media_type == ixgbe_media_type_copper) && 4276 !(hw->phy.multispeed_fiber)) 4277 return (0); 4278 4279 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 4280 if (err != IXGBE_SUCCESS) 4281 return (0); 4282 4283 if (hw->mac.type == ixgbe_mac_X550) { 4284 /* 4285 * 2.5G and 5G autonegotiation speeds on X550 4286 * are disabled by default due to reported 4287 * interoperability issues with some switches. 4288 */ 4289 link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL | 4290 IXGBE_LINK_SPEED_5GB_FULL); 4291 } 4292 4293 speed = 4294 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) | 4295 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) | 4296 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | 4297 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) | 4298 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) | 4299 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0); 4300 4301 return speed; 4302 } /* ixgbe_get_default_advertise */ 4303 4304 /************************************************************************ 4305 * ixgbe_sysctl_dmac - Manage DMA Coalescing 4306 * 4307 * Control values: 4308 * 0/1 - off / on (use default value of 1000) 4309 * 4310 * Legal timer values are: 4311 * 50,100,250,500,1000,2000,5000,10000 4312 * 4313 * Turning off interrupt moderation will also turn this off. 4314 ************************************************************************/ 4315 static int 4316 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4317 { 4318 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4319 struct ifnet *ifp = iflib_get_ifp(sc->ctx); 4320 int error; 4321 u16 newval; 4322 4323 newval = sc->dmac; 4324 error = sysctl_handle_16(oidp, &newval, 0, req); 4325 if ((error) || (req->newptr == NULL)) 4326 return (error); 4327 4328 switch (newval) { 4329 case 0: 4330 /* Disabled */ 4331 sc->dmac = 0; 4332 break; 4333 case 1: 4334 /* Enable and use default */ 4335 sc->dmac = 1000; 4336 break; 4337 case 50: 4338 case 100: 4339 case 250: 4340 case 500: 4341 case 1000: 4342 case 2000: 4343 case 5000: 4344 case 10000: 4345 /* Legal values - allow */ 4346 sc->dmac = newval; 4347 break; 4348 default: 4349 /* Do nothing, illegal value */ 4350 return (EINVAL); 4351 } 4352 4353 /* Re-initialize hardware if it's already running */ 4354 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4355 ifp->if_init(ifp); 4356 4357 return (0); 4358 } /* ixgbe_sysctl_dmac */ 4359 4360 #ifdef IXGBE_DEBUG 4361 /************************************************************************ 4362 * ixgbe_sysctl_power_state 4363 * 4364 * Sysctl to test power states 4365 * Values: 4366 * 0 - set device to D0 4367 * 3 - set device to D3 4368 * (none) - get current device power state 4369 ************************************************************************/ 4370 static int 4371 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS) 4372 { 4373 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4374 device_t dev = sc->dev; 4375 int curr_ps, new_ps, error = 0; 4376 4377 curr_ps = new_ps = pci_get_powerstate(dev); 4378 4379 error = sysctl_handle_int(oidp, &new_ps, 0, req); 4380 if ((error) || (req->newptr == NULL)) 4381 return (error); 4382 4383 if (new_ps == curr_ps) 4384 return (0); 4385 4386 if (new_ps == 3 && curr_ps == 0) 4387 error = DEVICE_SUSPEND(dev); 4388 else if (new_ps == 0 && curr_ps == 3) 4389 error = DEVICE_RESUME(dev); 4390 else 4391 return (EINVAL); 4392 4393 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 4394 4395 return (error); 4396 } /* ixgbe_sysctl_power_state */ 4397 #endif 4398 4399 /************************************************************************ 4400 * ixgbe_sysctl_wol_enable 4401 * 4402 * Sysctl to enable/disable the WoL capability, 4403 * if supported by the adapter. 4404 * 4405 * Values: 4406 * 0 - disabled 4407 * 1 - enabled 4408 ************************************************************************/ 4409 static int 4410 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) 4411 { 4412 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4413 struct ixgbe_hw *hw = &sc->hw; 4414 int new_wol_enabled; 4415 int error = 0; 4416 4417 new_wol_enabled = hw->wol_enabled; 4418 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); 4419 if ((error) || (req->newptr == NULL)) 4420 return (error); 4421 new_wol_enabled = !!(new_wol_enabled); 4422 if (new_wol_enabled == hw->wol_enabled) 4423 return (0); 4424 4425 if (new_wol_enabled > 0 && !sc->wol_support) 4426 return (ENODEV); 4427 else 4428 hw->wol_enabled = new_wol_enabled; 4429 4430 return (0); 4431 } /* ixgbe_sysctl_wol_enable */ 4432 4433 /************************************************************************ 4434 * ixgbe_sysctl_wufc - Wake Up Filter Control 4435 * 4436 * Sysctl to enable/disable the types of packets that the 4437 * adapter will wake up on upon receipt. 4438 * Flags: 4439 * 0x1 - Link Status Change 4440 * 0x2 - Magic Packet 4441 * 0x4 - Direct Exact 4442 * 0x8 - Directed Multicast 4443 * 0x10 - Broadcast 4444 * 0x20 - ARP/IPv4 Request Packet 4445 * 0x40 - Direct IPv4 Packet 4446 * 0x80 - Direct IPv6 Packet 4447 * 4448 * Settings not listed above will cause the sysctl to return an error. 4449 ************************************************************************/ 4450 static int 4451 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS) 4452 { 4453 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4454 int error = 0; 4455 u32 new_wufc; 4456 4457 new_wufc = sc->wufc; 4458 4459 error = sysctl_handle_32(oidp, &new_wufc, 0, req); 4460 if ((error) || (req->newptr == NULL)) 4461 return (error); 4462 if (new_wufc == sc->wufc) 4463 return (0); 4464 4465 if (new_wufc & 0xffffff00) 4466 return (EINVAL); 4467 4468 new_wufc &= 0xff; 4469 new_wufc |= (0xffffff & sc->wufc); 4470 sc->wufc = new_wufc; 4471 4472 return (0); 4473 } /* ixgbe_sysctl_wufc */ 4474 4475 #ifdef IXGBE_DEBUG 4476 /************************************************************************ 4477 * ixgbe_sysctl_print_rss_config 4478 ************************************************************************/ 4479 static int 4480 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS) 4481 { 4482 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4483 struct ixgbe_hw *hw = &sc->hw; 4484 device_t dev = sc->dev; 4485 struct sbuf *buf; 4486 int error = 0, reta_size; 4487 u32 reg; 4488 4489 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4490 if (!buf) { 4491 device_printf(dev, "Could not allocate sbuf for output.\n"); 4492 return (ENOMEM); 4493 } 4494 4495 // TODO: use sbufs to make a string to print out 4496 /* Set multiplier for RETA setup and table size based on MAC */ 4497 switch (sc->hw.mac.type) { 4498 case ixgbe_mac_X550: 4499 case ixgbe_mac_X550EM_x: 4500 case ixgbe_mac_X550EM_a: 4501 reta_size = 128; 4502 break; 4503 default: 4504 reta_size = 32; 4505 break; 4506 } 4507 4508 /* Print out the redirection table */ 4509 sbuf_cat(buf, "\n"); 4510 for (int i = 0; i < reta_size; i++) { 4511 if (i < 32) { 4512 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); 4513 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); 4514 } else { 4515 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); 4516 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); 4517 } 4518 } 4519 4520 // TODO: print more config 4521 4522 error = sbuf_finish(buf); 4523 if (error) 4524 device_printf(dev, "Error finishing sbuf: %d\n", error); 4525 4526 sbuf_delete(buf); 4527 4528 return (0); 4529 } /* ixgbe_sysctl_print_rss_config */ 4530 #endif /* IXGBE_DEBUG */ 4531 4532 /************************************************************************ 4533 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY 4534 * 4535 * For X552/X557-AT devices using an external PHY 4536 ************************************************************************/ 4537 static int 4538 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) 4539 { 4540 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4541 struct ixgbe_hw *hw = &sc->hw; 4542 u16 reg; 4543 4544 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4545 device_printf(iflib_get_dev(sc->ctx), 4546 "Device has no supported external thermal sensor.\n"); 4547 return (ENODEV); 4548 } 4549 4550 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 4551 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4552 device_printf(iflib_get_dev(sc->ctx), 4553 "Error reading from PHY's current temperature register\n"); 4554 return (EAGAIN); 4555 } 4556 4557 /* Shift temp for output */ 4558 reg = reg >> 8; 4559 4560 return (sysctl_handle_16(oidp, NULL, reg, req)); 4561 } /* ixgbe_sysctl_phy_temp */ 4562 4563 /************************************************************************ 4564 * ixgbe_sysctl_phy_overtemp_occurred 4565 * 4566 * Reports (directly from the PHY) whether the current PHY 4567 * temperature is over the overtemp threshold. 4568 ************************************************************************/ 4569 static int 4570 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) 4571 { 4572 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4573 struct ixgbe_hw *hw = &sc->hw; 4574 u16 reg; 4575 4576 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4577 device_printf(iflib_get_dev(sc->ctx), 4578 "Device has no supported external thermal sensor.\n"); 4579 return (ENODEV); 4580 } 4581 4582 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 4583 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4584 device_printf(iflib_get_dev(sc->ctx), 4585 "Error reading from PHY's temperature status register\n"); 4586 return (EAGAIN); 4587 } 4588 4589 /* Get occurrence bit */ 4590 reg = !!(reg & 0x4000); 4591 4592 return (sysctl_handle_16(oidp, 0, reg, req)); 4593 } /* ixgbe_sysctl_phy_overtemp_occurred */ 4594 4595 /************************************************************************ 4596 * ixgbe_sysctl_eee_state 4597 * 4598 * Sysctl to set EEE power saving feature 4599 * Values: 4600 * 0 - disable EEE 4601 * 1 - enable EEE 4602 * (none) - get current device EEE state 4603 ************************************************************************/ 4604 static int 4605 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS) 4606 { 4607 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4608 device_t dev = sc->dev; 4609 struct ifnet *ifp = iflib_get_ifp(sc->ctx); 4610 int curr_eee, new_eee, error = 0; 4611 s32 retval; 4612 4613 curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE); 4614 4615 error = sysctl_handle_int(oidp, &new_eee, 0, req); 4616 if ((error) || (req->newptr == NULL)) 4617 return (error); 4618 4619 /* Nothing to do */ 4620 if (new_eee == curr_eee) 4621 return (0); 4622 4623 /* Not supported */ 4624 if (!(sc->feat_cap & IXGBE_FEATURE_EEE)) 4625 return (EINVAL); 4626 4627 /* Bounds checking */ 4628 if ((new_eee < 0) || (new_eee > 1)) 4629 return (EINVAL); 4630 4631 retval = ixgbe_setup_eee(&sc->hw, new_eee); 4632 if (retval) { 4633 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval); 4634 return (EINVAL); 4635 } 4636 4637 /* Restart auto-neg */ 4638 ifp->if_init(ifp); 4639 4640 device_printf(dev, "New EEE state: %d\n", new_eee); 4641 4642 /* Cache new value */ 4643 if (new_eee) 4644 sc->feat_en |= IXGBE_FEATURE_EEE; 4645 else 4646 sc->feat_en &= ~IXGBE_FEATURE_EEE; 4647 4648 return (error); 4649 } /* ixgbe_sysctl_eee_state */ 4650 4651 /************************************************************************ 4652 * ixgbe_init_device_features 4653 ************************************************************************/ 4654 static void 4655 ixgbe_init_device_features(struct ixgbe_softc *sc) 4656 { 4657 sc->feat_cap = IXGBE_FEATURE_NETMAP 4658 | IXGBE_FEATURE_RSS 4659 | IXGBE_FEATURE_MSI 4660 | IXGBE_FEATURE_MSIX 4661 | IXGBE_FEATURE_LEGACY_IRQ; 4662 4663 /* Set capabilities first... */ 4664 switch (sc->hw.mac.type) { 4665 case ixgbe_mac_82598EB: 4666 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT) 4667 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL; 4668 break; 4669 case ixgbe_mac_X540: 4670 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4671 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4672 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && 4673 (sc->hw.bus.func == 0)) 4674 sc->feat_cap |= IXGBE_FEATURE_BYPASS; 4675 break; 4676 case ixgbe_mac_X550: 4677 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4678 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4679 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4680 break; 4681 case ixgbe_mac_X550EM_x: 4682 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4683 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4684 break; 4685 case ixgbe_mac_X550EM_a: 4686 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4687 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4688 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4689 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || 4690 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { 4691 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4692 sc->feat_cap |= IXGBE_FEATURE_EEE; 4693 } 4694 break; 4695 case ixgbe_mac_82599EB: 4696 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4697 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4698 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && 4699 (sc->hw.bus.func == 0)) 4700 sc->feat_cap |= IXGBE_FEATURE_BYPASS; 4701 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) 4702 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4703 break; 4704 default: 4705 break; 4706 } 4707 4708 /* Enabled by default... */ 4709 /* Fan failure detection */ 4710 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL) 4711 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL; 4712 /* Netmap */ 4713 if (sc->feat_cap & IXGBE_FEATURE_NETMAP) 4714 sc->feat_en |= IXGBE_FEATURE_NETMAP; 4715 /* EEE */ 4716 if (sc->feat_cap & IXGBE_FEATURE_EEE) 4717 sc->feat_en |= IXGBE_FEATURE_EEE; 4718 /* Thermal Sensor */ 4719 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) 4720 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; 4721 4722 /* Enabled via global sysctl... */ 4723 /* Flow Director */ 4724 if (ixgbe_enable_fdir) { 4725 if (sc->feat_cap & IXGBE_FEATURE_FDIR) 4726 sc->feat_en |= IXGBE_FEATURE_FDIR; 4727 else 4728 device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled."); 4729 } 4730 /* 4731 * Message Signal Interrupts - Extended (MSI-X) 4732 * Normal MSI is only enabled if MSI-X calls fail. 4733 */ 4734 if (!ixgbe_enable_msix) 4735 sc->feat_cap &= ~IXGBE_FEATURE_MSIX; 4736 /* Receive-Side Scaling (RSS) */ 4737 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) 4738 sc->feat_en |= IXGBE_FEATURE_RSS; 4739 4740 /* Disable features with unmet dependencies... */ 4741 /* No MSI-X */ 4742 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) { 4743 sc->feat_cap &= ~IXGBE_FEATURE_RSS; 4744 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV; 4745 sc->feat_en &= ~IXGBE_FEATURE_RSS; 4746 sc->feat_en &= ~IXGBE_FEATURE_SRIOV; 4747 } 4748 } /* ixgbe_init_device_features */ 4749 4750 /************************************************************************ 4751 * ixgbe_check_fan_failure 4752 ************************************************************************/ 4753 static void 4754 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt) 4755 { 4756 u32 mask; 4757 4758 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) : 4759 IXGBE_ESDP_SDP1; 4760 4761 if (reg & mask) 4762 device_printf(sc->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 4763 } /* ixgbe_check_fan_failure */ 4764 4765 /************************************************************************ 4766 * ixgbe_sbuf_fw_version 4767 ************************************************************************/ 4768 static void 4769 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf) 4770 { 4771 struct ixgbe_nvm_version nvm_ver = {0}; 4772 uint16_t phyfw = 0; 4773 int status; 4774 const char *space = ""; 4775 4776 ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */ 4777 ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */ 4778 ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */ 4779 status = ixgbe_get_phy_firmware_version(hw, &phyfw); 4780 4781 if (nvm_ver.oem_valid) { 4782 sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major, 4783 nvm_ver.oem_minor, nvm_ver.oem_release); 4784 space = " "; 4785 } 4786 4787 if (nvm_ver.or_valid) { 4788 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", 4789 space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch); 4790 space = " "; 4791 } 4792 4793 if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) | 4794 NVM_VER_INVALID)) { 4795 sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id); 4796 space = " "; 4797 } 4798 4799 if (phyfw != 0 && status == IXGBE_SUCCESS) 4800 sbuf_printf(buf, "%sPHY FW V%d", space, phyfw); 4801 } /* ixgbe_sbuf_fw_version */ 4802 4803 /************************************************************************ 4804 * ixgbe_print_fw_version 4805 ************************************************************************/ 4806 static void 4807 ixgbe_print_fw_version(if_ctx_t ctx) 4808 { 4809 struct ixgbe_softc *sc = iflib_get_softc(ctx); 4810 struct ixgbe_hw *hw = &sc->hw; 4811 device_t dev = sc->dev; 4812 struct sbuf *buf; 4813 int error = 0; 4814 4815 buf = sbuf_new_auto(); 4816 if (!buf) { 4817 device_printf(dev, "Could not allocate sbuf for output.\n"); 4818 return; 4819 } 4820 4821 ixgbe_sbuf_fw_version(hw, buf); 4822 4823 error = sbuf_finish(buf); 4824 if (error) 4825 device_printf(dev, "Error finishing sbuf: %d\n", error); 4826 else if (sbuf_len(buf)) 4827 device_printf(dev, "%s\n", sbuf_data(buf)); 4828 4829 sbuf_delete(buf); 4830 } /* ixgbe_print_fw_version */ 4831 4832 /************************************************************************ 4833 * ixgbe_sysctl_print_fw_version 4834 ************************************************************************/ 4835 static int 4836 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS) 4837 { 4838 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4839 struct ixgbe_hw *hw = &sc->hw; 4840 device_t dev = sc->dev; 4841 struct sbuf *buf; 4842 int error = 0; 4843 4844 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4845 if (!buf) { 4846 device_printf(dev, "Could not allocate sbuf for output.\n"); 4847 return (ENOMEM); 4848 } 4849 4850 ixgbe_sbuf_fw_version(hw, buf); 4851 4852 error = sbuf_finish(buf); 4853 if (error) 4854 device_printf(dev, "Error finishing sbuf: %d\n", error); 4855 4856 sbuf_delete(buf); 4857 4858 return (0); 4859 } /* ixgbe_sysctl_print_fw_version */ 4860