/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
    /* required last entry */
    PVID_END
};

static void *ixgbe_register(device_t);
static int  ixgbe_if_attach_pre(if_ctx_t);
static int  ixgbe_if_attach_post(if_ctx_t);
static int  ixgbe_if_detach(if_ctx_t);
static int  ixgbe_if_shutdown(if_ctx_t);
static int  ixgbe_if_suspend(if_ctx_t);
static int  ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixgbe_if_media_change(if_ctx_t);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int  ixgbe_if_promisc_set(if_ctx_t, int);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int  ixgbe_allocate_pci_resources(if_ctx_t);
static int  ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8  *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int  ixgbe_msix_link(void *);
static int  ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int  ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
static int  ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 *  FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, ixgbe_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD(device_suspend, iflib_device_suspend),
    DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
    DEVMETHOD(pci_iov_init, iflib_device_iov_init),
    DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
    DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

static driver_t ix_driver = {
    "ix", ix_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
    DEVMETHOD(ifdi_detach, ixgbe_if_detach),
    DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
    DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
    DEVMETHOD(ifdi_resume, ixgbe_if_resume),
    DEVMETHOD(ifdi_init, ixgbe_if_init),
    DEVMETHOD(ifdi_stop, ixgbe_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
    DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
    DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
    DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
    DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
    DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
    DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
    DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
    DEVMETHOD(ifdi_timer, ixgbe_if_timer),
    DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
    DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
    DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
    DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
    DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
    DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
    DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
    DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
    "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.  This only works as a
 * compile option right now, since it is set during attach;
 * set this to 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Setting this to on allows the use of unsupported SFP+ modules;
 * note that in doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation, which means that the
 * interrupt rate is varied over time based on the traffic
 * for that interrupt vector.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");
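/*
 * Illustrative note (not from the original sources): the hw.ix knobs
 * above that are declared CTLFLAG_RDTUN are read-only sysctls backed by
 * loader tunables, so they are normally set from /boot/loader.conf
 * rather than at runtime, e.g.:
 *
 *   hw.ix.max_interrupt_rate="31250"
 *   hw.ix.flow_control="0"
 *   hw.ix.enable_msix="1"
 *
 * hw.ix.enable_aim is declared CTLFLAG_RWTUN and can additionally be
 * changed at runtime via sysctl(8).  The values shown are examples only.
 */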
#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
    .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = PAGE_SIZE,
    .isc_rx_maxsize = PAGE_SIZE*4,
    .isc_rx_nsegments = 1,
    .isc_rx_maxsegsize = PAGE_SIZE*4,
    .isc_nfl = 1,
    .isc_ntxqs = 1,
    .isc_nrxqs = 1,

    .isc_admin_intrcnt = 1,
    .isc_vendor_info = ixgbe_vendor_info_array,
    .isc_driver_version = ixgbe_driver_version,
    .isc_driver = &ixgbe_if_driver,
    .isc_flags = IFLIB_TSO_INIT_IP,

    .isc_nrxd_min = {MIN_RXD},
    .isc_ntxd_min = {MIN_TXD},
    .isc_nrxd_max = {MAX_RXD},
    .isc_ntxd_max = {MAX_TXD},
    .isc_nrxd_default = {DEFAULT_RXD},
    .isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = sc->shared;
    struct ix_tx_queue *que;
    int i, j, error;

    MPASS(sc->num_tx_queues > 0);
    MPASS(sc->num_tx_queues == ntxqsets);
    MPASS(ntxqs == 1);

    /* Allocate queue structure memory */
    sc->tx_queues =
        (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!sc->tx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate TX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
        struct tx_ring *txr = &que->txr;

        /* In case SR-IOV is enabled, align the index properly */
        txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

        txr->sc = que->sc = sc;

        /* Allocate report status array */
        txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0],
            M_IXGBE, M_NOWAIT | M_ZERO);
        if (txr->tx_rsq == NULL) {
            error = ENOMEM;
            goto fail;
        }
        for (j = 0; j < scctx->isc_ntxd[0]; j++)
            txr->tx_rsq[j] = QIDX_INVALID;
        /* get the virtual and physical address of the hardware queues */
        txr->tail = IXGBE_TDT(txr->me);
        txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
        txr->tx_paddr = paddrs[i];

        txr->bytes = 0;
        txr->total_packets = 0;

        /* Set the rate at which we sample packets */
        if (sc->feat_en & IXGBE_FEATURE_FDIR)
            txr->atr_sample = atr_sample_rate;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
        sc->num_tx_queues);

    return (0);

fail:
    ixgbe_if_queues_free(ctx);

    return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que;
    int i;

    MPASS(sc->num_rx_queues > 0);
    MPASS(sc->num_rx_queues == nrxqsets);
    MPASS(nrxqs == 1);

    /* Allocate queue structure memory */
    sc->rx_queues =
        (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!sc->rx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate RX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
        struct rx_ring *rxr = &que->rxr;

        /* In case SR-IOV is enabled, align the index properly */
        rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

        rxr->sc = que->sc = sc;

        /* get the virtual and physical address of the hw queues */
        rxr->tail = IXGBE_RDT(rxr->me);
        rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
        rxr->rx_paddr = paddrs[i];
        rxr->bytes = 0;
        rxr->que = que;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
        sc->num_rx_queues);

    return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_tx_queue *tx_que = sc->tx_queues;
    struct ix_rx_queue *rx_que = sc->rx_queues;
    int i;

    if (tx_que != NULL) {
        for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
            struct tx_ring *txr = &tx_que->txr;
            if (txr->tx_rsq == NULL)
                break;

            free(txr->tx_rsq, M_IXGBE);
            txr->tx_rsq = NULL;
        }

        free(sc->tx_queues, M_IXGBE);
        sc->tx_queues = NULL;
    }
    if (rx_que != NULL) {
        free(sc->rx_queues, M_IXGBE);
        sc->rx_queues = NULL;
    }
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 reta = 0, mrqc, rss_key[10];
    int queue_id, table_size, index_mult;
    int i, j;
    u32 rss_hash_config;

    if (sc->feat_en & IXGBE_FEATURE_RSS) {
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *)&rss_key);
    } else {
        /* set up random bits */
        arc4rand(&rss_key, sizeof(rss_key), 0);
    }

    /* Set multiplier for RETA setup and table size based on MAC */
    index_mult = 0x1;
    table_size = 128;
    switch (sc->hw.mac.type) {
    case ixgbe_mac_82598EB:
        index_mult = 0x11;
        break;
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        table_size = 512;
        break;
    default:
        break;
    }
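    /*
     * Illustrative note (not from the original sources): each 32-bit
     * RETA/ERETA register holds four 8-bit queue indices, filled from
     * the low byte up.  For example, with four RX queues the first
     * register written by the loop below ends up as
     *
     *   reta = q0 | (q1 << 8) | (q2 << 16) | (q3 << 24) = 0x03020100
     *
     * which is exactly what the shift-and-or accumulation computes.
     */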
    /* Set up the redirection table */
    for (i = 0, j = 0; i < table_size; i++, j++) {
        if (j == sc->num_rx_queues)
            j = 0;

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
            /*
             * Fetch the RSS bucket id for the given indirection
             * entry. Cap it at the number of configured buckets
             * (which is num_rx_queues.)
             */
            queue_id = rss_get_indirection_to_bucket(i);
            queue_id = queue_id % sc->num_rx_queues;
        } else
            queue_id = (j * index_mult);

        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
        reta = reta >> 8;
        reta = reta | (((uint32_t)queue_id) << 24);
        if ((i & 3) == 3) {
            if (i < 128)
                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
            else
                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                    reta);
            reta = 0;
        }
    }

    /* Now fill our hash function seeds */
    for (i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

    /* Perform hash on these packet types */
    if (sc->feat_en & IXGBE_FEATURE_RSS)
        rss_hash_config = rss_gethashconfig();
    else {
        /*
         * Disable UDP - IP fragments aren't currently being handled
         * and so we end up with a mix of 2-tuple and 4-tuple
         * traffic.
         */
        rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                        | RSS_HASHTYPE_RSS_TCP_IPV4
                        | RSS_HASHTYPE_RSS_IPV6
                        | RSS_HASHTYPE_RSS_TCP_IPV6
                        | RSS_HASHTYPE_RSS_IPV6_EX
                        | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    }

    mrqc = IXGBE_MRQC_RSSEN;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    mrqc |= ixgbe_get_mrqc(sc->iov_mode);
    IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
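/*
 * Illustrative note (not from the original sources): SRRCTL.BSIZEPKT is
 * expressed in 1 KB units (assuming the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT
 * of 10), so the macro above rounds a byte count up to a whole number of
 * kilobytes before the shift.  E.g. for 2 KB mbuf clusters:
 *
 *   bufsz = (2048 + 1023) >> 10 = 2   ->  2 KB receive buffers
 */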
static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = sc->shared;
    struct ixgbe_hw *hw = &sc->hw;
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct ix_rx_queue *que;
    int i, j;
    u32 bufsz, fctrl, srrctl, rxcsum;
    u32 hlreg;

    /*
     * Make sure receives are disabled while
     * setting up the descriptor ring
     */
    ixgbe_disable_rx(hw);

    /* Enable broadcasts */
    fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    fctrl |= IXGBE_FCTRL_BAM;
    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        fctrl |= IXGBE_FCTRL_DPF;
        fctrl |= IXGBE_FCTRL_PMCF;
    }
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    /* Set for Jumbo Frames? */
    hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    if (ifp->if_mtu > ETHERMTU)
        hlreg |= IXGBE_HLREG0_JUMBOEN;
    else
        hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

    bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
        IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    /* Setup the Base and Length of the Rx Descriptor Ring */
    for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        u64 rdba = rxr->rx_paddr;

        j = rxr->me;

        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
            scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

        /* Set up the SRRCTL register */
        srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        srrctl |= bufsz;
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /*
         * Set DROP_EN iff we have no flow control and >1 queue.
         * Note that srrctl was cleared shortly before during reset,
         * so we do not need to clear the bit, but do it just in case
         * this code is moved elsewhere.
         */
        if (sc->num_rx_queues > 1 &&
            sc->hw.fc.requested_mode == ixgbe_fc_none) {
            srrctl |= IXGBE_SRRCTL_DROP_EN;
        } else {
            srrctl &= ~IXGBE_SRRCTL_DROP_EN;
        }

        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

        /* Set the driver rx tail address */
        rxr->tail = IXGBE_RDT(rxr->me);
    }

    if (sc->hw.mac.type != ixgbe_mac_82598EB) {
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR
                    | IXGBE_PSRTYPE_UDPHDR
                    | IXGBE_PSRTYPE_IPV4HDR
                    | IXGBE_PSRTYPE_IPV6HDR;
        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    }

    rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

    ixgbe_initialize_rss_mapping(sc);

    if (sc->num_rx_queues > 1) {
        /* RSS and RX IPP Checksum are mutually exclusive */
        rxcsum |= IXGBE_RXCSUM_PCSD;
    }

    if (ifp->if_capenable & IFCAP_RXCSUM)
        rxcsum |= IXGBE_RXCSUM_PCSD;

    /* This is useful for calculating UDP/IP fragment checksums */
    if (!(rxcsum & IXGBE_RXCSUM_PCSD))
        rxcsum |= IXGBE_RXCSUM_IPPCSE;

    IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    if_softc_ctx_t scctx = sc->shared;
    struct ix_tx_queue *que;
    int i;

    /* Setup the Base and Length of the Tx Descriptor Ring */
    for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
        i++, que++) {
        struct tx_ring *txr = &que->txr;
        u64 tdba = txr->tx_paddr;
        u32 txctrl = 0;
        int j = txr->me;

        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
            scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

        /* Setup the HW Tx Head and Tail descriptor pointers */
        IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

        /* Cache the tail address */
        txr->tail = IXGBE_TDT(txr->me);

        txr->tx_rs_cidx = txr->tx_rs_pidx;
        txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
        for (int k = 0; k < scctx->isc_ntxd[0]; k++)
            txr->tx_rsq[k] = QIDX_INVALID;

        /* Disable Head Writeback */
        /*
         * Note: for X550 series devices, these registers are actually
         * prefixed with TPH_ instead of DCA_, but the addresses and
         * fields remain the same.
         */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
            break;
        default:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
            break;
        }
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
            break;
        default:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
            break;
        }
    }

    if (hw->mac.type != ixgbe_mac_82598EB) {
        u32 dmatxctl, rttdcs;

        dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
        dmatxctl |= IXGBE_DMATXCTL_TE;
        IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        /* Disable arbiter to set MTQC */
        rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        rttdcs |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        IXGBE_WRITE_REG(hw, IXGBE_MTQC,
            ixgbe_get_mtqc(sc->iov_mode));
        rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    }

} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
    return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
    struct ixgbe_softc *sc;
    device_t dev;
    if_softc_ctx_t scctx;
    struct ixgbe_hw *hw;
    int error = 0;
    u32 ctrl_ext;

    INIT_DEBUGOUT("ixgbe_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);
    sc->hw.back = sc;
    sc->ctx = ctx;
    sc->dev = dev;
    scctx = sc->shared = iflib_get_softc_ctx(ctx);
    sc->media = iflib_get_media(ctx);
    hw = &sc->hw;

    /* Determine hardware revision */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_get_revid(dev);
    hw->subsystem_vendor_id = pci_get_subvendor(dev);
    hw->subsystem_device_id = pci_get_subdevice(dev);

    /* Do base PCI setup - map BAR0 */
    if (ixgbe_allocate_pci_resources(ctx)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        return (ENXIO);
    }

    /* let hardware know driver is loaded */
    ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

    /*
     * Initialize the shared code
     */
    if (ixgbe_init_shared_code(hw) != 0) {
        device_printf(dev, "Unable to initialize the shared code\n");
        error = ENXIO;
        goto err_pci;
    }

    if (hw->mbx.ops.init_params)
        hw->mbx.ops.init_params(hw);

    hw->allow_unsupported_sfp = allow_unsupported_sfp;

    if (hw->mac.type != ixgbe_mac_82598EB)
        hw->phy.smart_speed = ixgbe_smart_speed;

    ixgbe_init_device_features(sc);

    /* Enable WoL (if supported) */
    ixgbe_check_wol_support(sc);

    /* Verify adapter fan is still functional (if applicable) */
    if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
        u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
        ixgbe_check_fan_failure(sc, esdp, false);
    }

    /* Ensure SW/FW semaphore is free */
    ixgbe_init_swfw_semaphore(hw);

    /* Set an initial default flow control value */
    hw->fc.requested_mode = ixgbe_flow_control;

    hw->phy.reset_if_overtemp = true;
    error = ixgbe_reset_hw(hw);
    hw->phy.reset_if_overtemp = false;
    if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
        /*
         * No optics in this port, set up
         * so the timer routine will probe
         * for later insertion.
         */
        sc->sfp_probe = true;
        error = 0;
    } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
        device_printf(dev, "Unsupported SFP+ module detected!\n");
        error = EIO;
        goto err_pci;
    } else if (error) {
        device_printf(dev, "Hardware initialization failed\n");
        error = EIO;
        goto err_pci;
    }

    /* Make sure we have a good EEPROM before we read from it */
    if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
        device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
        error = EIO;
        goto err_pci;
    }

    error = ixgbe_start_hw(hw);
    switch (error) {
    case IXGBE_ERR_EEPROM_VERSION:
        device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
        break;
    case IXGBE_ERR_SFP_NOT_SUPPORTED:
        device_printf(dev, "Unsupported SFP+ Module\n");
        error = EIO;
        goto err_pci;
    case IXGBE_ERR_SFP_NOT_PRESENT:
        device_printf(dev, "No SFP+ Module found\n");
        /* falls thru */
    default:
        break;
    }

    /* Most of the iflib initialization... */
    iflib_set_mac(ctx, hw->mac.addr);
    switch (sc->hw.mac.type) {
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        scctx->isc_rss_table_size = 512;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
        break;
    default:
        scctx->isc_rss_table_size = 128;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
    }

    /* Allow legacy interrupts */
    ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

    scctx->isc_txqsizes[0] =
        roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
        sizeof(u32), DBA_ALIGN);
    scctx->isc_rxqsizes[0] =
        roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
        DBA_ALIGN);
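    /*
     * Illustrative note (not from the original sources): assuming the
     * usual DBA_ALIGN of 128 and 16-byte advanced descriptors, a ring
     * of, say, 2048 TX descriptors works out above to
     *
     *   roundup2(2048 * 16 + 4, 128) = roundup2(32772, 128) = 32896
     *
     * i.e. the descriptor ring plus a trailing u32, padded up to the
     * alignment the DMA engine expects.
     */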
    /* XXX */
    scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
        CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
    } else {
        scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
        scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
    }

    scctx->isc_msix_bar = pci_msix_table_bar(dev);

    scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
    scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

    scctx->isc_txrx = &ixgbe_txrx;

    scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

    return (0);

err_pci:
    ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
    ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
    ixgbe_free_pci_resources(ctx);

    return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
    device_t dev;
    struct ixgbe_softc *sc;
    struct ixgbe_hw *hw;
    int error = 0;

    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);
    hw = &sc->hw;

    if (sc->intr_type == IFLIB_INTR_LEGACY &&
        (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
        device_printf(dev, "Device does not support legacy interrupts\n");
        error = ENXIO;
        goto err;
    }

    /* Allocate multicast array memory. */
    sc->mta = malloc(sizeof(*sc->mta) *
        MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
    if (sc->mta == NULL) {
        device_printf(dev, "Cannot allocate multicast setup array\n");
        error = ENOMEM;
        goto err;
    }

    /* hw.ix defaults init */
    ixgbe_set_advertise(sc, ixgbe_advertise_speed);

    /* Enable the optics for 82599 SFP+ fiber */
    ixgbe_enable_tx_laser(hw);

    /* Enable power to the phy. */
    ixgbe_set_phy_power(hw, true);

    ixgbe_initialize_iov(sc);

    error = ixgbe_setup_interface(ctx);
    if (error) {
        device_printf(dev, "Interface setup failed: %d\n", error);
        goto err;
    }

    ixgbe_if_update_admin_status(ctx);

    /* Initialize statistics */
    ixgbe_update_stats_counters(sc);
    ixgbe_add_hw_stats(sc);

    /* Check PCIE slot type/speed/width */
    ixgbe_get_slot_info(sc);

    /*
     * Do time init and sysctl init here, but
     * only on the first port of a bypass sc.
     */
    ixgbe_bypass_init(sc);

    /* Display NVM and Option ROM versions */
    ixgbe_print_fw_version(ctx);

    /* Set an initial dmac value */
    sc->dmac = 0;
    /* Set initial advertised speeds (if applicable) */
    sc->advertise = ixgbe_get_default_advertise(sc);

    if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
        ixgbe_define_iov_schemas(dev, &error);

    /* Add sysctls */
    ixgbe_add_device_sysctls(ctx);

    return (0);
err:
    return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u16 dev_caps = 0;

    /* Find out WoL support for port */
    sc->wol_support = hw->wol_enabled = 0;
    ixgbe_get_device_caps(hw, &dev_caps);
    if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
        ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
         hw->bus.func == 0))
        sc->wol_support = hw->wol_enabled = 1;

    /* Save initial wake up filter configuration */
    sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

    return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct ixgbe_softc *sc = iflib_get_softc(ctx);

    INIT_DEBUGOUT("ixgbe_setup_interface: begin");

    if_setbaudrate(ifp, IF_Gbps(10));

    sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

    ixgbe_add_media_types(ctx);

    /* Autoselect media by default */
    ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

    return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (sc->ipackets);
    case IFCOUNTER_OPACKETS:
        return (sc->opackets);
    case IFCOUNTER_IBYTES:
        return (sc->ibytes);
    case IFCOUNTER_OBYTES:
        return (sc->obytes);
    case IFCOUNTER_IMCASTS:
        return (sc->imcasts);
    case IFCOUNTER_OMCASTS:
        return (sc->omcasts);
    case IFCOUNTER_COLLISIONS:
        return (0);
    case IFCOUNTER_IQDROPS:
        return (sc->iqdrops);
    case IFCOUNTER_OQDROPS:
        return (0);
    case IFCOUNTER_IERRORS:
        return (sc->ierrors);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
} /* ixgbe_if_get_counter */
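/*
 * Illustrative note (not from the original sources): the ifdi_i2c_req
 * handler below backs the SIOCGI2C ioctl, which is how userland tools
 * such as "ifconfig -v ix0" read SFP/SFP+ module EEPROM contents for
 * transceiver display.
 */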
/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int i;

    if (hw->phy.ops.read_i2c_byte == NULL)
        return (ENXIO);
    for (i = 0; i < req->len; i++)
        hw->phy.ops.read_i2c_byte(hw, req->offset + i,
            req->dev_addr, &req->data[i]);
    return (0);
} /* ixgbe_if_i2c_req */

/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
 * reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
    switch (event) {
    case IFLIB_RESTART_VLAN_CONFIG:
        return (false);
    default:
        return (true);
    }
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = iflib_get_dev(ctx);
    u64 layer;

    layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

    /* Media types with matching FreeBSD media defines */
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

    if (hw->mac.type == ixgbe_mac_X550) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
        ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
    }

    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
        layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
        device_printf(dev, "Media supported: 10GbaseKR\n");
        device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
        device_printf(dev, "Media supported: 10GbaseKX4\n");
        device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
        device_printf(dev, "Media supported: 1000baseKX\n");
        device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
        device_printf(dev, "Media supported: 2500baseKX\n");
        device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
    }
#endif
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
        device_printf(dev, "Media supported: 1000baseBX\n");

    if (hw->device_id == IXGBE_DEV_ID_82598AT) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
            0, NULL);
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    }

    ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        if (hw->phy.type == ixgbe_phy_nl)
            return (true);
        return (false);
    case ixgbe_mac_82599EB:
        switch (hw->mac.ops.get_media_type(hw)) {
        case ixgbe_media_type_fiber:
        case ixgbe_media_type_fiber_qsfp:
            return (true);
        default:
            return (false);
        }
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
            return (true);
        return (false);
    default:
        return (false);
    }
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    u32 autoneg, err = 0;
    bool sfp, negotiate;

    sfp = ixgbe_is_sfp(hw);

    if (sfp) {
        sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
        iflib_admin_intr_deferred(ctx);
    } else {
        if (hw->mac.ops.check_link)
            err = ixgbe_check_link(hw, &sc->link_speed,
                &sc->link_up, false);
        if (err)
            return;
        autoneg = hw->phy.autoneg_advertised;
        if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
            err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                &negotiate);
        if (err)
            return;

        if (hw->mac.type == ixgbe_mac_X550 &&
            hw->phy.autoneg_advertised == 0) {
            /*
             * 2.5G and 5G autonegotiation speeds on X550
             * are disabled by default due to reported
             * interoperability issues with some switches.
             *
             * The second condition checks if any operations
             * involving setting autonegotiation speeds have
             * been performed prior to this ixgbe_config_link()
             * call.
             *
             * If hw->phy.autoneg_advertised does not
             * equal 0, this means that the user might have
             * set autonegotiation speeds via the sysctl
             * before bringing the interface up.  In this
             * case, we should not disable 2.5G and 5G
             * since those speeds might be selected by the
             * user.
             *
             * Otherwise (i.e. if hw->phy.autoneg_advertised
             * is set to 0), it is the first time we set
             * autonegotiation preferences and the default
             * set of speeds should exclude 2.5G and 5G.
             */
            autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
                IXGBE_LINK_SPEED_5GB_FULL);
        }

        if (hw->mac.ops.setup_link)
            err = hw->mac.ops.setup_link(hw, autoneg,
                sc->link_up);
    }
} /* ixgbe_config_link */

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    struct ixgbe_hw_stats *stats = &sc->stats.pf;
    u32 missed_rx = 0, bprc, lxon, lxoff, total;
    u32 lxoffrxc;
    u64 total_missed_rx = 0;

    stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
    stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
    stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
    stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
    stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

    for (int i = 0; i < 16; i++) {
        stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
        stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
        stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
    }
    stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
    stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
    stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

    /* Hardware workaround, gprc counts missed packets */
    stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
    stats->gprc -= missed_rx;

    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
        lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        stats->lxoffrxc += lxoffrxc;
    } else {
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
        lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
        stats->lxoffrxc += lxoffrxc;
        /* 82598 only has a counter in the high register */
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
    }

    /*
     * For watchdog management we need to know if we have been paused
     * during the last interval, so capture that here.
     */
    if (lxoffrxc)
        sc->shared->isc_pause_frames = 1;

    /*
     * Workaround: mprc hardware is incorrectly counting
     * broadcasts, so for now we subtract those.
     */
    bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
    stats->bprc += bprc;
    stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
    if (hw->mac.type == ixgbe_mac_82598EB)
        stats->mprc -= bprc;

    stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
    stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
    stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
    stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
    stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
    stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

    lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
    stats->lxontxc += lxon;
    lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
    stats->lxofftxc += lxoff;
    total = lxon + lxoff;

    stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
    stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
    stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
    stats->gptc -= total;
    stats->mptc -= total;
    stats->ptc64 -= total;
    stats->gotc -= total * ETHER_MIN_LEN;

    stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
    stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
    stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
    stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
    stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
    stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
    stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
    stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
    stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
    stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
    stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
    stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
    stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
    stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
    stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
    stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
    stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
    stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    /* Only read FCOE on 82599 */
    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
        stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
        stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
        stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
        stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
    }

    /* Fill out the OS statistics structure */
    IXGBE_SET_IPACKETS(sc, stats->gprc);
    IXGBE_SET_OPACKETS(sc, stats->gptc);
    IXGBE_SET_IBYTES(sc, stats->gorc);
    IXGBE_SET_OBYTES(sc, stats->gotc);
    IXGBE_SET_IMCASTS(sc, stats->mprc);
    IXGBE_SET_OMCASTS(sc, stats->mptc);
    IXGBE_SET_COLLISIONS(sc, 0);
    IXGBE_SET_IQDROPS(sc, total_missed_rx);

    /*
     * Aggregate following types of errors as RX errors:
     * - CRC error count,
     * - illegal byte error count,
     * - checksum error count,
     * - missed packets count,
     * - length error count,
     * - undersized packets count,
     * - fragmented packets count,
     * - oversized packets count,
     * - jabber count.
     */
    IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc + stats->xec +
        stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
        stats->rjc);
} /* ixgbe_update_stats_counters */

/************************************************************************
 * ixgbe_add_hw_stats
 *
 *   Add sysctl variables, one per statistic, to the system.
 ************************************************************************/
1597 ************************************************************************/ 1598 static void 1599 ixgbe_add_hw_stats(struct ixgbe_softc *sc) 1600 { 1601 device_t dev = iflib_get_dev(sc->ctx); 1602 struct ix_rx_queue *rx_que; 1603 struct ix_tx_queue *tx_que; 1604 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1605 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1606 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1607 struct ixgbe_hw_stats *stats = &sc->stats.pf; 1608 struct sysctl_oid *stat_node, *queue_node; 1609 struct sysctl_oid_list *stat_list, *queue_list; 1610 int i; 1611 1612 #define QUEUE_NAME_LEN 32 1613 char namebuf[QUEUE_NAME_LEN]; 1614 1615 /* Driver Statistics */ 1616 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1617 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets"); 1618 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1619 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); 1620 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1621 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); 1622 1623 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 1624 struct tx_ring *txr = &tx_que->txr; 1625 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1626 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1627 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1628 queue_list = SYSCTL_CHILDREN(queue_node); 1629 1630 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1631 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1632 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1633 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1634 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1635 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1636 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1637 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1638 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1639 CTLFLAG_RD, &txr->total_packets, 1640 "Queue Packets Transmitted"); 1641 } 1642 1643 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 1644 struct rx_ring *rxr = &rx_que->rxr; 1645 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1646 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1647 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1648 queue_list = SYSCTL_CHILDREN(queue_node); 1649 1650 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1651 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 1652 &sc->rx_queues[i], 0, 1653 ixgbe_sysctl_interrupt_rate_handler, "IU", 1654 "Interrupt Rate"); 1655 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1656 CTLFLAG_RD, &(sc->rx_queues[i].irqs), 1657 "irqs on this queue"); 1658 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1659 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1660 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1661 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1662 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1663 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1664 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1665 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1666 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1667 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1668 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1669 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1670 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1671 CTLFLAG_RD, 
&rxr->rx_discarded, "Discarded RX packets"); 1672 } 1673 1674 /* MAC stats get their own sub node */ 1675 1676 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 1677 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics"); 1678 stat_list = SYSCTL_CHILDREN(stat_node); 1679 1680 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs", 1681 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS); 1682 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 1683 CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); 1684 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", 1685 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); 1686 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", 1687 CTLFLAG_RD, &stats->errbc, "Byte Errors"); 1688 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", 1689 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); 1690 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", 1691 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults"); 1692 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", 1693 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults"); 1694 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", 1695 CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); 1696 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets", 1697 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count"); 1698 1699 /* Flow Control stats */ 1700 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 1701 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted"); 1702 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 1703 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received"); 1704 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 1705 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted"); 1706 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 1707 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received"); 1708 1709 /* Packet Reception Stats */ 1710 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", 1711 CTLFLAG_RD, &stats->tor, "Total Octets Received"); 1712 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 1713 CTLFLAG_RD, &stats->gorc, "Good Octets Received"); 1714 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", 1715 CTLFLAG_RD, &stats->tpr, "Total Packets Received"); 1716 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 1717 CTLFLAG_RD, &stats->gprc, "Good Packets Received"); 1718 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 1719 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); 1720 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", 1721 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); 1722 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 1723 CTLFLAG_RD, &stats->prc64, "64 byte frames received"); 1724 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 1725 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); 1726 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 1727 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); 1728 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 1729 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); 1730 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 1731 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); 1732 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 1733 CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received"); 1734 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 1735 CTLFLAG_RD,
&stats->ruc, "Receive Undersized"); 1736 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 1737 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received"); 1738 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 1739 CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); 1740 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 1741 CTLFLAG_RD, &stats->rjc, "Received Jabber"); 1742 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 1743 CTLFLAG_RD, &stats->mngprc, "Management Packets Received"); 1744 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 1745 CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped"); 1746 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 1747 CTLFLAG_RD, &stats->xec, "Checksum Errors"); 1748 1749 /* Packet Transmission Stats */ 1750 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 1751 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); 1752 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 1753 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); 1754 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 1755 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); 1756 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 1757 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); 1758 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 1759 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); 1760 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 1761 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted"); 1762 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 1763 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted"); 1764 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 1765 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); 1766 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 1767 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); 1768 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 1769 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); 1770 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 1771 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); 1772 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 1773 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); 1774 } /* ixgbe_add_hw_stats */ 1775 1776 /************************************************************************ 1777 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function 1778 * 1779 * Retrieves the TDH value from the hardware 1780 ************************************************************************/ 1781 static int 1782 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 1783 { 1784 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1785 int error; 1786 unsigned int val; 1787 1788 if (!txr) 1789 return (0); 1790 1791 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me)); 1792 error = sysctl_handle_int(oidp, &val, 0, req); 1793 if (error || !req->newptr) 1794 return error; 1795 1796 return (0); 1797 } /* ixgbe_sysctl_tdh_handler */ 1798 1799 /************************************************************************ 1800 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1801 * 1802 * Retrieves the TDT value from the hardware 1803 ************************************************************************/ 1804 static int 1805
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1806 { 1807 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1808 int error; 1809 unsigned int val; 1810 1811 if (!txr) 1812 return (0); 1813 1814 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me)); 1815 error = sysctl_handle_int(oidp, &val, 0, req); 1816 if (error || !req->newptr) 1817 return error; 1818 1819 return (0); 1820 } /* ixgbe_sysctl_tdt_handler */ 1821 1822 /************************************************************************ 1823 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1824 * 1825 * Retrieves the RDH value from the hardware 1826 ************************************************************************/ 1827 static int 1828 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1829 { 1830 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1831 int error; 1832 unsigned int val; 1833 1834 if (!rxr) 1835 return (0); 1836 1837 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me)); 1838 error = sysctl_handle_int(oidp, &val, 0, req); 1839 if (error || !req->newptr) 1840 return error; 1841 1842 return (0); 1843 } /* ixgbe_sysctl_rdh_handler */ 1844 1845 /************************************************************************ 1846 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1847 * 1848 * Retrieves the RDT value from the hardware 1849 ************************************************************************/ 1850 static int 1851 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1852 { 1853 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1854 int error; 1855 unsigned int val; 1856 1857 if (!rxr) 1858 return (0); 1859 1860 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me)); 1861 error = sysctl_handle_int(oidp, &val, 0, req); 1862 if (error || !req->newptr) 1863 return error; 1864 1865 return (0); 1866 } /* ixgbe_sysctl_rdt_handler */ 1867 1868 /************************************************************************ 1869 * ixgbe_if_vlan_register 1870 * 1871 * Run via vlan config EVENT, it enables us to use the 1872 * HW Filter table since we can get the vlan id. This 1873 * just creates the entry in the soft version of the 1874 * VFTA, init will repopulate the real table. 1875 ************************************************************************/ 1876 static void 1877 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1878 { 1879 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1880 u16 index, bit; 1881 1882 index = (vtag >> 5) & 0x7F; 1883 bit = vtag & 0x1F; 1884 sc->shadow_vfta[index] |= (1 << bit); 1885 ++sc->num_vlans; 1886 ixgbe_setup_vlan_hw_support(ctx); 1887 } /* ixgbe_if_vlan_register */ 1888 1889 /************************************************************************ 1890 * ixgbe_if_vlan_unregister 1891 * 1892 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 
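 *
 * Worked example of the table math below: the shadow VFTA is 128
 * 32-bit words, one bit per VLAN ID, so vtag 100 maps to word
 * index (100 >> 5) & 0x7F = 3, bit 100 & 0x1F = 4.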
1893 ************************************************************************/ 1894 static void 1895 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) 1896 { 1897 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1898 u16 index, bit; 1899 1900 index = (vtag >> 5) & 0x7F; 1901 bit = vtag & 0x1F; 1902 sc->shadow_vfta[index] &= ~(1 << bit); 1903 --sc->num_vlans; 1904 /* Re-init to load the changes */ 1905 ixgbe_setup_vlan_hw_support(ctx); 1906 } /* ixgbe_if_vlan_unregister */ 1907 1908 /************************************************************************ 1909 * ixgbe_setup_vlan_hw_support 1910 ************************************************************************/ 1911 static void 1912 ixgbe_setup_vlan_hw_support(if_ctx_t ctx) 1913 { 1914 struct ifnet *ifp = iflib_get_ifp(ctx); 1915 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1916 struct ixgbe_hw *hw = &sc->hw; 1917 struct rx_ring *rxr; 1918 int i; 1919 u32 ctrl; 1920 1921 1922 /* 1923 * We get here through init_locked, meaning a soft reset; it has 1924 * already cleared the VFTA and other state, so if no VLANs are 1925 * registered there is nothing to do. 1927 */ 1928 if (sc->num_vlans == 0 || (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) { 1929 /* Clear the vlan hw flag */ 1930 for (i = 0; i < sc->num_rx_queues; i++) { 1931 rxr = &sc->rx_queues[i].rxr; 1932 /* On 82599 the VLAN enable is per-queue in RXDCTL */ 1933 if (hw->mac.type != ixgbe_mac_82598EB) { 1934 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 1935 ctrl &= ~IXGBE_RXDCTL_VME; 1936 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 1937 } 1938 rxr->vtag_strip = false; 1939 } 1940 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1941 /* Disable the VLAN filter table */ 1942 ctrl |= IXGBE_VLNCTRL_CFIEN; 1943 ctrl &= ~IXGBE_VLNCTRL_VFE; 1944 if (hw->mac.type == ixgbe_mac_82598EB) 1945 ctrl &= ~IXGBE_VLNCTRL_VME; 1946 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1947 return; 1948 } 1949 1950 /* Setup the queues for vlans */ 1951 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1952 for (i = 0; i < sc->num_rx_queues; i++) { 1953 rxr = &sc->rx_queues[i].rxr; 1954 /* On 82599 the VLAN enable is per-queue in RXDCTL */ 1955 if (hw->mac.type != ixgbe_mac_82598EB) { 1956 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 1957 ctrl |= IXGBE_RXDCTL_VME; 1958 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 1959 } 1960 rxr->vtag_strip = true; 1961 } 1962 } 1963 1964 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 1965 return; 1966 /* 1967 * A soft reset zeroes out the VFTA, so 1968 * we need to repopulate it now. 1969 */ 1970 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1971 if (sc->shadow_vfta[i] != 0) 1972 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 1973 sc->shadow_vfta[i]); 1974 1975 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1976 /* Enable the VLAN filter table if the capability is set */ 1977 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 1978 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1979 ctrl |= IXGBE_VLNCTRL_VFE; 1980 } 1981 if (hw->mac.type == ixgbe_mac_82598EB) 1982 ctrl |= IXGBE_VLNCTRL_VME; 1983 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1984 } /* ixgbe_setup_vlan_hw_support */ 1985 1986 /************************************************************************ 1987 * ixgbe_get_slot_info 1988 * 1989 * Get the width and transaction speed of 1990 * the slot this adapter is plugged into.
1991 ************************************************************************/ 1992 static void 1993 ixgbe_get_slot_info(struct ixgbe_softc *sc) 1994 { 1995 device_t dev = iflib_get_dev(sc->ctx); 1996 struct ixgbe_hw *hw = &sc->hw; 1997 int bus_info_valid = true; 1998 u32 offset; 1999 u16 link; 2000 2001 /* Some devices are behind an internal bridge */ 2002 switch (hw->device_id) { 2003 case IXGBE_DEV_ID_82599_SFP_SF_QP: 2004 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 2005 goto get_parent_info; 2006 default: 2007 break; 2008 } 2009 2010 ixgbe_get_bus_info(hw); 2011 2012 /* 2013 * Some devices don't use PCI-E, but there is no need 2014 * to display "Unknown" for bus speed and width. 2015 */ 2016 switch (hw->mac.type) { 2017 case ixgbe_mac_X550EM_x: 2018 case ixgbe_mac_X550EM_a: 2019 return; 2020 default: 2021 goto display; 2022 } 2023 2024 get_parent_info: 2025 /* 2026 * For the Quad port adapter we need to parse back 2027 * up the PCI tree to find the speed of the expansion 2028 * slot into which this adapter is plugged. A bit more work. 2029 */ 2030 dev = device_get_parent(device_get_parent(dev)); 2031 #ifdef IXGBE_DEBUG 2032 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 2033 pci_get_slot(dev), pci_get_function(dev)); 2034 #endif 2035 dev = device_get_parent(device_get_parent(dev)); 2036 #ifdef IXGBE_DEBUG 2037 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 2038 pci_get_slot(dev), pci_get_function(dev)); 2039 #endif 2040 /* Now get the PCI Express Capabilities offset */ 2041 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 2042 /* 2043 * Hmm...can't get PCI-Express capabilities. 2044 * Falling back to default method. 2045 */ 2046 bus_info_valid = false; 2047 ixgbe_get_bus_info(hw); 2048 goto display; 2049 } 2050 /* ...and read the Link Status Register */ 2051 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2052 ixgbe_set_pci_config_data_generic(hw, link); 2053 2054 display: 2055 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 2056 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 2057 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 2058 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 2059 "Unknown"), 2060 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 2061 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 2062 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 2063 "Unknown")); 2064 2065 if (bus_info_valid) { 2066 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 2067 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 2068 (hw->bus.speed == ixgbe_bus_speed_2500))) { 2069 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2070 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 2071 } 2072 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 2073 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 2074 (hw->bus.speed < ixgbe_bus_speed_8000))) { 2075 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2076 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 2077 } 2078 } else 2079 device_printf(dev, "Unable to determine slot speed/width. 
The speed/width reported are that of the internal switch.\n"); 2080 2081 return; 2082 } /* ixgbe_get_slot_info */ 2083 2084 /************************************************************************ 2085 * ixgbe_if_msix_intr_assign 2086 * 2087 * Setup MSI-X Interrupt resources and handlers 2088 ************************************************************************/ 2089 static int 2090 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 2091 { 2092 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2093 struct ix_rx_queue *rx_que = sc->rx_queues; 2094 struct ix_tx_queue *tx_que; 2095 int error, rid, vector = 0; 2096 char buf[16]; 2097 2098 /* Admin queue is vector 0 */ 2099 rid = vector + 1; 2100 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) { 2101 rid = vector + 1; 2102 2103 snprintf(buf, sizeof(buf), "rxq%d", i); 2104 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 2105 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 2106 2107 if (error) { 2108 device_printf(iflib_get_dev(ctx), 2109 "Failed to allocate queue interrupt %d, err: %d\n", i, error); 2110 sc->num_rx_queues = i + 1; 2111 goto fail; 2112 } 2113 2114 rx_que->msix = vector; 2115 } 2116 for (int i = 0; i < sc->num_tx_queues; i++) { 2117 snprintf(buf, sizeof(buf), "txq%d", i); 2118 tx_que = &sc->tx_queues[i]; 2119 tx_que->msix = i % sc->num_rx_queues; 2120 iflib_softirq_alloc_generic(ctx, 2121 &sc->rx_queues[tx_que->msix].que_irq, 2122 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2123 } 2124 rid = vector + 1; 2125 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, 2126 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq"); 2127 if (error) { 2128 device_printf(iflib_get_dev(ctx), 2129 "Failed to register admin handler\n"); 2130 return (error); 2131 } 2132 2133 sc->vector = vector; 2134 2135 return (0); 2136 fail: 2137 iflib_irq_free(ctx, &sc->irq); 2138 rx_que = sc->rx_queues; 2139 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) 2140 iflib_irq_free(ctx, &rx_que->que_irq); 2141 2142 return (error); 2143 } /* ixgbe_if_msix_intr_assign */ 2144 2145 static inline void 2146 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que) 2147 { 2148 uint32_t newitr = 0; 2149 struct rx_ring *rxr = &que->rxr; 2150 2151 /* 2152 * Do Adaptive Interrupt Moderation: 2153 * - Write out last calculated setting 2154 * - Calculate based on average size over 2155 * the last interval.
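 * Worked example (illustrative): an average of 1026 bytes/packet
 * yields 1026 + 24 = 1050, which falls in the (300, 1200) mid
 * range and is divided by 3, so 350 is written to EITR on the
 * next interrupt.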
2156 */ 2157 if (que->eitr_setting) { 2158 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix), 2159 que->eitr_setting); 2160 } 2161 2162 que->eitr_setting = 0; 2163 /* Idle, do nothing */ 2164 if (rxr->bytes == 0) { 2165 return; 2166 } 2167 2168 if ((rxr->bytes) && (rxr->packets)) { 2169 newitr = (rxr->bytes / rxr->packets); 2170 } 2171 2172 newitr += 24; /* account for hardware frame, crc */ 2173 /* set an upper boundary */ 2174 newitr = min(newitr, 3000); 2175 2176 /* Be nice to the mid range */ 2177 if ((newitr > 300) && (newitr < 1200)) { 2178 newitr = (newitr / 3); 2179 } else { 2180 newitr = (newitr / 2); 2181 } 2182 2183 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 2184 newitr |= newitr << 16; 2185 } else { 2186 newitr |= IXGBE_EITR_CNT_WDIS; 2187 } 2188 2189 /* save for next interrupt */ 2190 que->eitr_setting = newitr; 2191 2192 /* Reset state */ 2193 rxr->bytes = 0; 2194 rxr->packets = 0; 2195 2196 return; 2197 } 2198 2199 /********************************************************************* 2200 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2201 **********************************************************************/ 2202 static int 2203 ixgbe_msix_que(void *arg) 2204 { 2205 struct ix_rx_queue *que = arg; 2206 struct ixgbe_softc *sc = que->sc; 2207 struct ifnet *ifp = iflib_get_ifp(que->sc->ctx); 2208 2209 /* Protect against spurious interrupts */ 2210 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2211 return (FILTER_HANDLED); 2212 2213 ixgbe_disable_queue(sc, que->msix); 2214 ++que->irqs; 2215 2216 /* Check for AIM */ 2217 if (sc->enable_aim) { 2218 ixgbe_perform_aim(sc, que); 2219 } 2220 2221 return (FILTER_SCHEDULE_THREAD); 2222 } /* ixgbe_msix_que */ 2223 2224 /************************************************************************ 2225 * ixgbe_media_status - Media Ioctl callback 2226 * 2227 * Called whenever the user queries the status of 2228 * the interface using ifconfig. 
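 *
 * Example (interface name hypothetical): "ifconfig ix0" displays
 * the media and status assembled from the IFM_* bits set below.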
2229 ************************************************************************/ 2230 static void 2231 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2232 { 2233 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2234 struct ixgbe_hw *hw = &sc->hw; 2235 int layer; 2236 2237 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2238 2239 ifmr->ifm_status = IFM_AVALID; 2240 ifmr->ifm_active = IFM_ETHER; 2241 2242 if (!sc->link_active) 2243 return; 2244 2245 ifmr->ifm_status |= IFM_ACTIVE; 2246 layer = sc->phy_layer; 2247 2248 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2249 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2250 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2251 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2252 switch (sc->link_speed) { 2253 case IXGBE_LINK_SPEED_10GB_FULL: 2254 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2255 break; 2256 case IXGBE_LINK_SPEED_1GB_FULL: 2257 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2258 break; 2259 case IXGBE_LINK_SPEED_100_FULL: 2260 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2261 break; 2262 case IXGBE_LINK_SPEED_10_FULL: 2263 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2264 break; 2265 } 2266 if (hw->mac.type == ixgbe_mac_X550) 2267 switch (sc->link_speed) { 2268 case IXGBE_LINK_SPEED_5GB_FULL: 2269 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 2270 break; 2271 case IXGBE_LINK_SPEED_2_5GB_FULL: 2272 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 2273 break; 2274 } 2275 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2276 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2277 switch (sc->link_speed) { 2278 case IXGBE_LINK_SPEED_10GB_FULL: 2279 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2280 break; 2281 } 2282 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2283 switch (sc->link_speed) { 2284 case IXGBE_LINK_SPEED_10GB_FULL: 2285 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2286 break; 2287 case IXGBE_LINK_SPEED_1GB_FULL: 2288 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2289 break; 2290 } 2291 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2292 switch (sc->link_speed) { 2293 case IXGBE_LINK_SPEED_10GB_FULL: 2294 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2295 break; 2296 case IXGBE_LINK_SPEED_1GB_FULL: 2297 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2298 break; 2299 } 2300 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2301 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2302 switch (sc->link_speed) { 2303 case IXGBE_LINK_SPEED_10GB_FULL: 2304 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2305 break; 2306 case IXGBE_LINK_SPEED_1GB_FULL: 2307 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2308 break; 2309 } 2310 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2311 switch (sc->link_speed) { 2312 case IXGBE_LINK_SPEED_10GB_FULL: 2313 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2314 break; 2315 } 2316 /* 2317 * XXX: These need to use the proper media types once 2318 * they're added. 
2319 */ 2320 #ifndef IFM_ETH_XTYPE 2321 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2322 switch (sc->link_speed) { 2323 case IXGBE_LINK_SPEED_10GB_FULL: 2324 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2325 break; 2326 case IXGBE_LINK_SPEED_2_5GB_FULL: 2327 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2328 break; 2329 case IXGBE_LINK_SPEED_1GB_FULL: 2330 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2331 break; 2332 } 2333 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2334 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2335 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2336 switch (sc->link_speed) { 2337 case IXGBE_LINK_SPEED_10GB_FULL: 2338 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2339 break; 2340 case IXGBE_LINK_SPEED_2_5GB_FULL: 2341 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2342 break; 2343 case IXGBE_LINK_SPEED_1GB_FULL: 2344 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2345 break; 2346 } 2347 #else 2348 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2349 switch (sc->link_speed) { 2350 case IXGBE_LINK_SPEED_10GB_FULL: 2351 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 2352 break; 2353 case IXGBE_LINK_SPEED_2_5GB_FULL: 2354 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2355 break; 2356 case IXGBE_LINK_SPEED_1GB_FULL: 2357 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2358 break; 2359 } 2360 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2361 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2362 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2363 switch (sc->link_speed) { 2364 case IXGBE_LINK_SPEED_10GB_FULL: 2365 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 2366 break; 2367 case IXGBE_LINK_SPEED_2_5GB_FULL: 2368 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2369 break; 2370 case IXGBE_LINK_SPEED_1GB_FULL: 2371 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2372 break; 2373 } 2374 #endif 2375 2376 /* If nothing is recognized... */ 2377 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 2378 ifmr->ifm_active |= IFM_UNKNOWN; 2379 2380 /* Display current flow control setting used on link */ 2381 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 2382 hw->fc.current_mode == ixgbe_fc_full) 2383 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2384 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 2385 hw->fc.current_mode == ixgbe_fc_full) 2386 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2387 } /* ixgbe_if_media_status */ 2388 2389 /************************************************************************ 2390 * ixgbe_media_change - Media Ioctl callback 2391 * 2392 * Called when the user changes speed/duplex using 2393 * media/mediaopt options with ifconfig. 2394 ************************************************************************/ 2395 static int 2396 ixgbe_if_media_change(if_ctx_t ctx) 2397 { 2398 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2399 struct ifmedia *ifm = iflib_get_media(ctx); 2400 struct ixgbe_hw *hw = &sc->hw; 2401 ixgbe_link_speed speed = 0; 2402 2403 INIT_DEBUGOUT("ixgbe_if_media_change: begin"); 2404 2405 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2406 return (EINVAL); 2407 2408 if (hw->phy.media_type == ixgbe_media_type_backplane) 2409 return (EPERM); 2410 2411 /* 2412 * We don't actually need to check against the supported 2413 * media types of the adapter; ifmedia will take care of 2414 * that for us.
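 * Example (interface name hypothetical): "ifconfig ix0 media
 * 10Gbase-T" arrives here as IFM_10G_T, which the switch below
 * expands to 100M/1G/10G autonegotiation.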
2415 */ 2416 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2417 case IFM_AUTO: 2418 case IFM_10G_T: 2419 speed |= IXGBE_LINK_SPEED_100_FULL; 2420 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2421 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2422 break; 2423 case IFM_10G_LRM: 2424 case IFM_10G_LR: 2425 #ifndef IFM_ETH_XTYPE 2426 case IFM_10G_SR: /* KR, too */ 2427 case IFM_10G_CX4: /* KX4 */ 2428 #else 2429 case IFM_10G_KR: 2430 case IFM_10G_KX4: 2431 #endif 2432 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2433 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2434 break; 2435 #ifndef IFM_ETH_XTYPE 2436 case IFM_1000_CX: /* KX */ 2437 #else 2438 case IFM_1000_KX: 2439 #endif 2440 case IFM_1000_LX: 2441 case IFM_1000_SX: 2442 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2443 break; 2444 case IFM_1000_T: 2445 speed |= IXGBE_LINK_SPEED_100_FULL; 2446 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2447 break; 2448 case IFM_10G_TWINAX: 2449 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2450 break; 2451 case IFM_5000_T: 2452 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2453 break; 2454 case IFM_2500_T: 2455 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2456 break; 2457 case IFM_100_TX: 2458 speed |= IXGBE_LINK_SPEED_100_FULL; 2459 break; 2460 case IFM_10_T: 2461 speed |= IXGBE_LINK_SPEED_10_FULL; 2462 break; 2463 default: 2464 goto invalid; 2465 } 2466 2467 hw->mac.autotry_restart = true; 2468 hw->mac.ops.setup_link(hw, speed, true); 2469 sc->advertise = 2470 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) | 2471 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) | 2472 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | 2473 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) | 2474 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) | 2475 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0); 2476 2477 return (0); 2478 2479 invalid: 2480 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2481 2482 return (EINVAL); 2483 } /* ixgbe_if_media_change */ 2484 2485 /************************************************************************ 2486 * ixgbe_set_promisc 2487 ************************************************************************/ 2488 static int 2489 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2490 { 2491 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2492 struct ifnet *ifp = iflib_get_ifp(ctx); 2493 u32 rctl; 2494 int mcnt = 0; 2495 2496 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); 2497 rctl &= (~IXGBE_FCTRL_UPE); 2498 if (ifp->if_flags & IFF_ALLMULTI) 2499 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2500 else { 2501 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); 2502 } 2503 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2504 rctl &= (~IXGBE_FCTRL_MPE); 2505 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2506 2507 if (ifp->if_flags & IFF_PROMISC) { 2508 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2509 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2510 } else if (ifp->if_flags & IFF_ALLMULTI) { 2511 rctl |= IXGBE_FCTRL_MPE; 2512 rctl &= ~IXGBE_FCTRL_UPE; 2513 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2514 } 2515 return (0); 2516 } /* ixgbe_if_promisc_set */ 2517 2518 /************************************************************************ 2519 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2520 ************************************************************************/ 2521 static int 2522 ixgbe_msix_link(void *arg) 2523 { 2524 struct ixgbe_softc *sc = arg; 2525 struct ixgbe_hw *hw = &sc->hw; 2526 u32 eicr, eicr_mask; 2527 s32 retval; 2528 2529 ++sc->link_irq; 2530 2531 /* Pause other interrupts */ 2532 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2533 2534 /* First 
get the cause */ 2535 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2536 /* Be sure the queue bits are not cleared */ 2537 eicr &= ~IXGBE_EICR_RTX_QUEUE; 2538 /* Clear interrupt with write */ 2539 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2540 2541 /* Link status change */ 2542 if (eicr & IXGBE_EICR_LSC) { 2543 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2544 sc->task_requests |= IXGBE_REQUEST_TASK_LSC; 2545 } 2546 2547 if (sc->hw.mac.type != ixgbe_mac_82598EB) { 2548 if ((sc->feat_en & IXGBE_FEATURE_FDIR) && 2549 (eicr & IXGBE_EICR_FLOW_DIR)) { 2550 /* This is probably overkill :) */ 2551 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1)) 2552 return (FILTER_HANDLED); 2553 /* Disable the interrupt */ 2554 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); 2555 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR; 2556 } else if (eicr & IXGBE_EICR_ECC) { 2558 device_printf(iflib_get_dev(sc->ctx), 2559 "Received ECC Err, initiating reset\n"); 2560 /* Request a full (double) reset for the recovery */ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 2561 ixgbe_reset_hw(hw); 2562 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2563 } 2564 2565 /* Check for over temp condition */ 2566 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 2567 switch (sc->hw.mac.type) { 2568 case ixgbe_mac_X550EM_a: 2569 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 2570 break; 2571 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 2572 IXGBE_EICR_GPI_SDP0_X550EM_a); 2573 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2574 IXGBE_EICR_GPI_SDP0_X550EM_a); 2575 retval = hw->phy.ops.check_overtemp(hw); 2576 if (retval != IXGBE_ERR_OVERTEMP) 2577 break; 2578 device_printf(iflib_get_dev(sc->ctx), 2579 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2580 device_printf(iflib_get_dev(sc->ctx), 2581 "System shutdown required!\n"); 2582 break; 2583 default: 2584 if (!(eicr & IXGBE_EICR_TS)) 2585 break; 2586 retval = hw->phy.ops.check_overtemp(hw); 2587 if (retval != IXGBE_ERR_OVERTEMP) 2588 break; 2589 device_printf(iflib_get_dev(sc->ctx), 2590 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2591 device_printf(iflib_get_dev(sc->ctx), 2592 "System shutdown required!\n"); 2593 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 2594 break; 2595 } 2596 } 2597 2598 /* Check for VF message */ 2599 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) && 2600 (eicr & IXGBE_EICR_MAILBOX)) 2601 sc->task_requests |= IXGBE_REQUEST_TASK_MBX; 2602 } 2603 2604 if (ixgbe_is_sfp(hw)) { 2605 /* Pluggable optics-related interrupt */ 2606 if (hw->mac.type >= ixgbe_mac_X540) 2607 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 2608 else 2609 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 2610 2611 if (eicr & eicr_mask) { 2612 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2613 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; 2614 } 2615 2616 if ((hw->mac.type == ixgbe_mac_82599EB) && 2617 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 2618 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2619 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2620 sc->task_requests |= IXGBE_REQUEST_TASK_MSF; 2621 } 2622 } 2623 2624 /* Check for fan failure */ 2625 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) { 2626 ixgbe_check_fan_failure(sc, eicr, true); 2627 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2628 } 2629 2630 /* External PHY interrupt */ 2631 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 2632 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2633 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 2634 sc->task_requests |= IXGBE_REQUEST_TASK_PHY; 2635 } 2636 2637 return (sc->task_requests != 0) ?
FILTER_SCHEDULE_THREAD : FILTER_HANDLED; 2638 } /* ixgbe_msix_link */ 2639 2640 /************************************************************************ 2641 * ixgbe_sysctl_interrupt_rate_handler 2642 ************************************************************************/ 2643 static int 2644 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2645 { 2646 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2647 int error; 2648 unsigned int reg, usec, rate; 2649 2650 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix)); 2651 usec = ((reg & 0x0FF8) >> 3); 2652 if (usec > 0) 2653 rate = 500000 / usec; 2654 else 2655 rate = 0; 2656 error = sysctl_handle_int(oidp, &rate, 0, req); 2657 if (error || !req->newptr) 2658 return error; 2659 reg &= ~0xfff; /* default, no limitation */ 2660 ixgbe_max_interrupt_rate = 0; 2661 if (rate > 0 && rate < 500000) { 2662 if (rate < 1000) 2663 rate = 1000; 2664 ixgbe_max_interrupt_rate = rate; 2665 reg |= ((4000000/rate) & 0xff8); 2666 } 2667 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg); 2668 2669 return (0); 2670 } /* ixgbe_sysctl_interrupt_rate_handler */ 2671 2672 /************************************************************************ 2673 * ixgbe_add_device_sysctls 2674 ************************************************************************/ 2675 static void 2676 ixgbe_add_device_sysctls(if_ctx_t ctx) 2677 { 2678 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2679 device_t dev = iflib_get_dev(ctx); 2680 struct ixgbe_hw *hw = &sc->hw; 2681 struct sysctl_oid_list *child; 2682 struct sysctl_ctx_list *ctx_list; 2683 2684 ctx_list = device_get_sysctl_ctx(dev); 2685 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2686 2687 /* Sysctls for all devices */ 2688 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2689 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2690 sc, 0, ixgbe_sysctl_flowcntl, "I", 2691 IXGBE_SYSCTL_DESC_SET_FC); 2692 2693 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2694 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2695 sc, 0, ixgbe_sysctl_advertise, "I", 2696 IXGBE_SYSCTL_DESC_ADV_SPEED); 2697 2698 sc->enable_aim = ixgbe_enable_aim; 2699 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW, 2700 &sc->enable_aim, 0, "Interrupt Moderation"); 2701 2702 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version", 2703 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2704 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions"); 2705 2706 #ifdef IXGBE_DEBUG 2707 /* testing sysctls (for all devices) */ 2708 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2709 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2710 sc, 0, ixgbe_sysctl_power_state, 2711 "I", "PCI Power State"); 2712 2713 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2714 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2715 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2716 #endif 2717 /* for X550 series devices */ 2718 if (hw->mac.type >= ixgbe_mac_X550) 2719 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2720 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2721 sc, 0, ixgbe_sysctl_dmac, 2722 "I", "DMA Coalesce"); 2723 2724 /* for WoL-capable devices */ 2725 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2726 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2727 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2728 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2729 2730 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 
2731 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2732 sc, 0, ixgbe_sysctl_wufc, 2733 "I", "Enable/Disable Wake Up Filters"); 2734 } 2735 2736 /* for X552/X557-AT devices */ 2737 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2738 struct sysctl_oid *phy_node; 2739 struct sysctl_oid_list *phy_list; 2740 2741 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2742 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls"); 2743 phy_list = SYSCTL_CHILDREN(phy_node); 2744 2745 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2746 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2747 sc, 0, ixgbe_sysctl_phy_temp, 2748 "I", "Current External PHY Temperature (Celsius)"); 2749 2750 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2751 "overtemp_occurred", 2752 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2753 ixgbe_sysctl_phy_overtemp_occurred, "I", 2754 "External PHY High Temperature Event Occurred"); 2755 } 2756 2757 if (sc->feat_cap & IXGBE_FEATURE_EEE) { 2758 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2759 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2760 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2761 } 2762 } /* ixgbe_add_device_sysctls */ 2763 2764 /************************************************************************ 2765 * ixgbe_allocate_pci_resources 2766 ************************************************************************/ 2767 static int 2768 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2769 { 2770 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2771 device_t dev = iflib_get_dev(ctx); 2772 int rid; 2773 2774 rid = PCIR_BAR(0); 2775 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2776 RF_ACTIVE); 2777 2778 if (!(sc->pci_mem)) { 2779 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2780 return (ENXIO); 2781 } 2782 2783 /* Save bus_space values for READ/WRITE_REG macros */ 2784 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); 2785 sc->osdep.mem_bus_space_handle = 2786 rman_get_bushandle(sc->pci_mem); 2787 /* Set hw values for shared code */ 2788 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; 2789 2790 return (0); 2791 } /* ixgbe_allocate_pci_resources */ 2792 2793 /************************************************************************ 2794 * ixgbe_detach - Device removal routine 2795 * 2796 * Called when the driver is being removed. 2797 * Stops the adapter and deallocates all the resources 2798 * that were allocated for driver operation. 
2799 * 2800 * return 0 on success, positive on failure 2801 ************************************************************************/ 2802 static int 2803 ixgbe_if_detach(if_ctx_t ctx) 2804 { 2805 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2806 device_t dev = iflib_get_dev(ctx); 2807 u32 ctrl_ext; 2808 2809 INIT_DEBUGOUT("ixgbe_detach: begin"); 2810 2811 if (ixgbe_pci_iov_detach(dev) != 0) { 2812 device_printf(dev, "SR-IOV in use; detach first.\n"); 2813 return (EBUSY); 2814 } 2815 2816 ixgbe_setup_low_power_mode(ctx); 2817 2818 /* let hardware know driver is unloading */ 2819 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); 2820 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2821 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); 2822 2823 ixgbe_free_pci_resources(ctx); 2824 free(sc->mta, M_IXGBE); 2825 2826 return (0); 2827 } /* ixgbe_if_detach */ 2828 2829 /************************************************************************ 2830 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2831 * 2832 * Prepare the adapter/port for LPLU and/or WoL 2833 ************************************************************************/ 2834 static int 2835 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2836 { 2837 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2838 struct ixgbe_hw *hw = &sc->hw; 2839 device_t dev = iflib_get_dev(ctx); 2840 s32 error = 0; 2841 2842 if (!hw->wol_enabled) 2843 ixgbe_set_phy_power(hw, false); 2844 2845 /* Limit power management flow to X550EM baseT */ 2846 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2847 hw->phy.ops.enter_lplu) { 2848 /* Turn off support for APM wakeup. (Using ACPI instead) */ 2849 IXGBE_WRITE_REG(hw, IXGBE_GRC, 2850 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 2851 2852 /* 2853 * Clear Wake Up Status register to prevent any previous wakeup 2854 * events from waking us up immediately after we suspend. 
2855 */ 2856 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2857 2858 /* 2859 * Program the Wakeup Filter Control register with user filter 2860 * settings 2861 */ 2862 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc); 2863 2864 /* Enable wakeups and power management in Wakeup Control */ 2865 IXGBE_WRITE_REG(hw, IXGBE_WUC, 2866 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 2867 2868 /* X550EM baseT adapters need a special LPLU flow */ 2869 hw->phy.reset_disable = true; 2870 ixgbe_if_stop(ctx); 2871 error = hw->phy.ops.enter_lplu(hw); 2872 if (error) 2873 device_printf(dev, "Error entering LPLU: %d\n", error); 2874 hw->phy.reset_disable = false; 2875 } else { 2876 /* Just stop for other adapters */ 2877 ixgbe_if_stop(ctx); 2878 } 2879 2880 return error; 2881 } /* ixgbe_setup_low_power_mode */ 2882 2883 /************************************************************************ 2884 * ixgbe_shutdown - Shutdown entry point 2885 ************************************************************************/ 2886 static int 2887 ixgbe_if_shutdown(if_ctx_t ctx) 2888 { 2889 int error = 0; 2890 2891 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 2892 2893 error = ixgbe_setup_low_power_mode(ctx); 2894 2895 return (error); 2896 } /* ixgbe_if_shutdown */ 2897 2898 /************************************************************************ 2899 * ixgbe_suspend 2900 * 2901 * From D0 to D3 2902 ************************************************************************/ 2903 static int 2904 ixgbe_if_suspend(if_ctx_t ctx) 2905 { 2906 int error = 0; 2907 2908 INIT_DEBUGOUT("ixgbe_suspend: begin"); 2909 2910 error = ixgbe_setup_low_power_mode(ctx); 2911 2912 return (error); 2913 } /* ixgbe_if_suspend */ 2914 2915 /************************************************************************ 2916 * ixgbe_resume 2917 * 2918 * From D3 to D0 2919 ************************************************************************/ 2920 static int 2921 ixgbe_if_resume(if_ctx_t ctx) 2922 { 2923 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2924 device_t dev = iflib_get_dev(ctx); 2925 struct ifnet *ifp = iflib_get_ifp(ctx); 2926 struct ixgbe_hw *hw = &sc->hw; 2927 u32 wus; 2928 2929 INIT_DEBUGOUT("ixgbe_resume: begin"); 2930 2931 /* Read & clear WUS register */ 2932 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2933 if (wus) 2934 device_printf(dev, "Woken up by (WUS): %#010x\n", 2935 IXGBE_READ_REG(hw, IXGBE_WUS)); 2936 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2937 /* And clear WUFC until next low-power transition */ 2938 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2939 2940 /* 2941 * Required after D3->D0 transition; 2942 * will re-advertise all previous advertised speeds 2943 */ 2944 if (ifp->if_flags & IFF_UP) 2945 ixgbe_if_init(ctx); 2946 2947 return (0); 2948 } /* ixgbe_if_resume */ 2949 2950 /************************************************************************ 2951 * ixgbe_if_mtu_set - Ioctl mtu entry point 2952 * 2953 * Return 0 on success, EINVAL on failure 2954 ************************************************************************/ 2955 static int 2956 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 2957 { 2958 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2959 int error = 0; 2960 2961 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); 2962 2963 if (mtu > IXGBE_MAX_MTU) { 2964 error = EINVAL; 2965 } else { 2966 sc->max_frame_size = mtu + IXGBE_MTU_HDR; 2967 } 2968 2969 return error; 2970 } /* ixgbe_if_mtu_set */ 2971 2972 /************************************************************************ 2973 * ixgbe_if_crcstrip_set 2974 
************************************************************************/ 2975 static void 2976 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip) 2977 { 2978 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2979 struct ixgbe_hw *hw = &sc->hw; 2980 /* crc stripping is set in two places: 2981 * IXGBE_HLREG0 (modified on init_locked and hw reset) 2982 * IXGBE_RDRXCTL (set by the original driver in 2983 * ixgbe_setup_hw_rsc() called in init_locked. 2984 * We disable the setting when netmap is compiled in). 2985 * We update the values here, but also in ixgbe.c because 2986 * init_locked sometimes is called outside our control. 2987 */ 2988 uint32_t hl, rxc; 2989 2990 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2991 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2992 #ifdef NETMAP 2993 if (netmap_verbose) 2994 D("%s read HLREG 0x%x rxc 0x%x", 2995 onoff ? "enter" : "exit", hl, rxc); 2996 #endif 2997 /* hw requirements ... */ 2998 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2999 rxc |= IXGBE_RDRXCTL_RSCACKC; 3000 if (onoff && !crcstrip) { 3001 /* keep the crc. Fast rx */ 3002 hl &= ~IXGBE_HLREG0_RXCRCSTRP; 3003 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; 3004 } else { 3005 /* reset default mode */ 3006 hl |= IXGBE_HLREG0_RXCRCSTRP; 3007 rxc |= IXGBE_RDRXCTL_CRCSTRIP; 3008 } 3009 #ifdef NETMAP 3010 if (netmap_verbose) 3011 D("%s write HLREG 0x%x rxc 0x%x", 3012 onoff ? "enter" : "exit", hl, rxc); 3013 #endif 3014 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); 3015 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); 3016 } /* ixgbe_if_crcstrip_set */ 3017 3018 /********************************************************************* 3019 * ixgbe_if_init - Init entry point 3020 * 3021 * Used in two ways: by the stack as the init entry point of the 3022 * network interface, and by the driver as a hw/sw initialization 3023 * routine to get to a consistent state. 3024 **********************************************************************/ 3028 void 3029 ixgbe_if_init(if_ctx_t ctx) 3030 { 3031 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3032 struct ifnet *ifp = iflib_get_ifp(ctx); 3033 device_t dev = iflib_get_dev(ctx); 3034 struct ixgbe_hw *hw = &sc->hw; 3035 struct ix_rx_queue *rx_que; 3036 struct ix_tx_queue *tx_que; 3037 u32 txdctl, mhadd; 3038 u32 rxdctl, rxctrl; 3039 u32 ctrl_ext; 3040 3041 int i, j, err; 3042 3043 INIT_DEBUGOUT("ixgbe_if_init: begin"); 3044 3045 /* Queue indices may change with IOV mode */ 3046 ixgbe_align_all_queue_indices(sc); 3047 3048 /* reprogram the RAR[0] in case user changed it.
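 * RAR[0] is the primary unicast address slot; the copy just below
 * then refreshes it from the interface lladdr so a locally
 * administered address (LAA) survives the reinit.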
*/ 3049 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV); 3050 3051 /* Get the latest mac address, User can use a LAA */ 3052 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3053 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1); 3054 hw->addr_ctrl.rar_used_count = 1; 3055 3056 ixgbe_init_hw(hw); 3057 3058 ixgbe_initialize_iov(sc); 3059 3060 ixgbe_initialize_transmit_units(ctx); 3061 3062 /* Setup Multicast table */ 3063 ixgbe_if_multi_set(ctx); 3064 3065 /* Determine the correct mbuf pool, based on frame size */ 3066 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 3067 3068 /* Configure RX settings */ 3069 ixgbe_initialize_receive_units(ctx); 3070 3071 /* 3072 * Initialize variable holding task enqueue requests 3073 * from MSI-X interrupts 3074 */ 3075 sc->task_requests = 0; 3076 3077 /* Enable SDP & MSI-X interrupts based on adapter */ 3078 ixgbe_config_gpie(sc); 3079 3080 /* Set MTU size */ 3081 if (ifp->if_mtu > ETHERMTU) { 3082 /* aka IXGBE_MAXFRS on 82599 and newer */ 3083 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3084 mhadd &= ~IXGBE_MHADD_MFS_MASK; 3085 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 3086 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 3087 } 3088 3089 /* Now enable all the queues */ 3090 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 3091 struct tx_ring *txr = &tx_que->txr; 3092 3093 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 3094 txdctl |= IXGBE_TXDCTL_ENABLE; 3095 /* Set WTHRESH to 8, burst writeback */ 3096 txdctl |= (8 << 16); 3097 /* 3098 * When the internal queue falls below PTHRESH (32), 3099 * start prefetching as long as there are at least 3100 * HTHRESH (1) buffers ready. The values are taken 3101 * from the Intel linux driver 3.8.21. 3102 * Prefetching enables tx line rate even with 1 queue. 
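 * For reference, the TXDCTL threshold fields are PTHRESH bits 6:0,
 * HTHRESH bits 14:8 and WTHRESH bits 22:16; combined with the
 * WTHRESH write above, the OR below encodes (32 << 0) | (1 << 8).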
3103 */ 3104 txdctl |= (32 << 0) | (1 << 8); 3105 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 3106 } 3107 3108 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 3109 struct rx_ring *rxr = &rx_que->rxr; 3110 3111 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3112 if (hw->mac.type == ixgbe_mac_82598EB) { 3113 /* 3114 * PTHRESH = 21 3115 * HTHRESH = 4 3116 * WTHRESH = 8 3117 */ 3118 rxdctl &= ~0x3FFFFF; 3119 rxdctl |= 0x080420; 3120 } 3121 rxdctl |= IXGBE_RXDCTL_ENABLE; 3122 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 3123 for (j = 0; j < 10; j++) { 3124 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 3125 IXGBE_RXDCTL_ENABLE) 3126 break; 3127 else 3128 msec_delay(1); 3129 } 3130 wmb(); 3131 } 3132 3133 /* Enable Receive engine */ 3134 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3135 if (hw->mac.type == ixgbe_mac_82598EB) 3136 rxctrl |= IXGBE_RXCTRL_DMBYPS; 3137 rxctrl |= IXGBE_RXCTRL_RXEN; 3138 ixgbe_enable_rx_dma(hw, rxctrl); 3139 3140 /* Set up MSI/MSI-X routing */ 3141 if (ixgbe_enable_msix) { 3142 ixgbe_configure_ivars(sc); 3143 /* Set up auto-mask */ 3144 if (hw->mac.type == ixgbe_mac_82598EB) 3145 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3146 else { 3147 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3148 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3149 } 3150 } else { /* Simple settings for Legacy/MSI */ 3151 ixgbe_set_ivar(sc, 0, 0, 0); 3152 ixgbe_set_ivar(sc, 0, 0, 1); 3153 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3154 } 3155 3156 ixgbe_init_fdir(sc); 3157 3158 /* 3159 * Check on any SFP devices that 3160 * need to be kick-started 3161 */ 3162 if (hw->phy.type == ixgbe_phy_none) { 3163 err = hw->phy.ops.identify(hw); 3164 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3165 device_printf(dev, 3166 "Unsupported SFP+ module type was detected.\n"); 3167 return; 3168 } 3169 } 3170 3171 /* Set moderation on the Link interrupt */ 3172 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR); 3173 3174 /* Enable power to the phy. 
*/ 3175 ixgbe_set_phy_power(hw, true); 3176 3177 /* Config/Enable Link */ 3178 ixgbe_config_link(ctx); 3179 3180 /* Hardware Packet Buffer & Flow Control setup */ 3181 ixgbe_config_delay_values(sc); 3182 3183 /* Initialize the FC settings */ 3184 ixgbe_start_hw(hw); 3185 3186 /* Set up VLAN support and filter */ 3187 ixgbe_setup_vlan_hw_support(ctx); 3188 3189 /* Setup DMA Coalescing */ 3190 ixgbe_config_dmac(sc); 3191 3192 /* And now turn on interrupts */ 3193 ixgbe_if_enable_intr(ctx); 3194 3195 /* Enable the use of the MBX by the VFs */ 3196 if (sc->feat_en & IXGBE_FEATURE_SRIOV) { 3197 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3198 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 3199 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3200 } 3201 3202 } /* ixgbe_if_init */ 3203 3204 /************************************************************************ 3205 * ixgbe_set_ivar 3206 * 3207 * Setup the correct IVAR register for a particular MSI-X interrupt 3208 * (yes this is all very magic and confusing :) 3209 * - entry is the register array entry 3210 * - vector is the MSI-X vector for this queue 3211 * - type is RX/TX/MISC 3212 ************************************************************************/ 3213 static void 3214 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type) 3215 { 3216 struct ixgbe_hw *hw = &sc->hw; 3217 u32 ivar, index; 3218 3219 vector |= IXGBE_IVAR_ALLOC_VAL; 3220 3221 switch (hw->mac.type) { 3222 case ixgbe_mac_82598EB: 3223 if (type == -1) 3224 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3225 else 3226 entry += (type * 64); 3227 index = (entry >> 2) & 0x1F; 3228 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3229 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3230 ivar |= (vector << (8 * (entry & 0x3))); 3231 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar); 3232 break; 3233 case ixgbe_mac_82599EB: 3234 case ixgbe_mac_X540: 3235 case ixgbe_mac_X550: 3236 case ixgbe_mac_X550EM_x: 3237 case ixgbe_mac_X550EM_a: 3238 if (type == -1) { /* MISC IVAR */ 3239 index = (entry & 1) * 8; 3240 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3241 ivar &= ~(0xFF << index); 3242 ivar |= (vector << index); 3243 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3244 } else { /* RX/TX IVARS */ 3245 index = (16 * (entry & 1)) + (8 * type); 3246 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3247 ivar &= ~(0xFF << index); 3248 ivar |= (vector << index); 3249 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3250 } break; 3251 default: 3252 break; 3253 } 3254 } /* ixgbe_set_ivar */ 3255 3256 /************************************************************************ 3257 * ixgbe_configure_ivars 3258 ************************************************************************/ 3259 static void 3260 ixgbe_configure_ivars(struct ixgbe_softc *sc) 3261 { 3262 struct ix_rx_queue *rx_que = sc->rx_queues; 3263 struct ix_tx_queue *tx_que = sc->tx_queues; 3264 u32 newitr; 3265 3266 if (ixgbe_max_interrupt_rate > 0) 3267 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3268 else { 3269 /* 3270 * Disable DMA coalescing if interrupt moderation is 3271 * disabled.
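 * For the non-zero case above, a worked example: a cap of 31250
 * interrupts/sec encodes as (4000000 / 31250) & 0x0FF8 = 0x80, an
 * EITR interval field of 16 (0x80 >> 3), which the interrupt_rate
 * sysctl handler reads back as 500000 / 16 = 31250.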
/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		sc->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		/* First the RX queue entry */
		ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);

		/* Set an Initial EITR value */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
	}
	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		/* ... and the TX */
		ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
	}
	/* For the Link interrupt */
	ixgbe_set_ivar(sc, 1, sc->vector, -1);
} /* ixgbe_configure_ivars */

/************************************************************************
 * ixgbe_config_gpie
 ************************************************************************/
static void
ixgbe_config_gpie(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (sc->intr_type == IFLIB_INTR_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		     |  IXGBE_GPIE_EIAME
		     |  IXGBE_GPIE_PBA_SUPPORT
		     |  IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
} /* ixgbe_config_gpie */

/************************************************************************
 * ixgbe_config_delay_values
 *
 *   Requires sc->max_frame_size to be set.
 ************************************************************************/
static void
ixgbe_config_delay_values(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 rxpb, frame, size, tmp;

	frame = sc->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = true;
} /* ixgbe_config_delay_values */
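
/*
 * Rough sketch of the watermark math above: the DV macros return a
 * worst-case delay value in bits, IXGBE_BT2KB() converts bits to
 * kilobytes, and RXPBSIZE(0) >> 10 is the RX packet buffer size in
 * kilobytes, so the high watermark works out to "packet buffer size
 * minus worst-case in-flight data" and the low watermark to the
 * converted low delay value.
 */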
/************************************************************************
 * ixgbe_set_multi - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static u_int
ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_mc_addr *mta = sc->mta;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);
	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	mta[idx].vmdq = sc->pool;

	return (1);
} /* ixgbe_mc_filter_apply */

static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	u8 *update_ptr;
	u32 fctrl;
	u_int mcnt;

	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");

	mta = sc->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(ifp, ixgbe_mc_filter_apply, sc);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, true);
	}

	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);

	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
} /* ixgbe_if_multi_set */

/************************************************************************
 * ixgbe_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixgbe_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8 *)(mta + 1);

	return (mta->addr);
} /* ixgbe_mc_array_itr */
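
/*
 * Illustration of the iterator contract above: the shared multicast
 * code walks an opaque u8 * cursor, and ixgbe_mc_array_itr()
 * reinterprets it as a struct ixgbe_mc_addr *, returns the current MAC
 * address, stores its VMDq pool, and advances the cursor by one whole
 * element (mta + 1), so the address and its pool always travel
 * together.
 */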
/************************************************************************
 * ixgbe_if_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	/* Check for pluggable optics */
	if (sc->sfp_probe)
		if (!ixgbe_sfp_probe(ctx))
			return; /* Nothing to do */

	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixgbe_if_timer */

/************************************************************************
 * ixgbe_sfp_probe
 *
 *   Determine if a port has optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	bool result = false;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		sc->sfp_probe = false;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module detected!\n");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = true;
	}
out:
	return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 cage_full = 0;
	s32 err;

	if (sc->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */

/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, true);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(sc->media);
	ixgbe_add_media_types(sc->ctx);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
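
/*
 * Note on the handlers above and below: interrupt context only sets
 * bits in sc->task_requests (see ixgbe_intr() and msix_link());
 * ixgbe_if_update_admin_status() later runs as the iflib admin task
 * and dispatches each requested handler there, outside interrupt
 * context.  A successful ixgbe_handle_mod() chains into
 * ixgbe_handle_msf() by setting IXGBE_REQUEST_TASK_MSF.
 */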
/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error;

	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(sc->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(sc->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	hw->adapter_stopped = false;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	sc->link_up = false;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
} /* ixgbe_if_stop */
/************************************************************************
 * ixgbe_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);

	if (sc->link_up) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s\n",
				    ((sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
				    10 : 1), "Full Duplex");
			sc->link_active = true;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&sc->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(sc);
			/* should actually be negotiated value */
			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));

			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	}

	/* Handle task requests from msix_link() */
	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	sc->task_requests = 0;

	ixgbe_update_stats_counters(sc);
} /* ixgbe_if_update_admin_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

	if (dcfg->watchdog_timer != sc->dmac ||
	    dcfg->link_speed != sc->link_speed) {
		dcfg->watchdog_timer = sc->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = sc->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */
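
/*
 * Note on the DMA coalescing state above (derived from this file):
 * sc->dmac holds the DMAC watchdog timer value, 0 meaning disabled;
 * the legal values are listed at the dmac sysctl handler further
 * below.  ixgbe_config_dmac() only calls the shared-code dmac_config()
 * op when the timer or the cached link speed actually changed, so it
 * is cheap to invoke from the admin-status path on every link event.
 */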
/************************************************************************
 * ixgbe_if_enable_intr
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (sc->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixgbe_if_enable_intr */

/************************************************************************
 * ixgbe_if_disable_intr
 ************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (sc->intr_type == IFLIB_INTR_MSIX)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixgbe_if_disable_intr */

/************************************************************************
 * ixgbe_link_intr_enable
 ************************************************************************/
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
	struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
} /* ixgbe_link_intr_enable */

/************************************************************************
 * ixgbe_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixgbe_enable_queue(sc, que->msix);

	return (0);
} /* ixgbe_if_rx_queue_intr_enable */
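
/*
 * The queue helpers below treat an MSI-X vector as a bit in a 64-bit
 * bitmap: on 82599 and later, vectors 0-31 map to EIMS_EX(0)/EIMC_EX(0)
 * and vectors 32-63 to EIMS_EX(1)/EIMC_EX(1).  For example, enabling
 * vector 33 computes queue = 1ULL << 33, so only (queue >> 32) == 0x2
 * is nonzero and bit 1 of EIMS_EX(1) gets set.  The 82598 has a single
 * 32-bit EIMS/EIMC, masked to its RTX queue bits.
 */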
/************************************************************************
 * ixgbe_enable_queue
 ************************************************************************/
static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
} /* ixgbe_enable_queue */

/************************************************************************
 * ixgbe_disable_queue
 ************************************************************************/
static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
} /* ixgbe_disable_queue */
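
/*
 * Background for the legacy ISR below (general ixgbe behavior, not
 * spelled out in this file): in legacy/MSI mode a read of EICR both
 * samples and clears the pending interrupt causes, so the single read
 * at the top acknowledges everything at once; a zero value means the
 * interrupt was not ours and interrupts are simply re-enabled.
 * Returning FILTER_SCHEDULE_THREAD asks iflib to run the queue task
 * for the actual RX processing.
 */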
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ix_rx_queue *que = sc->rx_queues;
	struct ixgbe_hw *hw = &sc->hw;
	if_ctx_t ctx = sc->ctx;
	u32 eicr, eicr_mask;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 *   SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, fc;

	sc = (struct ixgbe_softc *)arg1;
	fc = sc->hw.fc.current_mode;

	error = sysctl_handle_int(oidp, &fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (fc == sc->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(sc, fc);
} /* ixgbe_sysctl_flowcntl */
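
/*
 * Example usage (the "fc" sysctl name matches ix(4); unit number 0 is
 * hypothetical):
 *
 *   # sysctl dev.ix.0.fc=3    # request full RX/TX pause
 *   # sysctl dev.ix.0.fc=0    # disable flow control
 *
 * The accepted values are documented at ixgbe_set_flowcntl() below.
 */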
/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 *   Flow control values:
 *     0 - off
 *     1 - rx pause
 *     2 - tx pause
 *     3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		sc->hw.fc.requested_mode = fc;
		if (sc->num_rx_queues > 1)
			ixgbe_disable_rx_drop(sc);
		break;
	case ixgbe_fc_none:
		sc->hw.fc.requested_mode = ixgbe_fc_none;
		if (sc->num_rx_queues > 1)
			ixgbe_enable_rx_drop(sc);
		break;
	default:
		return (EINVAL);
	}

	/* Don't autoneg if forcing a value */
	sc->hw.fc.disable_fc_autoneg = true;
	ixgbe_fc_enable(&sc->hw);

	return (0);
} /* ixgbe_set_flowcntl */

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 *   Enable the hardware to drop packets when the buffer is
 *   full. This is useful with multiqueue, so that no single
 *   queue being full stalls the entire RX engine. We only
 *   enable this when Multiqueue is enabled AND Flow Control
 *   is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 *   SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, advertise;

	sc = (struct ixgbe_softc *)arg1;
	advertise = sc->advertise;

	error = sysctl_handle_int(oidp, &advertise, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixgbe_set_advertise(sc, advertise);
} /* ixgbe_sysctl_advertise */
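
/*
 * Example usage (the "advertise_speed" sysctl name matches ix(4); unit
 * number 0 is hypothetical):
 *
 *   # sysctl dev.ix.0.advertise_speed=0x6    # advertise 1G | 10G
 *
 * The flag values are documented at ixgbe_set_advertise() below.
 */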
/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32 err = IXGBE_NOT_IMPLEMENTED;
	bool negotiate = false;

	/* Checks to validate new value */
	if (sc->advertise == advertise) /* no change */
		return (0);

	hw = &sc->hw;

	/* No speed changes for backplane media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber))) {
		device_printf(dev,
		    "Advertised speed can only be set on copper or multispeed fiber media types.\n");
		return (EINVAL);
	}

	if (advertise < 0x1 || advertise > 0x3F) {
		device_printf(dev,
		    "Invalid advertised speed; valid modes are 0x1 through 0x3F\n");
		return (EINVAL);
	}

	if (hw->mac.ops.get_link_capabilities) {
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(dev,
			    "Unable to determine supported advertise speeds\n");
			return (ENODEV);
		}
	}

	/* Set new value and report new advertised mode */
	if (advertise & 0x1) {
		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
			device_printf(dev,
			    "Interface does not support 100Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_100_FULL;
	}
	if (advertise & 0x2) {
		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 1Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	}
	if (advertise & 0x4) {
		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	}
	if (advertise & 0x8) {
		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10_FULL;
	}
	if (advertise & 0x10) {
		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 2.5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
	}
	if (advertise & 0x20) {
		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(hw, speed, true);
	sc->advertise = advertise;

	return (0);
} /* ixgbe_set_advertise */
/************************************************************************
 * ixgbe_get_default_advertise - Get default advertised speed settings
 *
 *   Formatted for sysctl usage.
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_get_default_advertise(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int speed;
	ixgbe_link_speed link_caps = 0;
	s32 err;
	bool negotiate = false;

	/*
	 * Advertised speed means nothing unless it's copper or
	 * multi-speed fiber
	 */
	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
	    !(hw->phy.multispeed_fiber))
		return (0);

	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
	if (err != IXGBE_SUCCESS)
		return (0);

	if (hw->mac.type == ixgbe_mac_X550) {
		/*
		 * 2.5G and 5G autonegotiation speeds on X550
		 * are disabled by default due to reported
		 * interoperability issues with some switches.
		 */
		link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
		    IXGBE_LINK_SPEED_5GB_FULL);
	}

	speed =
	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);

	return (speed);
} /* ixgbe_get_default_advertise */
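
/*
 * Example usage for the handler below (assuming the sysctl is exposed
 * as "dmac" on unit 0, which is hypothetical here):
 *
 *   # sysctl dev.ix.0.dmac=1      # enable, default timer value (1000)
 *   # sysctl dev.ix.0.dmac=250    # enable with an explicit legal value
 *   # sysctl dev.ix.0.dmac=0      # disable DMA coalescing
 */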
/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 *   Control values:
 *     0/1 - off / on (use default value of 1000)
 *
 *   Legal timer values are:
 *     50, 100, 250, 500, 1000, 2000, 5000, 10000
 *
 *   Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ifnet *ifp = iflib_get_ifp(sc->ctx);
	int error;
	u16 newval;

	newval = sc->dmac;
	error = sysctl_handle_16(oidp, &newval, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		sc->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		sc->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		sc->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 *   Sysctl to test power states
 *   Values:
 *     0      - set device to D0
 *     3      - set device to D3
 *     (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_handle_int(oidp, &new_ps, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
} /* ixgbe_sysctl_power_state */
#endif

/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 *   Sysctl to enable/disable the WoL capability,
 *   if supported by the adapter.
 *
 *   Values:
 *     0 - disabled
 *     1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	int new_wol_enabled;
	int error = 0;

	new_wol_enabled = hw->wol_enabled;
	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	new_wol_enabled = !!(new_wol_enabled);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	if (new_wol_enabled > 0 && !sc->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
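
/*
 * The Wake Up Filter Control handler below takes ORed flag values in
 * the low byte.  For example, waking on either a link status change or
 * a magic packet means 0x1 | 0x2 = 0x3; any value with bits set above
 * the low byte is rejected with EINVAL.
 */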
/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 *   Sysctl to enable/disable the types of packets that the
 *   adapter will wake up on upon receipt.
 *   Flags:
 *     0x1  - Link Status Change
 *     0x2  - Magic Packet
 *     0x4  - Direct Exact
 *     0x8  - Directed Multicast
 *     0x10 - Broadcast
 *     0x20 - ARP/IPv4 Request Packet
 *     0x40 - Direct IPv4 Packet
 *     0x80 - Direct IPv6 Packet
 *
 *   Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	int error = 0;
	u32 new_wufc;

	new_wufc = sc->wufc;

	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == sc->wufc)
		return (0);

	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/* Replace the low byte (the filter flags), keep the upper bits. */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff00 & sc->wufc);
	sc->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	/* TODO: use sbufs to make a string to print out */
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	/* TODO: print more config */

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 *   For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	/* Shift temp for output */
	reg = reg >> 8;

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 *   Reports (directly from the PHY) whether the current PHY
 *   temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Get occurrence bit */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */

/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 *   Sysctl to set EEE power saving feature
 *   Values:
 *     0      - disable EEE
 *     1      - enable EEE
 *     (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	struct ifnet *ifp = iflib_get_ifp(sc->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = ixgbe_setup_eee(&sc->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	else
		sc->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
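
/*
 * Note for the function below (summarizing its own flow): sc->feat_cap
 * records what the hardware is capable of, sc->feat_en the subset that
 * is actually enabled.  Capabilities are filled in per MAC type first,
 * then defaults are enabled, then global sysctls (ixgbe_enable_fdir,
 * ixgbe_enable_msix, ixgbe_enable_rss) are applied, and finally
 * features with unmet dependencies - RSS and SR-IOV without MSI-X -
 * are stripped again.
 */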
/************************************************************************
 * ixgbe_init_device_features
 ************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
	sc->feat_cap = IXGBE_FEATURE_NETMAP
	             | IXGBE_FEATURE_RSS
	             | IXGBE_FEATURE_MSI
	             | IXGBE_FEATURE_MSIX
	             | IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (sc->feat_cap & IXGBE_FEATURE_EEE)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev,
			    "Device does not support Flow Director. Leaving disabled.\n");
	}
	/*
	 * Message Signaled Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		sc->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		sc->feat_en &= ~IXGBE_FEATURE_RSS;
		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	uint16_t phyfw = 0;
	int status;
	const char *space = "";

	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_orom_version(hw, &nvm_ver);     /* Option ROM */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
	status = ixgbe_get_phy_firmware_version(hw, &phyfw);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
		space = " ";
	}

	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
		space = " ";
	}

	if (phyfw != 0 && status == IXGBE_SUCCESS)
		sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
} /* ixgbe_sbuf_fw_version */

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */