/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const pci_vendor_info_t ixgbe_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
(Dual Fiber SR)"), 71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"), 72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"), 73 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"), 74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"), 75 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"), 76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"), 77 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"), 78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, "Intel(R) X520 82599 LS"), 79 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"), 80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"), 81 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"), 82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"), 83 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"), 84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"), 85 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"), 86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"), 87 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"), 88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"), 89 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"), 90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"), 91 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"), 92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"), 93 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"), 94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"), 95 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"), 96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"), 97 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"), 98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"), 99 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"), 100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"), 101 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"), 102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"), 103 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"), 104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"), 105 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"), 106 /* required last entry */ 107 PVID_END 108 }; 109 110 static void *ixgbe_register(device_t); 111 static int ixgbe_if_attach_pre(if_ctx_t); 112 static int ixgbe_if_attach_post(if_ctx_t); 113 static int ixgbe_if_detach(if_ctx_t); 114 static int ixgbe_if_shutdown(if_ctx_t); 115 static int ixgbe_if_suspend(if_ctx_t); 116 static int ixgbe_if_resume(if_ctx_t); 117 118 

static void *ixgbe_register(device_t);
static int  ixgbe_if_attach_pre(if_ctx_t);
static int  ixgbe_if_attach_post(if_ctx_t);
static int  ixgbe_if_detach(if_ctx_t);
static int  ixgbe_if_shutdown(if_ctx_t);
static int  ixgbe_if_suspend(if_ctx_t);
static int  ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixgbe_if_media_change(if_ctx_t);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int  ixgbe_if_promisc_set(if_ctx_t, int);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int  ixgbe_allocate_pci_resources(if_ctx_t);
static int  ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int  ixgbe_msix_link(void *);
static int  ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int  ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
static int  ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);
/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
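/*
 * Note the division of labor above: "ix" is the newbus driver, and every
 * one of its device methods (except device_register) simply delegates to
 * the generic iflib implementation.  The ixgbe_if_methods table that
 * follows supplies the ifdi_* callbacks that iflib invokes back into this
 * driver, so iflib owns the generic lifecycle while this file implements
 * only the hardware-specific pieces.
 */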
static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.  This only works as a compile
 * option right now as it's applied during attach; set this to
 * 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Setting this to a non-zero value allows the use of unsupported
 * SFP+ modules; note that doing so means you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation,
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;
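/*
 * All of the CTLFLAG_RDTUN knobs above are loader tunables.  A minimal
 * /boot/loader.conf sketch exercising them (values are examples only,
 * not recommendations; flow_control 3 assumes the ixgbe_fc_full enum
 * value):
 *
 *	hw.ix.max_interrupt_rate="31250"
 *	hw.ix.enable_msix="1"
 *	hw.ix.flow_control="3"
 *	hw.ix.enable_fdir="0"
 *
 * ixgbe_enable_aim is CTLFLAG_RWTUN, so it can also be toggled at
 * runtime with: sysctl hw.ix.enable_aim=1
 */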
extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(sc->num_tx_queues > 0);
	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		txr->sc = que->sc = sc;

		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];

		txr->bytes = 0;
		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (sc->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixgbe_if_queues_free(ctx);

	return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int i;

	MPASS(sc->num_rx_queues > 0);
	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		/* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_RDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = sc->tx_queues;
	struct ix_rx_queue *rx_que = sc->rx_queues;
	int i;

	if (tx_que != NULL) {
		for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
				break;

			free(txr->tx_rsq, M_IXGBE);
			txr->tx_rsq = NULL;
		}

		free(sc->tx_queues, M_IXGBE);
		sc->tx_queues = NULL;
	}
	if (rx_que != NULL) {
		free(sc->rx_queues, M_IXGBE);
		sc->rx_queues = NULL;
	}
} /* ixgbe_if_queues_free */
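/*
 * A note on memory ownership in the three routines above: iflib
 * allocates the descriptor rings themselves and passes their addresses
 * in via the vaddrs/paddrs arrays, so ixgbe_if_queues_free() releases
 * only the driver-private state (the ix_tx_queue/ix_rx_queue arrays and
 * each ring's tx_rsq report-status array).  The descriptor DMA memory
 * itself is freed by iflib, not here.
 */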
/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
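/*
 * Worked example of the RETA packing loop above, assuming four RX queues
 * and the kernel RSS option disabled (index_mult == 1): each 32-bit RETA
 * register holds four 8-bit queue indices, filled low byte first, so the
 * first four iterations produce queue_id = 0, 1, 2, 3 and
 *
 *	RETA(0) = 0x03020100
 *
 * with the same pattern repeating across the rest of the table.  On
 * 82598-class MACs index_mult is 0x11, which scales the non-RSS sequence
 * accordingly.
 */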
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
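/*
 * BSIZEPKT_ROUNDUP rounds a buffer size up to the SRRCTL.BSIZEPKT
 * granularity.  A quick worked example, assuming the customary
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10 (1 KB units):
 *
 *	rx_mbuf_sz = 2048 (MCLBYTES)
 *	bufsz = (2048 + 1023) >> 10 = 2   ->  SRRCTL.BSIZEPKT = 2 KB
 *
 * i.e. the field is programmed in 1 KB units, rounded up so the hardware
 * buffer is never smaller than the mbuf cluster backing it.
 */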
static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ixgbe_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (if_getmtu(ifp) > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (if_getcapenable(ifp) & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}
} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (&ixgbe_sctx_init);
} /* ixgbe_register */
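/*
 * ixgbe_register() is the first entry point iflib reaches (via the
 * device_register method in ix_methods above); the shared-context
 * template it returns drives the rest of attach.  Roughly, under iflib's
 * usual ordering:
 *
 *	device_register -> ixgbe_register()       (this function)
 *	device_attach   -> ifdi_attach_pre        (ixgbe_if_attach_pre)
 *	                -> interrupt assignment and queue allocation
 *	                -> ifdi_attach_post       (ixgbe_if_attach_post)
 *
 * The exact interleaving is iflib's business; this sketch is only meant
 * to orient readers of the two attach halves below.
 */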
/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->hw.back = sc;
	sc->ctx = ctx;
	sc->dev = dev;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Determine hardware revision */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		return (ENXIO);
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_pci;
	}

	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
		device_printf(dev,
		    "Firmware recovery mode detected. Limiting "
		    "functionality.\nRefer to the Intel(R) Ethernet Adapters "
		    "and Devices User Guide for details on firmware recovery "
		    "mode.");
		error = ENOSYS;
		goto err_pci;
	}

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	ixgbe_init_device_features(sc);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(sc);

	/* Verify adapter fan is still functional (if applicable) */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(sc, esdp, false);
	}

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	hw->phy.reset_if_overtemp = true;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		sc->sfp_probe = true;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_pci;
	} else if (error) {
		device_printf(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_pci;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_pci;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev,
		    "This device is a pre-production adapter/LOM.  Please be "
		    "aware there may be issues associated with your "
		    "hardware.\nIf you are experiencing problems please "
		    "contact your Intel or hardware representative who "
		    "provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_pci;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		scctx->isc_rss_table_size = 512;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
		break;
	default:
		scctx->isc_rss_table_size = 128;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
	}

	/* Allow legacy interrupts */
	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);

	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
	} else {
		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	}

	scctx->isc_msix_bar = pci_msix_table_bar(dev);

	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

	return (0);

err_pci:
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_pci_resources(ctx);

	return (error);
} /* ixgbe_if_attach_pre */
/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	if (sc->intr_type == IFLIB_INTR_LEGACY &&
	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev, "Device does not support legacy interrupts");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(sc, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	ixgbe_initialize_iov(sc);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(sc);
	ixgbe_add_hw_stats(sc);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(sc);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass sc.
	 */
	ixgbe_bypass_init(sc);

	/* Display NVM and Option ROM versions */
	ixgbe_print_fw_version(ctx);

	/* Set an initial dmac value */
	sc->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	sc->advertise = ixgbe_get_default_advertise(sc);

	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u16 dev_caps = 0;

	/* Find out WoL support for port */
	sc->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	    hw->bus.func == 0))
		sc->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */
/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));

	sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

	ixgbe_add_media_types(ctx);

	/* Autoselect media by default */
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	case IFCOUNTER_OMCASTS:
		return (sc->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (sc->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (0);
	case IFCOUNTER_IERRORS:
		return (sc->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int i;

	if (hw->phy.ops.read_i2c_byte == NULL)
		return (ENXIO);
	for (i = 0; i < req->len; i++)
		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
		    req->dev_addr, &req->data[i]);
	return (0);
} /* ixgbe_if_i2c_req */
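/*
 * The handler above services ifi2creq requests, which is how userland
 * reads SFP/SFP+ module EEPROMs through the PHY's I2C ops.  For example,
 * "ifconfig -v ix0" issues reads against the standard SFF-8472 addresses
 * (0xA0 identification page, 0xA2 diagnostics) and decodes the result;
 * req->dev_addr and req->offset select the page and byte.  Note that
 * per-byte read failures are not propagated to the caller here.
 */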
/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u64 layer;

	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (hw->mac.type == ixgbe_mac_X550) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return (true);
		return (false);
	case ixgbe_mac_82599EB:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (true);
		default:
			return (false);
		}
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
			return (true);
		return (false);
	default:
		return (false);
	}
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		iflib_admin_intr_deferred(ctx);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &sc->link_speed,
			    &sc->link_up, false);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;

		if (hw->mac.type == ixgbe_mac_X550 &&
		    hw->phy.autoneg_advertised == 0) {
			/*
			 * 2.5G and 5G autonegotiation speeds on X550
			 * are disabled by default due to reported
			 * interoperability issues with some switches.
			 *
			 * The second condition checks whether any operations
			 * involving setting autonegotiation speeds have
			 * been performed prior to this ixgbe_config_link()
			 * call.
			 *
			 * If hw->phy.autoneg_advertised does not
			 * equal 0, this means that the user might have
			 * set autonegotiation speeds via the sysctl
			 * before bringing the interface up. In this
			 * case, we should not disable 2.5G and 5G
			 * since those speeds might be selected by the
			 * user.
			 *
			 * Otherwise (i.e. if hw->phy.autoneg_advertised
			 * is set to 0), it is the first time we set
			 * autonegotiation preferences and the default
			 * set of speeds should exclude 2.5G and 5G.
			 */
			autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
			    IXGBE_LINK_SPEED_5GB_FULL);
		}

		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    sc->link_up);
	}
} /* ixgbe_config_link */
/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_hw_stats *stats = &sc->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u32 lxoffrxc;
	u64 total_missed_rx = 0;

	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	stats->gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		stats->lxoffrxc += lxoffrxc;
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		stats->lxoffrxc += lxoffrxc;
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (lxoffrxc)
		sc->shared->isc_pause_frames = 1;

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;

	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(sc, stats->gprc);
	IXGBE_SET_OPACKETS(sc, stats->gptc);
	IXGBE_SET_IBYTES(sc, stats->gorc);
	IXGBE_SET_OBYTES(sc, stats->gotc);
	IXGBE_SET_IMCASTS(sc, stats->mprc);
	IXGBE_SET_OMCASTS(sc, stats->mptc);
	IXGBE_SET_COLLISIONS(sc, 0);
	IXGBE_SET_IQDROPS(sc, total_missed_rx);

	/*
	 * Aggregate following types of errors as RX errors:
	 * - CRC error count,
	 * - illegal byte error count,
	 * - missed packets count,
	 * - length error count,
	 * - undersized packets count,
	 * - fragmented packets count,
	 * - oversized packets count,
	 * - jabber count.
	 */
	IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
	    stats->roc + stats->rjc);
} /* ixgbe_update_stats_counters */
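/*
 * A note on the pause-frame adjustments above: XON/XOFF frames are
 * minimum-size (64-byte) multicast control frames that the MAC also
 * counts in GPTC/MPTC/PTC64/GOTC, so lxon + lxoff is subtracted from the
 * packet counters (and total * ETHER_MIN_LEN from the good-octet count)
 * so that only real traffic is reported.  That rationale is inferred
 * from the arithmetic rather than stated in this file.  Also worth
 * noting: missed_rx and total_missed_rx are initialized to zero and
 * never incremented in this version, so the gprc correction and
 * IFCOUNTER_IQDROPS effectively report zero; only MPC(0) is accumulated.
 */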
1604 ************************************************************************/ 1605 static void 1606 ixgbe_add_hw_stats(struct ixgbe_softc *sc) 1607 { 1608 device_t dev = iflib_get_dev(sc->ctx); 1609 struct ix_rx_queue *rx_que; 1610 struct ix_tx_queue *tx_que; 1611 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1612 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1613 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1614 struct ixgbe_hw_stats *stats = &sc->stats.pf; 1615 struct sysctl_oid *stat_node, *queue_node; 1616 struct sysctl_oid_list *stat_list, *queue_list; 1617 int i; 1618 1619 #define QUEUE_NAME_LEN 32 1620 char namebuf[QUEUE_NAME_LEN]; 1621 1622 /* Driver Statistics */ 1623 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1624 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets"); 1625 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1626 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); 1627 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1628 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); 1629 1630 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 1631 struct tx_ring *txr = &tx_que->txr; 1632 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1633 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1634 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1635 queue_list = SYSCTL_CHILDREN(queue_node); 1636 1637 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1638 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1639 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1640 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1641 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1642 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1643 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1644 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1645 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1646 CTLFLAG_RD, &txr->total_packets, 1647 "Queue Packets Transmitted"); 1648 } 1649 1650 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 1651 struct rx_ring *rxr = &rx_que->rxr; 1652 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1653 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1654 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1655 queue_list = SYSCTL_CHILDREN(queue_node); 1656 1657 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1658 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 1659 &sc->rx_queues[i], 0, 1660 ixgbe_sysctl_interrupt_rate_handler, "IU", 1661 "Interrupt Rate"); 1662 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1663 CTLFLAG_RD, &(sc->rx_queues[i].irqs), 1664 "irqs on this queue"); 1665 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1666 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1667 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1668 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1669 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1670 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1671 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1672 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1673 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1674 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1675 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1676 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1677 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1678 CTLFLAG_RD, 
	    &rxr->rx_discarded, "Discarded RX packets");
	}

	/* MAC stats get their own sub node */

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
	    CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");

	/* Flow Control stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
	    CTLFLAG_RD,
	    &stats->ruc, "Receive Undersized");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
	    CTLFLAG_RD, &stats->xec, "Checksum Errors");

	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 *   Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if (!txr)
		return (0);

	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;

	return (0);
} /* ixgbe_sysctl_tdh_handler */

/************************************************************************
 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
 *
 *   Retrieves the TDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1813 { 1814 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1815 int error; 1816 unsigned int val; 1817 1818 if (!txr) 1819 return (0); 1820 1821 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me)); 1822 error = sysctl_handle_int(oidp, &val, 0, req); 1823 if (error || !req->newptr) 1824 return error; 1825 1826 return (0); 1827 } /* ixgbe_sysctl_tdt_handler */ 1828 1829 /************************************************************************ 1830 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1831 * 1832 * Retrieves the RDH value from the hardware 1833 ************************************************************************/ 1834 static int 1835 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1836 { 1837 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1838 int error; 1839 unsigned int val; 1840 1841 if (!rxr) 1842 return (0); 1843 1844 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me)); 1845 error = sysctl_handle_int(oidp, &val, 0, req); 1846 if (error || !req->newptr) 1847 return error; 1848 1849 return (0); 1850 } /* ixgbe_sysctl_rdh_handler */ 1851 1852 /************************************************************************ 1853 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1854 * 1855 * Retrieves the RDT value from the hardware 1856 ************************************************************************/ 1857 static int 1858 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1859 { 1860 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1861 int error; 1862 unsigned int val; 1863 1864 if (!rxr) 1865 return (0); 1866 1867 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me)); 1868 error = sysctl_handle_int(oidp, &val, 0, req); 1869 if (error || !req->newptr) 1870 return error; 1871 1872 return (0); 1873 } /* ixgbe_sysctl_rdt_handler */ 1874 1875 /************************************************************************ 1876 * ixgbe_if_vlan_register 1877 * 1878 * Run via vlan config EVENT, it enables us to use the 1879 * HW Filter table since we can get the vlan id. This 1880 * just creates the entry in the soft version of the 1881 * VFTA, init will repopulate the real table. 1882 ************************************************************************/ 1883 static void 1884 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1885 { 1886 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1887 u16 index, bit; 1888 1889 index = (vtag >> 5) & 0x7F; 1890 bit = vtag & 0x1F; 1891 sc->shadow_vfta[index] |= (1 << bit); 1892 ++sc->num_vlans; 1893 ixgbe_setup_vlan_hw_support(ctx); 1894 } /* ixgbe_if_vlan_register */ 1895 1896 /************************************************************************ 1897 * ixgbe_if_vlan_unregister 1898 * 1899 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 
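 * The shadow VFTA mirrors the 128 32-bit hardware VFTA words: a vtag
 * selects word (vtag >> 5) and bit (vtag & 0x1F), so e.g. vlan id 100
 * lives in word 3, bit 4.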
 ************************************************************************/
static void
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16 index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] &= ~(1 << bit);
	--sc->num_vlans;
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_unregister */

/************************************************************************
 * ixgbe_setup_vlan_hw_support
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	int i;
	u32 ctrl;

	/*
	 * We get here via init, meaning a soft reset has already
	 * cleared the VFTA and other state, so if no vlans have been
	 * registered there is nothing to do.
	 */
	if (sc->num_vlans == 0 ||
	    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
		/* Clear the vlan hw flag */
		for (i = 0; i < sc->num_rx_queues; i++) {
			rxr = &sc->rx_queues[i].rxr;
			/* On 82599 the VLAN enable is per-queue in RXDCTL */
			if (hw->mac.type != ixgbe_mac_82598EB) {
				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
				ctrl &= ~IXGBE_RXDCTL_VME;
				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
			}
			rxr->vtag_strip = false;
		}
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		/* Disable the VLAN filter table */
		ctrl |= IXGBE_VLNCTRL_CFIEN;
		ctrl &= ~IXGBE_VLNCTRL_VFE;
		if (hw->mac.type == ixgbe_mac_82598EB)
			ctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
		return;
	}

	/* Setup the queues for vlans */
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
		for (i = 0; i < sc->num_rx_queues; i++) {
			rxr = &sc->rx_queues[i].rxr;
			/* On 82599 the VLAN enable is per-queue in RXDCTL */
			if (hw->mac.type != ixgbe_mac_82598EB) {
				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
				ctrl |= IXGBE_RXDCTL_VME;
				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
			}
			rxr->vtag_strip = true;
		}
	}

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (sc->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    sc->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */

/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into.
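 *   Used only to warn when the slot cannot supply enough PCIe
 *   bandwidth for line-rate operation.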
1998 ************************************************************************/ 1999 static void 2000 ixgbe_get_slot_info(struct ixgbe_softc *sc) 2001 { 2002 device_t dev = iflib_get_dev(sc->ctx); 2003 struct ixgbe_hw *hw = &sc->hw; 2004 int bus_info_valid = true; 2005 u32 offset; 2006 u16 link; 2007 2008 /* Some devices are behind an internal bridge */ 2009 switch (hw->device_id) { 2010 case IXGBE_DEV_ID_82599_SFP_SF_QP: 2011 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 2012 goto get_parent_info; 2013 default: 2014 break; 2015 } 2016 2017 ixgbe_get_bus_info(hw); 2018 2019 /* 2020 * Some devices don't use PCI-E, but there is no need 2021 * to display "Unknown" for bus speed and width. 2022 */ 2023 switch (hw->mac.type) { 2024 case ixgbe_mac_X550EM_x: 2025 case ixgbe_mac_X550EM_a: 2026 return; 2027 default: 2028 goto display; 2029 } 2030 2031 get_parent_info: 2032 /* 2033 * For the Quad port adapter we need to parse back 2034 * up the PCI tree to find the speed of the expansion 2035 * slot into which this adapter is plugged. A bit more work. 2036 */ 2037 dev = device_get_parent(device_get_parent(dev)); 2038 #ifdef IXGBE_DEBUG 2039 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 2040 pci_get_slot(dev), pci_get_function(dev)); 2041 #endif 2042 dev = device_get_parent(device_get_parent(dev)); 2043 #ifdef IXGBE_DEBUG 2044 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 2045 pci_get_slot(dev), pci_get_function(dev)); 2046 #endif 2047 /* Now get the PCI Express Capabilities offset */ 2048 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 2049 /* 2050 * Hmm...can't get PCI-Express capabilities. 2051 * Falling back to default method. 2052 */ 2053 bus_info_valid = false; 2054 ixgbe_get_bus_info(hw); 2055 goto display; 2056 } 2057 /* ...and read the Link Status Register */ 2058 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2059 ixgbe_set_pci_config_data_generic(hw, link); 2060 2061 display: 2062 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 2063 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 2064 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 2065 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 2066 "Unknown"), 2067 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 2068 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 2069 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 2070 "Unknown")); 2071 2072 if (bus_info_valid) { 2073 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 2074 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 2075 (hw->bus.speed == ixgbe_bus_speed_2500))) { 2076 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2077 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 2078 } 2079 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 2080 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 2081 (hw->bus.speed < ixgbe_bus_speed_8000))) { 2082 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2083 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 2084 } 2085 } else 2086 device_printf(dev, "Unable to determine slot speed/width. 
The speed/width reported are that of the internal switch.\n"); 2087 2088 return; 2089 } /* ixgbe_get_slot_info */ 2090 2091 /************************************************************************ 2092 * ixgbe_if_msix_intr_assign 2093 * 2094 * Setup MSI-X Interrupt resources and handlers 2095 ************************************************************************/ 2096 static int 2097 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 2098 { 2099 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2100 struct ix_rx_queue *rx_que = sc->rx_queues; 2101 struct ix_tx_queue *tx_que; 2102 int error, rid, vector = 0; 2103 char buf[16]; 2104 2105 /* Admin Que is vector 0*/ 2106 rid = vector + 1; 2107 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) { 2108 rid = vector + 1; 2109 2110 snprintf(buf, sizeof(buf), "rxq%d", i); 2111 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 2112 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 2113 2114 if (error) { 2115 device_printf(iflib_get_dev(ctx), 2116 "Failed to allocate que int %d err: %d", i, error); 2117 sc->num_rx_queues = i + 1; 2118 goto fail; 2119 } 2120 2121 rx_que->msix = vector; 2122 } 2123 for (int i = 0; i < sc->num_tx_queues; i++) { 2124 snprintf(buf, sizeof(buf), "txq%d", i); 2125 tx_que = &sc->tx_queues[i]; 2126 tx_que->msix = i % sc->num_rx_queues; 2127 iflib_softirq_alloc_generic(ctx, 2128 &sc->rx_queues[tx_que->msix].que_irq, 2129 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2130 } 2131 rid = vector + 1; 2132 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, 2133 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq"); 2134 if (error) { 2135 device_printf(iflib_get_dev(ctx), 2136 "Failed to register admin handler"); 2137 return (error); 2138 } 2139 2140 sc->vector = vector; 2141 2142 return (0); 2143 fail: 2144 iflib_irq_free(ctx, &sc->irq); 2145 rx_que = sc->rx_queues; 2146 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) 2147 iflib_irq_free(ctx, &rx_que->que_irq); 2148 2149 return (error); 2150 } /* ixgbe_if_msix_intr_assign */ 2151 2152 static inline void 2153 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que) 2154 { 2155 uint32_t newitr = 0; 2156 struct rx_ring *rxr = &que->rxr; 2157 2158 /* 2159 * Do Adaptive Interrupt Moderation: 2160 * - Write out last calculated setting 2161 * - Calculate based on average size over 2162 * the last interval. 
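	 * For example, 2 packets totalling 3000 bytes average 1500 bytes;
	 * with 24 bytes of framing overhead added (1524, above the 300-1200
	 * mid range) the value is halved, giving an EITR setting of 762.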
2163 */ 2164 if (que->eitr_setting) { 2165 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix), 2166 que->eitr_setting); 2167 } 2168 2169 que->eitr_setting = 0; 2170 /* Idle, do nothing */ 2171 if (rxr->bytes == 0) { 2172 return; 2173 } 2174 2175 if ((rxr->bytes) && (rxr->packets)) { 2176 newitr = (rxr->bytes / rxr->packets); 2177 } 2178 2179 newitr += 24; /* account for hardware frame, crc */ 2180 /* set an upper boundary */ 2181 newitr = min(newitr, 3000); 2182 2183 /* Be nice to the mid range */ 2184 if ((newitr > 300) && (newitr < 1200)) { 2185 newitr = (newitr / 3); 2186 } else { 2187 newitr = (newitr / 2); 2188 } 2189 2190 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 2191 newitr |= newitr << 16; 2192 } else { 2193 newitr |= IXGBE_EITR_CNT_WDIS; 2194 } 2195 2196 /* save for next interrupt */ 2197 que->eitr_setting = newitr; 2198 2199 /* Reset state */ 2200 rxr->bytes = 0; 2201 rxr->packets = 0; 2202 2203 return; 2204 } 2205 2206 /********************************************************************* 2207 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2208 **********************************************************************/ 2209 static int 2210 ixgbe_msix_que(void *arg) 2211 { 2212 struct ix_rx_queue *que = arg; 2213 struct ixgbe_softc *sc = que->sc; 2214 if_t ifp = iflib_get_ifp(que->sc->ctx); 2215 2216 /* Protect against spurious interrupts */ 2217 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 2218 return (FILTER_HANDLED); 2219 2220 ixgbe_disable_queue(sc, que->msix); 2221 ++que->irqs; 2222 2223 /* Check for AIM */ 2224 if (sc->enable_aim) { 2225 ixgbe_perform_aim(sc, que); 2226 } 2227 2228 return (FILTER_SCHEDULE_THREAD); 2229 } /* ixgbe_msix_que */ 2230 2231 /************************************************************************ 2232 * ixgbe_media_status - Media Ioctl callback 2233 * 2234 * Called whenever the user queries the status of 2235 * the interface using ifconfig. 
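 * Reports IFM_AVALID unconditionally, and adds IFM_ACTIVE plus the
 * subtype matching the negotiated speed and PHY layer once link is up.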
2236 ************************************************************************/ 2237 static void 2238 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2239 { 2240 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2241 struct ixgbe_hw *hw = &sc->hw; 2242 int layer; 2243 2244 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2245 2246 ifmr->ifm_status = IFM_AVALID; 2247 ifmr->ifm_active = IFM_ETHER; 2248 2249 if (!sc->link_active) 2250 return; 2251 2252 ifmr->ifm_status |= IFM_ACTIVE; 2253 layer = sc->phy_layer; 2254 2255 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2256 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2257 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2258 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2259 switch (sc->link_speed) { 2260 case IXGBE_LINK_SPEED_10GB_FULL: 2261 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2262 break; 2263 case IXGBE_LINK_SPEED_1GB_FULL: 2264 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2265 break; 2266 case IXGBE_LINK_SPEED_100_FULL: 2267 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2268 break; 2269 case IXGBE_LINK_SPEED_10_FULL: 2270 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2271 break; 2272 } 2273 if (hw->mac.type == ixgbe_mac_X550) 2274 switch (sc->link_speed) { 2275 case IXGBE_LINK_SPEED_5GB_FULL: 2276 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 2277 break; 2278 case IXGBE_LINK_SPEED_2_5GB_FULL: 2279 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 2280 break; 2281 } 2282 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2283 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2284 switch (sc->link_speed) { 2285 case IXGBE_LINK_SPEED_10GB_FULL: 2286 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2287 break; 2288 } 2289 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2290 switch (sc->link_speed) { 2291 case IXGBE_LINK_SPEED_10GB_FULL: 2292 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2293 break; 2294 case IXGBE_LINK_SPEED_1GB_FULL: 2295 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2296 break; 2297 } 2298 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2299 switch (sc->link_speed) { 2300 case IXGBE_LINK_SPEED_10GB_FULL: 2301 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2302 break; 2303 case IXGBE_LINK_SPEED_1GB_FULL: 2304 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2305 break; 2306 } 2307 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2308 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2309 switch (sc->link_speed) { 2310 case IXGBE_LINK_SPEED_10GB_FULL: 2311 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2312 break; 2313 case IXGBE_LINK_SPEED_1GB_FULL: 2314 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2315 break; 2316 } 2317 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2318 switch (sc->link_speed) { 2319 case IXGBE_LINK_SPEED_10GB_FULL: 2320 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2321 break; 2322 } 2323 /* 2324 * XXX: These need to use the proper media types once 2325 * they're added. 
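	 * Until then, the KR/KX4/KX backplane layers are reported with
	 * the closest pre-existing subtypes (SR, CX4, CX) when
	 * IFM_ETH_XTYPE is not available.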
	 */
#ifndef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_if_media_status */

/************************************************************************
 * ixgbe_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
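	 * All this routine does is translate the selected subtype into a
	 * link-speed mask for setup_link(); IFM_AUTO, for example,
	 * advertises 100M, 1G and 10G at once.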
2422 */ 2423 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2424 case IFM_AUTO: 2425 case IFM_10G_T: 2426 speed |= IXGBE_LINK_SPEED_100_FULL; 2427 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2428 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2429 break; 2430 case IFM_10G_LRM: 2431 case IFM_10G_LR: 2432 #ifndef IFM_ETH_XTYPE 2433 case IFM_10G_SR: /* KR, too */ 2434 case IFM_10G_CX4: /* KX4 */ 2435 #else 2436 case IFM_10G_KR: 2437 case IFM_10G_KX4: 2438 #endif 2439 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2440 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2441 break; 2442 #ifndef IFM_ETH_XTYPE 2443 case IFM_1000_CX: /* KX */ 2444 #else 2445 case IFM_1000_KX: 2446 #endif 2447 case IFM_1000_LX: 2448 case IFM_1000_SX: 2449 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2450 break; 2451 case IFM_1000_T: 2452 speed |= IXGBE_LINK_SPEED_100_FULL; 2453 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2454 break; 2455 case IFM_10G_TWINAX: 2456 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2457 break; 2458 case IFM_5000_T: 2459 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2460 break; 2461 case IFM_2500_T: 2462 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2463 break; 2464 case IFM_100_TX: 2465 speed |= IXGBE_LINK_SPEED_100_FULL; 2466 break; 2467 case IFM_10_T: 2468 speed |= IXGBE_LINK_SPEED_10_FULL; 2469 break; 2470 default: 2471 goto invalid; 2472 } 2473 2474 hw->mac.autotry_restart = true; 2475 hw->mac.ops.setup_link(hw, speed, true); 2476 sc->advertise = 2477 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) | 2478 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) | 2479 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | 2480 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) | 2481 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) | 2482 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0); 2483 2484 return (0); 2485 2486 invalid: 2487 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2488 2489 return (EINVAL); 2490 } /* ixgbe_if_media_change */ 2491 2492 /************************************************************************ 2493 * ixgbe_set_promisc 2494 ************************************************************************/ 2495 static int 2496 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2497 { 2498 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2499 if_t ifp = iflib_get_ifp(ctx); 2500 u32 rctl; 2501 int mcnt = 0; 2502 2503 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); 2504 rctl &= (~IXGBE_FCTRL_UPE); 2505 if (if_getflags(ifp) & IFF_ALLMULTI) 2506 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2507 else { 2508 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); 2509 } 2510 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2511 rctl &= (~IXGBE_FCTRL_MPE); 2512 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2513 2514 if (if_getflags(ifp) & IFF_PROMISC) { 2515 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2516 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2517 } else if (if_getflags(ifp) & IFF_ALLMULTI) { 2518 rctl |= IXGBE_FCTRL_MPE; 2519 rctl &= ~IXGBE_FCTRL_UPE; 2520 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2521 } 2522 return (0); 2523 } /* ixgbe_if_promisc_set */ 2524 2525 /************************************************************************ 2526 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2527 ************************************************************************/ 2528 static int 2529 ixgbe_msix_link(void *arg) 2530 { 2531 struct ixgbe_softc *sc = arg; 2532 struct ixgbe_hw *hw = &sc->hw; 2533 u32 eicr, eicr_mask; 2534 s32 retval; 2535 2536 ++sc->link_irq; 2537 2538 /* Pause other interrupts */ 2539 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2540 2541 /* First 
get the cause */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
				return (FILTER_HANDLED);
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
		} else if (eicr & IXGBE_EICR_ECC) {
			device_printf(iflib_get_dev(sc->ctx),
			    "Received ECC Err, initiating reset\n");
			hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
			ixgbe_reset_hw(hw);
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (sc->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(sc->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(sc->ctx),
				    "System shutdown required!\n");
				break;
			default:
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(sc->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(sc->ctx),
				    "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* Check for fan failure */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(sc, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
	}

	return (sc->task_requests != 0) ?
FILTER_SCHEDULE_THREAD : FILTER_HANDLED; 2645 } /* ixgbe_msix_link */ 2646 2647 /************************************************************************ 2648 * ixgbe_sysctl_interrupt_rate_handler 2649 ************************************************************************/ 2650 static int 2651 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2652 { 2653 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2654 int error; 2655 unsigned int reg, usec, rate; 2656 2657 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix)); 2658 usec = ((reg & 0x0FF8) >> 3); 2659 if (usec > 0) 2660 rate = 500000 / usec; 2661 else 2662 rate = 0; 2663 error = sysctl_handle_int(oidp, &rate, 0, req); 2664 if (error || !req->newptr) 2665 return error; 2666 reg &= ~0xfff; /* default, no limitation */ 2667 ixgbe_max_interrupt_rate = 0; 2668 if (rate > 0 && rate < 500000) { 2669 if (rate < 1000) 2670 rate = 1000; 2671 ixgbe_max_interrupt_rate = rate; 2672 reg |= ((4000000/rate) & 0xff8); 2673 } 2674 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg); 2675 2676 return (0); 2677 } /* ixgbe_sysctl_interrupt_rate_handler */ 2678 2679 /************************************************************************ 2680 * ixgbe_add_device_sysctls 2681 ************************************************************************/ 2682 static void 2683 ixgbe_add_device_sysctls(if_ctx_t ctx) 2684 { 2685 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2686 device_t dev = iflib_get_dev(ctx); 2687 struct ixgbe_hw *hw = &sc->hw; 2688 struct sysctl_oid_list *child; 2689 struct sysctl_ctx_list *ctx_list; 2690 2691 ctx_list = device_get_sysctl_ctx(dev); 2692 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2693 2694 /* Sysctls for all devices */ 2695 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2696 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2697 sc, 0, ixgbe_sysctl_flowcntl, "I", 2698 IXGBE_SYSCTL_DESC_SET_FC); 2699 2700 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2701 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2702 sc, 0, ixgbe_sysctl_advertise, "I", 2703 IXGBE_SYSCTL_DESC_ADV_SPEED); 2704 2705 sc->enable_aim = ixgbe_enable_aim; 2706 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW, 2707 &sc->enable_aim, 0, "Interrupt Moderation"); 2708 2709 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version", 2710 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2711 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions"); 2712 2713 #ifdef IXGBE_DEBUG 2714 /* testing sysctls (for all devices) */ 2715 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2716 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2717 sc, 0, ixgbe_sysctl_power_state, 2718 "I", "PCI Power State"); 2719 2720 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2721 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2722 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2723 #endif 2724 /* for X550 series devices */ 2725 if (hw->mac.type >= ixgbe_mac_X550) 2726 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2727 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2728 sc, 0, ixgbe_sysctl_dmac, 2729 "I", "DMA Coalesce"); 2730 2731 /* for WoL-capable devices */ 2732 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2733 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2734 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2735 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2736 2737 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 
2738 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2739 sc, 0, ixgbe_sysctl_wufc, 2740 "I", "Enable/Disable Wake Up Filters"); 2741 } 2742 2743 /* for X552/X557-AT devices */ 2744 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2745 struct sysctl_oid *phy_node; 2746 struct sysctl_oid_list *phy_list; 2747 2748 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2749 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls"); 2750 phy_list = SYSCTL_CHILDREN(phy_node); 2751 2752 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2753 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2754 sc, 0, ixgbe_sysctl_phy_temp, 2755 "I", "Current External PHY Temperature (Celsius)"); 2756 2757 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2758 "overtemp_occurred", 2759 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2760 ixgbe_sysctl_phy_overtemp_occurred, "I", 2761 "External PHY High Temperature Event Occurred"); 2762 } 2763 2764 if (sc->feat_cap & IXGBE_FEATURE_EEE) { 2765 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2766 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2767 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2768 } 2769 } /* ixgbe_add_device_sysctls */ 2770 2771 /************************************************************************ 2772 * ixgbe_allocate_pci_resources 2773 ************************************************************************/ 2774 static int 2775 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2776 { 2777 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2778 device_t dev = iflib_get_dev(ctx); 2779 int rid; 2780 2781 rid = PCIR_BAR(0); 2782 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2783 RF_ACTIVE); 2784 2785 if (!(sc->pci_mem)) { 2786 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2787 return (ENXIO); 2788 } 2789 2790 /* Save bus_space values for READ/WRITE_REG macros */ 2791 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); 2792 sc->osdep.mem_bus_space_handle = 2793 rman_get_bushandle(sc->pci_mem); 2794 /* Set hw values for shared code */ 2795 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; 2796 2797 return (0); 2798 } /* ixgbe_allocate_pci_resources */ 2799 2800 /************************************************************************ 2801 * ixgbe_detach - Device removal routine 2802 * 2803 * Called when the driver is being removed. 2804 * Stops the adapter and deallocates all the resources 2805 * that were allocated for driver operation. 
2806 * 2807 * return 0 on success, positive on failure 2808 ************************************************************************/ 2809 static int 2810 ixgbe_if_detach(if_ctx_t ctx) 2811 { 2812 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2813 device_t dev = iflib_get_dev(ctx); 2814 u32 ctrl_ext; 2815 2816 INIT_DEBUGOUT("ixgbe_detach: begin"); 2817 2818 if (ixgbe_pci_iov_detach(dev) != 0) { 2819 device_printf(dev, "SR-IOV in use; detach first.\n"); 2820 return (EBUSY); 2821 } 2822 2823 ixgbe_setup_low_power_mode(ctx); 2824 2825 /* let hardware know driver is unloading */ 2826 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); 2827 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2828 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); 2829 2830 ixgbe_free_pci_resources(ctx); 2831 free(sc->mta, M_IXGBE); 2832 2833 return (0); 2834 } /* ixgbe_if_detach */ 2835 2836 /************************************************************************ 2837 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2838 * 2839 * Prepare the adapter/port for LPLU and/or WoL 2840 ************************************************************************/ 2841 static int 2842 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2843 { 2844 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2845 struct ixgbe_hw *hw = &sc->hw; 2846 device_t dev = iflib_get_dev(ctx); 2847 s32 error = 0; 2848 2849 if (!hw->wol_enabled) 2850 ixgbe_set_phy_power(hw, false); 2851 2852 /* Limit power management flow to X550EM baseT */ 2853 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2854 hw->phy.ops.enter_lplu) { 2855 /* Turn off support for APM wakeup. (Using ACPI instead) */ 2856 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw), 2857 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2); 2858 2859 /* 2860 * Clear Wake Up Status register to prevent any previous wakeup 2861 * events from waking us up immediately after we suspend. 
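		 * (WUS bits are write-1-to-clear, hence the all-ones
		 * write below.)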
2862 */ 2863 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2864 2865 /* 2866 * Program the Wakeup Filter Control register with user filter 2867 * settings 2868 */ 2869 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc); 2870 2871 /* Enable wakeups and power management in Wakeup Control */ 2872 IXGBE_WRITE_REG(hw, IXGBE_WUC, 2873 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 2874 2875 /* X550EM baseT adapters need a special LPLU flow */ 2876 hw->phy.reset_disable = true; 2877 ixgbe_if_stop(ctx); 2878 error = hw->phy.ops.enter_lplu(hw); 2879 if (error) 2880 device_printf(dev, "Error entering LPLU: %d\n", error); 2881 hw->phy.reset_disable = false; 2882 } else { 2883 /* Just stop for other adapters */ 2884 ixgbe_if_stop(ctx); 2885 } 2886 2887 return error; 2888 } /* ixgbe_setup_low_power_mode */ 2889 2890 /************************************************************************ 2891 * ixgbe_shutdown - Shutdown entry point 2892 ************************************************************************/ 2893 static int 2894 ixgbe_if_shutdown(if_ctx_t ctx) 2895 { 2896 int error = 0; 2897 2898 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 2899 2900 error = ixgbe_setup_low_power_mode(ctx); 2901 2902 return (error); 2903 } /* ixgbe_if_shutdown */ 2904 2905 /************************************************************************ 2906 * ixgbe_suspend 2907 * 2908 * From D0 to D3 2909 ************************************************************************/ 2910 static int 2911 ixgbe_if_suspend(if_ctx_t ctx) 2912 { 2913 int error = 0; 2914 2915 INIT_DEBUGOUT("ixgbe_suspend: begin"); 2916 2917 error = ixgbe_setup_low_power_mode(ctx); 2918 2919 return (error); 2920 } /* ixgbe_if_suspend */ 2921 2922 /************************************************************************ 2923 * ixgbe_resume 2924 * 2925 * From D3 to D0 2926 ************************************************************************/ 2927 static int 2928 ixgbe_if_resume(if_ctx_t ctx) 2929 { 2930 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2931 device_t dev = iflib_get_dev(ctx); 2932 if_t ifp = iflib_get_ifp(ctx); 2933 struct ixgbe_hw *hw = &sc->hw; 2934 u32 wus; 2935 2936 INIT_DEBUGOUT("ixgbe_resume: begin"); 2937 2938 /* Read & clear WUS register */ 2939 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2940 if (wus) 2941 device_printf(dev, "Woken up by (WUS): %#010x\n", 2942 IXGBE_READ_REG(hw, IXGBE_WUS)); 2943 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2944 /* And clear WUFC until next low-power transition */ 2945 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2946 2947 /* 2948 * Required after D3->D0 transition; 2949 * will re-advertise all previous advertised speeds 2950 */ 2951 if (if_getflags(ifp) & IFF_UP) 2952 ixgbe_if_init(ctx); 2953 2954 return (0); 2955 } /* ixgbe_if_resume */ 2956 2957 /************************************************************************ 2958 * ixgbe_if_mtu_set - Ioctl mtu entry point 2959 * 2960 * Return 0 on success, EINVAL on failure 2961 ************************************************************************/ 2962 static int 2963 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 2964 { 2965 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2966 int error = 0; 2967 2968 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); 2969 2970 if (mtu > IXGBE_MAX_MTU) { 2971 error = EINVAL; 2972 } else { 2973 sc->max_frame_size = mtu + IXGBE_MTU_HDR; 2974 } 2975 2976 return error; 2977 } /* ixgbe_if_mtu_set */ 2978 2979 /************************************************************************ 2980 * ixgbe_if_crcstrip_set 2981 
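 *
 *   Toggle hardware CRC stripping, called when netmap enters or
 *   leaves native mode.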
************************************************************************/ 2982 static void 2983 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip) 2984 { 2985 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2986 struct ixgbe_hw *hw = &sc->hw; 2987 /* crc stripping is set in two places: 2988 * IXGBE_HLREG0 (modified on init_locked and hw reset) 2989 * IXGBE_RDRXCTL (set by the original driver in 2990 * ixgbe_setup_hw_rsc() called in init_locked. 2991 * We disable the setting when netmap is compiled in). 2992 * We update the values here, but also in ixgbe.c because 2993 * init_locked sometimes is called outside our control. 2994 */ 2995 uint32_t hl, rxc; 2996 2997 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2998 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2999 #ifdef NETMAP 3000 if (netmap_verbose) 3001 D("%s read HLREG 0x%x rxc 0x%x", 3002 onoff ? "enter" : "exit", hl, rxc); 3003 #endif 3004 /* hw requirements ... */ 3005 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 3006 rxc |= IXGBE_RDRXCTL_RSCACKC; 3007 if (onoff && !crcstrip) { 3008 /* keep the crc. Fast rx */ 3009 hl &= ~IXGBE_HLREG0_RXCRCSTRP; 3010 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; 3011 } else { 3012 /* reset default mode */ 3013 hl |= IXGBE_HLREG0_RXCRCSTRP; 3014 rxc |= IXGBE_RDRXCTL_CRCSTRIP; 3015 } 3016 #ifdef NETMAP 3017 if (netmap_verbose) 3018 D("%s write HLREG 0x%x rxc 0x%x", 3019 onoff ? "enter" : "exit", hl, rxc); 3020 #endif 3021 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); 3022 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); 3023 } /* ixgbe_if_crcstrip_set */ 3024 3025 /********************************************************************* 3026 * ixgbe_if_init - Init entry point 3027 * 3028 * Used in two ways: It is used by the stack as an init 3029 * entry point in network interface structure. It is also 3030 * used by the driver as a hw/sw initialization routine to 3031 * get to a consistent state. 3032 * 3033 * Return 0 on success, positive on failure 3034 **********************************************************************/ 3035 void 3036 ixgbe_if_init(if_ctx_t ctx) 3037 { 3038 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3039 if_t ifp = iflib_get_ifp(ctx); 3040 device_t dev = iflib_get_dev(ctx); 3041 struct ixgbe_hw *hw = &sc->hw; 3042 struct ix_rx_queue *rx_que; 3043 struct ix_tx_queue *tx_que; 3044 u32 txdctl, mhadd; 3045 u32 rxdctl, rxctrl; 3046 u32 ctrl_ext; 3047 3048 int i, j, err; 3049 3050 INIT_DEBUGOUT("ixgbe_if_init: begin"); 3051 3052 /* Queue indices may change with IOV mode */ 3053 ixgbe_align_all_queue_indices(sc); 3054 3055 /* reprogram the RAR[0] in case user changed it. 
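	 * (RAR[0] holds the primary unicast address; IXGBE_RAH_AV marks
	 * the entry valid.)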
*/ 3056 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV); 3057 3058 /* Get the latest mac address, User can use a LAA */ 3059 bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3060 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1); 3061 hw->addr_ctrl.rar_used_count = 1; 3062 3063 ixgbe_init_hw(hw); 3064 3065 ixgbe_initialize_iov(sc); 3066 3067 ixgbe_initialize_transmit_units(ctx); 3068 3069 /* Setup Multicast table */ 3070 ixgbe_if_multi_set(ctx); 3071 3072 /* Determine the correct mbuf pool, based on frame size */ 3073 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 3074 3075 /* Configure RX settings */ 3076 ixgbe_initialize_receive_units(ctx); 3077 3078 /* 3079 * Initialize variable holding task enqueue requests 3080 * from MSI-X interrupts 3081 */ 3082 sc->task_requests = 0; 3083 3084 /* Enable SDP & MSI-X interrupts based on adapter */ 3085 ixgbe_config_gpie(sc); 3086 3087 /* Set MTU size */ 3088 if (if_getmtu(ifp) > ETHERMTU) { 3089 /* aka IXGBE_MAXFRS on 82599 and newer */ 3090 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3091 mhadd &= ~IXGBE_MHADD_MFS_MASK; 3092 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 3093 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 3094 } 3095 3096 /* Now enable all the queues */ 3097 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 3098 struct tx_ring *txr = &tx_que->txr; 3099 3100 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 3101 txdctl |= IXGBE_TXDCTL_ENABLE; 3102 /* Set WTHRESH to 8, burst writeback */ 3103 txdctl |= (8 << 16); 3104 /* 3105 * When the internal queue falls below PTHRESH (32), 3106 * start prefetching as long as there are at least 3107 * HTHRESH (1) buffers ready. The values are taken 3108 * from the Intel linux driver 3.8.21. 3109 * Prefetching enables tx line rate even with 1 queue. 
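		 * TXDCTL field layout: PTHRESH lives in bits 6:0, HTHRESH
		 * in bits 14:8 and WTHRESH in bits 22:16, which is what the
		 * shifts here encode.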
3110 */ 3111 txdctl |= (32 << 0) | (1 << 8); 3112 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 3113 } 3114 3115 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 3116 struct rx_ring *rxr = &rx_que->rxr; 3117 3118 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3119 if (hw->mac.type == ixgbe_mac_82598EB) { 3120 /* 3121 * PTHRESH = 21 3122 * HTHRESH = 4 3123 * WTHRESH = 8 3124 */ 3125 rxdctl &= ~0x3FFFFF; 3126 rxdctl |= 0x080420; 3127 } 3128 rxdctl |= IXGBE_RXDCTL_ENABLE; 3129 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 3130 for (j = 0; j < 10; j++) { 3131 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 3132 IXGBE_RXDCTL_ENABLE) 3133 break; 3134 else 3135 msec_delay(1); 3136 } 3137 wmb(); 3138 } 3139 3140 /* Enable Receive engine */ 3141 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3142 if (hw->mac.type == ixgbe_mac_82598EB) 3143 rxctrl |= IXGBE_RXCTRL_DMBYPS; 3144 rxctrl |= IXGBE_RXCTRL_RXEN; 3145 ixgbe_enable_rx_dma(hw, rxctrl); 3146 3147 /* Set up MSI/MSI-X routing */ 3148 if (ixgbe_enable_msix) { 3149 ixgbe_configure_ivars(sc); 3150 /* Set up auto-mask */ 3151 if (hw->mac.type == ixgbe_mac_82598EB) 3152 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3153 else { 3154 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3155 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3156 } 3157 } else { /* Simple settings for Legacy/MSI */ 3158 ixgbe_set_ivar(sc, 0, 0, 0); 3159 ixgbe_set_ivar(sc, 0, 0, 1); 3160 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3161 } 3162 3163 ixgbe_init_fdir(sc); 3164 3165 /* 3166 * Check on any SFP devices that 3167 * need to be kick-started 3168 */ 3169 if (hw->phy.type == ixgbe_phy_none) { 3170 err = hw->phy.ops.identify(hw); 3171 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3172 device_printf(dev, 3173 "Unsupported SFP+ module type was detected.\n"); 3174 return; 3175 } 3176 } 3177 3178 /* Set moderation on the Link interrupt */ 3179 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR); 3180 3181 /* Enable power to the phy. 
	 */
	ixgbe_set_phy_power(hw, true);

	/* Config/Enable Link */
	ixgbe_config_link(ctx);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(sc);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(ctx);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(sc);

	/* And now turn on interrupts */
	ixgbe_if_enable_intr(ctx);

	/* Enable the use of the MBX by the VF's */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}
} /* ixgbe_if_init */

/************************************************************************
 * ixgbe_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *   (yes this is all very magic and confusing :)
 *    - entry is the register array entry
 *    - vector is the MSI-X vector for this queue
 *    - type is RX/TX/MISC
 ************************************************************************/
static void
ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (entry & 0x3)));
		ivar |= (vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */

/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
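		 * (When moderation is enabled, EITR holds the interval in
		 * 2-usec units in bits 11:3; e.g. ixgbe_max_interrupt_rate =
		 * 8000 gives (4000000 / 8000) & 0x0FF8 = 496, i.e. 62 units
		 * or ~124 usec between interrupts.)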

/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0) {
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	} else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		sc->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		/* First the RX queue entry */
		ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);

		/* Set an Initial EITR value */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
	}
	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		/* ... and the TX */
		ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
	}
	/* For the Link interrupt */
	ixgbe_set_ivar(sc, 1, sc->vector, -1);
} /* ixgbe_configure_ivars */

/************************************************************************
 * ixgbe_config_gpie
 ************************************************************************/
static void
ixgbe_config_gpie(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (sc->intr_type == IFLIB_INTR_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
		    IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
} /* ixgbe_config_gpie */

/************************************************************************
 * ixgbe_config_delay_values
 *
 *   Requires sc->max_frame_size to be set.
 ************************************************************************/
static void
ixgbe_config_delay_values(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 rxpb, frame, size, tmp;

	frame = sc->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = true;
} /* ixgbe_config_delay_values */
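
/*
 * On the watermarks computed above: the IXGBE_DV*() macros yield a
 * worst-case delay value in bits, IXGBE_BT2KB() converts that to KB,
 * and RXPBSIZE is shifted down to KB as well, so high_water is
 * effectively "packet buffer size minus in-flight allowance".  The
 * intent is that an XOFF sent at the high watermark still leaves room
 * for the data already on the wire before the link partner reacts.
 */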

/************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static u_int
ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_mc_addr *mta = sc->mta;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);
	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	mta[idx].vmdq = sc->pool;

	return (1);
} /* ixgbe_mc_filter_apply */

static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_mc_addr *mta;
	if_t ifp = iflib_get_ifp(ctx);
	u8 *update_ptr;
	u32 fctrl;
	u_int mcnt;

	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");

	mta = sc->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(ifp, ixgbe_mc_filter_apply, sc);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, true);
	}

	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);

	if (if_getflags(ifp) & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    if_getflags(ifp) & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
} /* ixgbe_if_multi_set */

/************************************************************************
 * ixgbe_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array built by ixgbe_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8 *)(mta + 1);

	return (mta->addr);
} /* ixgbe_mc_array_itr */

/************************************************************************
 * ixgbe_if_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	/* Check for pluggable optics */
	if (sc->sfp_probe)
		if (!ixgbe_sfp_probe(ctx))
			return; /* Nothing to do */

	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixgbe_if_timer */
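
/*
 * The multicast routines above cooperate through if_foreach_llmaddr():
 * the callback's return values are summed, so ixgbe_mc_filter_apply()
 * returns 1 per address copied into sc->mta and 0 once idx reaches
 * MAX_NUM_MULTICAST_ADDRESSES.  A count at the limit therefore means
 * the table overflowed, and ixgbe_if_multi_set() falls back to
 * IXGBE_FCTRL_MPE (accept all multicast) instead of programming an
 * exact filter list.
 */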

/************************************************************************
 * ixgbe_sfp_probe
 *
 *   Determine if a port had optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	bool result = false;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		sc->sfp_probe = false;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module detected!\n");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = true;
	}
out:
	return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 err, cage_full = 0;

	if (sc->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */

/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, true);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(sc->media);
	ixgbe_add_media_types(sc->ctx);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
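
/*
 * Neither tasklet above runs from the interrupt itself: ixgbe_intr()
 * and the MSI-X link handler only set bits in sc->task_requests, and
 * ixgbe_if_update_admin_status() (below) calls these handlers from
 * task context.  A successful module insert chains into the MSF
 * tasklet via IXGBE_REQUEST_TASK_MSF so the link is retrained for the
 * optics that were just identified.
 */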

/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error;

	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(sc->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
		    "PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(sc->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	/* Clear the stopped flag so stop_adapter does a full stop. */
	hw->adapter_stopped = false;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	sc->link_up = false;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
} /* ixgbe_if_stop */
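
/*
 * Note for the link-up message below: sc->link_speed carries the
 * shared code's IXGBE_LINK_SPEED_* bit flags rather than a numeric
 * rate, so the comparison against 128 (0x80, i.e.
 * IXGBE_LINK_SPEED_10GB_FULL) is what selects the "10 Gbps" label.
 */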

/************************************************************************
 * ixgbe_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);

	if (sc->link_up) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s\n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&sc->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(sc);
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    ixgbe_link_speed_to_baudrate(sc->link_speed));

			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	}

	/* Handle task requests from msix_link() */
	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	sc->task_requests = 0;

	ixgbe_update_stats_counters(sc);
} /* ixgbe_if_update_admin_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

	if (dcfg->watchdog_timer != sc->dmac ||
	    dcfg->link_speed != sc->link_speed) {
		dcfg->watchdog_timer = sc->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = sc->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */
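
/*
 * sc->dmac doubles as the DMA coalescing watchdog timer in
 * microseconds (0 = disabled); ixgbe_sysctl_dmac() further down lists
 * the timer values the hardware accepts.  The settings are reapplied
 * on every link transition because the shared code derives its
 * programming from the current link speed.
 */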

/************************************************************************
 * ixgbe_if_enable_intr
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (sc->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixgbe_if_enable_intr */

/************************************************************************
 * ixgbe_if_disable_intr
 ************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (sc->intr_type == IFLIB_INTR_MSIX)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixgbe_if_disable_intr */
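
/*
 * The 82598/82599+ asymmetry above is intentional: 82598 keeps all
 * interrupt-enable bits in a single 32-bit EIMS/EIMC pair, while
 * later MACs spread up to 64 queue vectors across EIMS_EX/EIMC_EX(0)
 * and (1).  On those MACs the legacy register retains only the
 * miscellaneous causes, which is why the disable path writes
 * 0xFFFF0000 there before clearing both _EX registers.
 */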

/************************************************************************
 * ixgbe_link_intr_enable
 ************************************************************************/
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
	struct ixgbe_hw *hw =
	    &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
} /* ixgbe_link_intr_enable */

/************************************************************************
 * ixgbe_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixgbe_enable_queue(sc, que->msix);

	return (0);
} /* ixgbe_if_rx_queue_intr_enable */

/************************************************************************
 * ixgbe_enable_queue
 ************************************************************************/
static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
} /* ixgbe_enable_queue */

/************************************************************************
 * ixgbe_disable_queue
 ************************************************************************/
static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
} /* ixgbe_disable_queue */
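
/*
 * A worked example for the queue masking above (illustration only):
 * vector 35 gives queue = 1ULL << 35, so the low 32-bit mask is zero
 * and only EIMS_EX(1)/EIMC_EX(1) is written, with bit 3 (35 - 32)
 * set.  On 82598 the same value is simply clipped against
 * IXGBE_EIMS_RTX_QUEUE and applied to the one EIMS/EIMC register.
 */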

/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ix_rx_queue *que = sc->rx_queues;
	struct ixgbe_hw *hw = &sc->hw;
	if_ctx_t ctx = sc->ctx;
	u32 eicr, eicr_mask;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 *   SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, fc;

	sc = (struct ixgbe_softc *)arg1;
	fc = sc->hw.fc.current_mode;

	error = sysctl_handle_int(oidp, &fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (fc == sc->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(sc, fc);
} /* ixgbe_sysctl_flowcntl */
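
/*
 * Usage sketch for the handler above (assuming it is attached under
 * the driver's usual "fc" sysctl node; adjust the unit number to your
 * system):
 *
 *	# sysctl dev.ix.0.fc=3		# full RX/TX pause
 *	# sysctl dev.ix.0.fc=0		# flow control off
 *
 * The accepted values correspond to the enum ixgbe_fc_* cases
 * validated in ixgbe_set_flowcntl() below.
 */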

/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 *   Flow control values:
 *     0 - off
 *     1 - rx pause
 *     2 - tx pause
 *     3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		sc->hw.fc.requested_mode = fc;
		if (sc->num_rx_queues > 1)
			ixgbe_disable_rx_drop(sc);
		break;
	case ixgbe_fc_none:
		sc->hw.fc.requested_mode = ixgbe_fc_none;
		if (sc->num_rx_queues > 1)
			ixgbe_enable_rx_drop(sc);
		break;
	default:
		return (EINVAL);
	}

	/* Don't autoneg if forcing a value */
	sc->hw.fc.disable_fc_autoneg = true;
	ixgbe_fc_enable(&sc->hw);

	return (0);
} /* ixgbe_set_flowcntl */

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 *   Enable the hardware to drop packets when the buffer is
 *   full. This is useful with multiqueue, so that no single
 *   queue being full stalls the entire RX engine. We only
 *   enable this when Multiqueue is enabled AND Flow Control
 *   is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 *   SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, advertise;

	sc = (struct ixgbe_softc *)arg1;
	advertise = sc->advertise;

	error = sysctl_handle_int(oidp, &advertise, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixgbe_set_advertise(sc, advertise);
} /* ixgbe_sysctl_advertise */
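
/*
 * Example for the advertise handler above (hypothetical values,
 * assuming the usual "advertise_speed" sysctl name): writing 0x6
 * requests 1G | 10G.  ixgbe_set_advertise() below rejects any bit
 * that get_link_capabilities() did not report for the PHY, so an
 * unsupported combination fails with EINVAL instead of being
 * silently trimmed.
 */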

/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32 err = IXGBE_NOT_IMPLEMENTED;
	bool negotiate = false;

	/* Checks to validate new value */
	if (sc->advertise == advertise) /* no change */
		return (0);

	hw = &sc->hw;

	/* No speed changes for backplane media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber))) {
		device_printf(dev,
		    "Advertised speed can only be set on copper or "
		    "multispeed fiber media types.\n");
		return (EINVAL);
	}

	if (advertise < 0x1 || advertise > 0x3F) {
		device_printf(dev,
		    "Invalid advertised speed; valid modes are 0x1 through "
		    "0x3F\n");
		return (EINVAL);
	}

	if (hw->mac.ops.get_link_capabilities) {
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(dev,
			    "Unable to determine supported advertise speeds\n");
			return (ENODEV);
		}
	}

	/* Set new value and report new advertised mode */
	if (advertise & 0x1) {
		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
			device_printf(dev,
			    "Interface does not support 100Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_100_FULL;
	}
	if (advertise & 0x2) {
		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 1Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	}
	if (advertise & 0x4) {
		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	}
	if (advertise & 0x8) {
		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10_FULL;
	}
	if (advertise & 0x10) {
		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 2.5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
	}
	if (advertise & 0x20) {
		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(hw, speed, true);
	sc->advertise = advertise;

	return (0);
} /* ixgbe_set_advertise */

/************************************************************************
 * ixgbe_get_default_advertise - Get default advertised speed settings
 *
 *   Formatted for sysctl usage.
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_get_default_advertise(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int speed;
	ixgbe_link_speed link_caps = 0;
	s32 err;
	bool negotiate = false;

	/*
	 * Advertised speed means nothing unless it's copper or
	 * multi-speed fiber
	 */
	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
	    !(hw->phy.multispeed_fiber))
		return (0);

	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
	if (err != IXGBE_SUCCESS)
		return (0);

	if (hw->mac.type == ixgbe_mac_X550) {
		/*
		 * 2.5G and 5G autonegotiation speeds on X550
		 * are disabled by default due to reported
		 * interoperability issues with some switches.
		 */
		link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
		    IXGBE_LINK_SPEED_5GB_FULL);
	}

	speed =
	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);

	return (speed);
} /* ixgbe_get_default_advertise */

/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 *   Control values:
 *     0/1 - off / on (use default value of 1000)
 *
 *   Legal timer values are:
 *     50,100,250,500,1000,2000,5000,10000
 *
 *   Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	if_t ifp = iflib_get_ifp(sc->ctx);
	int error;
	u16 newval;

	newval = sc->dmac;
	error = sysctl_handle_16(oidp, &newval, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		sc->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		sc->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		sc->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		if_init(ifp, ifp);

	return (0);
} /* ixgbe_sysctl_dmac */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 *   Sysctl to test power states
 *   Values:
 *     0      - set device to D0
 *     3      - set device to D3
 *     (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_handle_int(oidp, &new_ps, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
} /* ixgbe_sysctl_power_state */
#endif

/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 *   Sysctl to enable/disable the WoL capability,
 *   if supported by the adapter.
 *
 *   Values:
 *     0 - disabled
 *     1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	int new_wol_enabled;
	int error = 0;

	new_wol_enabled = hw->wol_enabled;
	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	new_wol_enabled = !!(new_wol_enabled);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	if (new_wol_enabled > 0 && !sc->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
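
/*
 * The WoL knobs come in pairs: the handler above gates the feature as
 * a whole, while the WUFC handler below chooses which packet types may
 * wake the system.  Both only edit soft state here; the values are
 * expected to be programmed into the hardware by the suspend/shutdown
 * path.
 */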

/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 *   Sysctl to enable/disable the types of packets that the
 *   adapter will wake up on upon receipt.
 *
 *   Flags:
 *     0x1  - Link Status Change
 *     0x2  - Magic Packet
 *     0x4  - Direct Exact
 *     0x8  - Directed Multicast
 *     0x10 - Broadcast
 *     0x20 - ARP/IPv4 Request Packet
 *     0x40 - Direct IPv4 Packet
 *     0x80 - Direct IPv6 Packet
 *
 *   Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	int error = 0;
	u32 new_wufc;

	new_wufc = sc->wufc;

	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == sc->wufc)
		return (0);

	if (new_wufc & 0xffffff00)
		return (EINVAL);

	new_wufc &= 0xff;
	/* Preserve only the bits the user cannot set. */
	new_wufc |= (0xffffff00 & sc->wufc);
	sc->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	/* TODO: use sbufs to make a string to print out */
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	/* TODO: print more config */

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
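
/*
 * On the RSS dump above: each RETA/ERETA register packs four 8-bit
 * queue indices, so the 32 legacy RETA registers form the classic
 * 128-entry redirection table, and the 96 ERETA registers on the
 * X550 family extend it to 512 entries.
 */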

/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 *   For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	/* Shift temp for output */
	reg = reg >> 8;

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 *   Reports (directly from the PHY) whether the current PHY
 *   temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Get occurrence bit */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */

/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 *   Sysctl to set EEE power saving feature
 *   Values:
 *     0      - disable EEE
 *     1      - enable EEE
 *     (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	if_t ifp = iflib_get_ifp(sc->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = ixgbe_setup_eee(&sc->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	if_init(ifp, ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	else
		sc->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
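
/*
 * Note on the EEE handler above: ixgbe_setup_eee() negotiates the
 * change with the firmware/PHY, and the subsequent if_init() restarts
 * auto-negotiation so the new advertisement takes effect; feat_en is
 * only updated once setup succeeds, keeping the cached state honest.
 */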

/************************************************************************
 * ixgbe_init_device_features
 ************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
	sc->feat_cap = IXGBE_FEATURE_NETMAP
	    | IXGBE_FEATURE_RSS
	    | IXGBE_FEATURE_MSI
	    | IXGBE_FEATURE_MSIX
	    | IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (sc->feat_cap & IXGBE_FEATURE_EEE)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev,
			    "Device does not support Flow Director. "
			    "Leaving disabled.\n");
	}
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		sc->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		sc->feat_en &= ~IXGBE_FEATURE_RSS;
		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
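
/*
 * Invariant maintained above: sc->feat_en must remain a subset of
 * sc->feat_cap.  Capabilities are derived from the MAC/device ID,
 * defaults and tunables then opt features in, and the final pass
 * strips anything whose dependencies (notably MSI-X) are missing
 * from both masks.
 */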

/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	uint16_t phyfw = 0;
	int status;
	const char *space = "";

	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
	status = ixgbe_get_phy_firmware_version(hw, &phyfw);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", space,
		    nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
		space = " ";
	}

	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
		space = " ";
	}

	if (phyfw != 0 && status == IXGBE_SUCCESS)
		sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
} /* ixgbe_sbuf_fw_version */

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */