/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, "Intel(R) X520 82599 LS"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
	/* required last entry */
	PVID_END
};
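
/*
 * Note on how this table is consumed: iflib_device_probe() matches the PCI
 * vendor/device IDs seen at probe time against these entries, and the
 * IFLIB_PNP_INFO() declaration further below exports the same table as PNP
 * metadata so devmatch(8) can autoload the module for matching hardware.
 */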

static void *ixgbe_register(device_t);
static int  ixgbe_if_attach_pre(if_ctx_t);
static int  ixgbe_if_attach_post(if_ctx_t);
static int  ixgbe_if_detach(if_ctx_t);
static int  ixgbe_if_shutdown(if_ctx_t);
static int  ixgbe_if_suspend(if_ctx_t);
static int  ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixgbe_if_media_change(if_ctx_t);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int  ixgbe_if_promisc_set(if_ctx_t, int);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int  ixgbe_allocate_pci_resources(if_ctx_t);
static int  ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int  ixgbe_msix_link(void *);
static int  ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int  ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
static int  ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.  This only works as a compile
 * option right now, since it is set during attach; set this to
 * 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Turning this on allows the use of unsupported SFP+ modules;
 * note that if you do so, you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation,
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;
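
/*
 * Usage sketch (values are examples, not recommendations): the CTLFLAG_RDTUN
 * knobs above are boot-time tunables, normally set from /boot/loader.conf:
 *
 *   hw.ix.enable_msix="0"        # force MSI/legacy interrupts for testing
 *   hw.ix.flow_control="0"       # ixgbe_fc_none, assuming the enum starts at 0
 *   hw.ix.unsupported_sfp="1"    # allow unsupported SFP+ modules
 *
 * enable_aim is CTLFLAG_RWTUN, so it can also be flipped at runtime with
 * sysctl(8).  Assuming IXGBE_LOW_LATENCY is 128, the default
 * max_interrupt_rate above works out to 4000000 / 128 = 31250 interrupts/s.
 */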

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE * 4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE * 4,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(sc->num_tx_queues > 0);
	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		txr->sc = que->sc = sc;

		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(
		    sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE,
		    M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];

		txr->bytes = 0;
		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (sc->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixgbe_if_queues_free(ctx);

	return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int i;

	MPASS(sc->num_rx_queues > 0);
	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		/* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_RDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = sc->tx_queues;
	struct ix_rx_queue *rx_que = sc->rx_queues;
	int i;

	if (tx_que != NULL) {
		for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
				break;

			free(txr->tx_rsq, M_IXGBE);
			txr->tx_rsq = NULL;
		}

		free(sc->tx_queues, M_IXGBE);
		sc->tx_queues = NULL;
	}
	if (rx_que != NULL) {
		free(sc->rx_queues, M_IXGBE);
		sc->rx_queues = NULL;
	}
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}
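
	/*
	 * Worked example of the packing above (illustrative only; RSS
	 * disabled, four RX queues, index_mult == 1): queue_id cycles
	 * 0,1,2,3,0,... and each group of four entries is shifted into a
	 * 32-bit register low byte first, so RETA(0) through RETA(31)
	 * each end up holding 0x03020100.  Entries 128-511 (X550 family
	 * only) land in the ERETA registers.
	 */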

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1)
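
/*
 * Assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, SRRCTL.BSIZEPKT is expressed
 * in 1 KB units, so adding BSIZEPKT_ROUNDUP (1023) before the shift below
 * rounds the RX buffer size up to the next 1 KB boundary, e.g. a 2048-byte
 * mbuf cluster gives (2048 + 1023) >> 10 = 2.
 */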

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ixgbe_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (if_getmtu(ifp) > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (if_getcapenable(ifp) & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}
} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->hw.back = sc;
	sc->ctx = ctx;
	sc->dev = dev;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Determine hardware revision */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		return (ENXIO);
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_pci;
	}

	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
		device_printf(dev, "Firmware recovery mode detected. Limiting "
		    "functionality.\nRefer to the Intel(R) Ethernet Adapters "
		    "and Devices User Guide for details on firmware recovery "
		    "mode.\n");
		error = ENOSYS;
		goto err_pci;
	}

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	ixgbe_init_device_features(sc);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(sc);

	/* Verify adapter fan is still functional (if applicable) */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(sc, esdp, false);
	}

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	hw->phy.reset_if_overtemp = true;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		sc->sfp_probe = true;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_pci;
	} else if (error) {
		device_printf(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_pci;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_pci;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev,
		    "This device is a pre-production adapter/LOM. Please be "
		    "aware there may be issues associated with your "
		    "hardware.\nIf you are experiencing problems please "
		    "contact your Intel or hardware representative who "
		    "provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_pci;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		scctx->isc_rss_table_size = 512;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
		break;
	default:
		scctx->isc_rss_table_size = 128;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
	}

	/* Allow legacy interrupts */
	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN),
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);

	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
	} else {
		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	}

	scctx->isc_msix_bar = pci_msix_table_bar(dev);

	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

	return (0);

err_pci:
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_pci_resources(ctx);

	return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	if (sc->intr_type == IFLIB_INTR_LEGACY &&
	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev,
		    "Device does not support legacy interrupts\n");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(sc, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	ixgbe_initialize_iov(sc);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(sc);
	ixgbe_add_hw_stats(sc);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(sc);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass sc.
	 */
	ixgbe_bypass_init(sc);

	/* Display NVM and Option ROM versions */
	ixgbe_print_fw_version(ctx);

	/* Set an initial dmac value */
	sc->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	sc->advertise = ixgbe_get_default_advertise(sc);

	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u16 dev_caps = 0;

	/* Find out WoL support for port */
	sc->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	    hw->bus.func == 0))
		sc->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));

	sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

	ixgbe_add_media_types(ctx);

	/* Autoselect media by default */
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	case IFCOUNTER_OMCASTS:
		return (sc->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (sc->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (0);
	case IFCOUNTER_IERRORS:
		return (sc->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_if_get_counter */
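
/*
 * Note on the next function: ifdi_i2c_req is the backend for the SIOCGI2C
 * ioctl, which is how userland tools (ifconfig(8)'s SFP display, for
 * example) read SFP+ module EEPROM pages through iflib.
 */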
1261 * 1262 * @returns true if iflib needs to reinit the interface 1263 */ 1264 static bool 1265 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) 1266 { 1267 switch (event) { 1268 case IFLIB_RESTART_VLAN_CONFIG: 1269 return (false); 1270 default: 1271 return (true); 1272 } 1273 } 1274 1275 /************************************************************************ 1276 * ixgbe_add_media_types 1277 ************************************************************************/ 1278 static void 1279 ixgbe_add_media_types(if_ctx_t ctx) 1280 { 1281 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1282 struct ixgbe_hw *hw = &sc->hw; 1283 device_t dev = iflib_get_dev(ctx); 1284 u64 layer; 1285 1286 layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw); 1287 1288 /* Media types with matching FreeBSD media defines */ 1289 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) 1290 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL); 1291 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) 1292 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1293 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) 1294 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 1295 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 1296 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 1297 1298 if (hw->mac.type == ixgbe_mac_X550) { 1299 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL); 1300 ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL); 1301 } 1302 1303 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1304 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 1305 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0, 1306 NULL); 1307 1308 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 1309 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL); 1310 if (hw->phy.multispeed_fiber) 1311 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, 1312 NULL); 1313 } 1314 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 1315 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 1316 if (hw->phy.multispeed_fiber) 1317 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, 1318 NULL); 1319 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 1320 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 1321 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 1322 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 1323 1324 #ifdef IFM_ETH_XTYPE 1325 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 1326 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL); 1327 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) 1328 ifmedia_add( sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); 1329 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 1330 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL); 1331 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) 1332 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL); 1333 #else 1334 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 1335 device_printf(dev, "Media supported: 10GbaseKR\n"); 1336 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); 1337 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 1338 } 1339 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 1340 device_printf(dev, "Media supported: 10GbaseKX4\n"); 1341 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); 1342 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 1343 } 1344 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 1345 device_printf(dev, "Media supported: 1000baseKX\n"); 1346 device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); 1347 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL); 1348 } 1349 
if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { 1350 device_printf(dev, "Media supported: 2500baseKX\n"); 1351 device_printf(dev, "2500baseKX mapped to 2500baseSX\n"); 1352 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL); 1353 } 1354 #endif 1355 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 1356 device_printf(dev, "Media supported: 1000baseBX\n"); 1357 1358 if (hw->device_id == IXGBE_DEV_ID_82598AT) { 1359 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 1360 0, NULL); 1361 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1362 } 1363 1364 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1365 } /* ixgbe_add_media_types */ 1366 1367 /************************************************************************ 1368 * ixgbe_is_sfp 1369 ************************************************************************/ 1370 static inline bool 1371 ixgbe_is_sfp(struct ixgbe_hw *hw) 1372 { 1373 switch (hw->mac.type) { 1374 case ixgbe_mac_82598EB: 1375 if (hw->phy.type == ixgbe_phy_nl) 1376 return (true); 1377 return (false); 1378 case ixgbe_mac_82599EB: 1379 switch (hw->mac.ops.get_media_type(hw)) { 1380 case ixgbe_media_type_fiber: 1381 case ixgbe_media_type_fiber_qsfp: 1382 return (true); 1383 default: 1384 return (false); 1385 } 1386 case ixgbe_mac_X550EM_x: 1387 case ixgbe_mac_X550EM_a: 1388 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) 1389 return (true); 1390 return (false); 1391 default: 1392 return (false); 1393 } 1394 } /* ixgbe_is_sfp */ 1395 1396 /************************************************************************ 1397 * ixgbe_config_link 1398 ************************************************************************/ 1399 static void 1400 ixgbe_config_link(if_ctx_t ctx) 1401 { 1402 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1403 struct ixgbe_hw *hw = &sc->hw; 1404 u32 autoneg, err = 0; 1405 bool sfp, negotiate; 1406 1407 sfp = ixgbe_is_sfp(hw); 1408 1409 if (sfp) { 1410 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; 1411 iflib_admin_intr_deferred(ctx); 1412 } else { 1413 if (hw->mac.ops.check_link) 1414 err = ixgbe_check_link(hw, &sc->link_speed, 1415 &sc->link_up, false); 1416 if (err) 1417 return; 1418 autoneg = hw->phy.autoneg_advertised; 1419 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 1420 err = hw->mac.ops.get_link_capabilities(hw, &autoneg, 1421 &negotiate); 1422 if (err) 1423 return; 1424 1425 if (hw->mac.type == ixgbe_mac_X550 && 1426 hw->phy.autoneg_advertised == 0) { 1427 /* 1428 * 2.5G and 5G autonegotiation speeds on X550 1429 * are disabled by default due to reported 1430 * interoperability issues with some switches. 1431 * 1432 * The second condition checks if any operations 1433 * involving setting autonegotiation speeds have 1434 * been performed prior to this ixgbe_config_link() 1435 * call. 1436 * 1437 * If hw->phy.autoneg_advertised does not 1438 * equal 0, this means that the user might have 1439 * set autonegotiation speeds via the sysctl 1440 * before bringing the interface up. In this 1441 * case, we should not disable 2.5G and 5G 1442 * since that speeds might be selected by the 1443 * user. 1444 * 1445 * Otherwise (i.e. if hw->phy.autoneg_advertised 1446 * is set to 0), it is the first time we set 1447 * autonegotiation preferences and the default 1448 * set of speeds should exclude 2.5G and 5G. 
1449 */ 1450 autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL | 1451 IXGBE_LINK_SPEED_5GB_FULL); 1452 } 1453 1454 if (hw->mac.ops.setup_link) 1455 err = hw->mac.ops.setup_link(hw, autoneg, 1456 sc->link_up); 1457 } 1458 } /* ixgbe_config_link */ 1459 1460 /************************************************************************ 1461 * ixgbe_update_stats_counters - Update board statistics counters. 1462 ************************************************************************/ 1463 static void 1464 ixgbe_update_stats_counters(struct ixgbe_softc *sc) 1465 { 1466 struct ixgbe_hw *hw = &sc->hw; 1467 struct ixgbe_hw_stats *stats = &sc->stats.pf; 1468 u32 missed_rx = 0, bprc, lxon, lxoff, total; 1469 u32 lxoffrxc; 1470 u64 total_missed_rx = 0; 1471 1472 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 1473 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 1474 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 1475 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 1476 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0)); 1477 1478 for (int i = 0; i < 16; i++) { 1479 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 1480 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 1481 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 1482 } 1483 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 1484 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 1485 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 1486 1487 /* Hardware workaround, gprc counts missed packets */ 1488 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 1489 stats->gprc -= missed_rx; 1490 1491 if (hw->mac.type != ixgbe_mac_82598EB) { 1492 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + 1493 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 1494 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + 1495 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 1496 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) + 1497 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 1498 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 1499 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 1500 stats->lxoffrxc += lxoffrxc; 1501 } else { 1502 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 1503 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 1504 stats->lxoffrxc += lxoffrxc; 1505 /* 82598 only has a counter in the high register */ 1506 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 1507 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 1508 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 1509 } 1510 1511 /* 1512 * For watchdog management we need to know if we have been paused 1513 * during the last interval, so capture that here. 1514 */ 1515 if (lxoffrxc) 1516 sc->shared->isc_pause_frames = 1; 1517 1518 /* 1519 * Workaround: mprc hardware is incorrectly counting 1520 * broadcasts, so for now we subtract those. 
1521 */ 1522 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 1523 stats->bprc += bprc; 1524 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 1525 if (hw->mac.type == ixgbe_mac_82598EB) 1526 stats->mprc -= bprc; 1527 1528 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 1529 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 1530 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 1531 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 1532 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 1533 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 1534 1535 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 1536 stats->lxontxc += lxon; 1537 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 1538 stats->lxofftxc += lxoff; 1539 total = lxon + lxoff; 1540 1541 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 1542 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 1543 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 1544 stats->gptc -= total; 1545 stats->mptc -= total; 1546 stats->ptc64 -= total; 1547 stats->gotc -= total * ETHER_MIN_LEN; 1548 1549 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 1550 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 1551 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 1552 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 1553 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 1554 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 1555 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 1556 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 1557 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 1558 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 1559 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 1560 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 1561 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 1562 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 1563 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 1564 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 1565 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 1566 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 1567 /* Only read FCOE on 82599 */ 1568 if (hw->mac.type != ixgbe_mac_82598EB) { 1569 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 1570 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 1571 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 1572 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 1573 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 1574 } 1575 1576 /* Fill out the OS statistics structure */ 1577 IXGBE_SET_IPACKETS(sc, stats->gprc); 1578 IXGBE_SET_OPACKETS(sc, stats->gptc); 1579 IXGBE_SET_IBYTES(sc, stats->gorc); 1580 IXGBE_SET_OBYTES(sc, stats->gotc); 1581 IXGBE_SET_IMCASTS(sc, stats->mprc); 1582 IXGBE_SET_OMCASTS(sc, stats->mptc); 1583 IXGBE_SET_COLLISIONS(sc, 0); 1584 IXGBE_SET_IQDROPS(sc, total_missed_rx); 1585 1586 /* 1587 * Aggregate following types of errors as RX errors: 1588 * - CRC error count, 1589 * - illegal byte error count, 1590 * - missed packets count, 1591 * - length error count, 1592 * - undersized packets count, 1593 * - fragmented packets count, 1594 * - oversized packets count, 1595 * - jabber count. 1596 */ 1597 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc + 1598 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc + 1599 stats->rjc); 1600 } /* ixgbe_update_stats_counters */ 1601 1602 /************************************************************************ 1603 * ixgbe_add_hw_stats 1604 * 1605 * Add sysctl variables, one per statistic, to the system. 
 ************************************************************************/
static void
ixgbe_add_hw_stats(struct ixgbe_softc *sc)
{
    device_t dev = iflib_get_dev(sc->ctx);
    struct ix_rx_queue *rx_que;
    struct ix_tx_queue *tx_que;
    struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
    struct sysctl_oid *tree = device_get_sysctl_tree(dev);
    struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
    struct ixgbe_hw_stats *stats = &sc->stats.pf;
    struct sysctl_oid *stat_node, *queue_node;
    struct sysctl_oid_list *stat_list, *queue_list;
    int i;

#define QUEUE_NAME_LEN 32
    char namebuf[QUEUE_NAME_LEN];

    /* Driver Statistics */
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
        CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
        CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
        CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");

    for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
        i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;

        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
            ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
            ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
            CTLFLAG_RD, &txr->tso_tx, "TSO");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
            CTLFLAG_RD, &txr->total_packets,
            "Queue Packets Transmitted");
    }

    for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
        i++, rx_que++) {
        struct rx_ring *rxr = &rx_que->rxr;

        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
            CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
            &sc->rx_queues[i], 0,
            ixgbe_sysctl_interrupt_rate_handler, "IU",
            "Interrupt Rate");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
            CTLFLAG_RD, &(sc->rx_queues[i].irqs),
            "irqs on this queue");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
            ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
            ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
            CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
            CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
            CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
            CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
    }

    /* MAC stats get their own sub node */
    stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
    stat_list = SYSCTL_CHILDREN(stat_node);

    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
        CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
        CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
        CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
        CTLFLAG_RD, &stats->errbc, "Byte Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
        CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
        CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
        CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
        CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
        CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");

    /* Flow Control stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
        CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
        CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
        CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
        CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

    /* Packet Reception Stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
        CTLFLAG_RD, &stats->tor, "Total Octets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
        CTLFLAG_RD, &stats->gorc, "Good Octets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
        CTLFLAG_RD, &stats->tpr, "Total Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
        CTLFLAG_RD, &stats->gprc, "Good Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
        CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
        CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
        CTLFLAG_RD, &stats->prc64, "64 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
        CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
        CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
        CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
        CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
        CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
        CTLFLAG_RD, &stats->ruc, "Receive Undersized");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
        CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
        CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
        CTLFLAG_RD, &stats->rjc, "Received Jabber");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
        CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
        CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
        CTLFLAG_RD, &stats->xec, "Checksum Errors");

    /* Packet Transmission Stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
        CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
        CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
        CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
        CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
        CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
        CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
        CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
        CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
        CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
        CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
        CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
        CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 * Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
    struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!txr)
        return (0);

    val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_tdh_handler */

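/*
 * Like ixgbe_sysctl_tdh_handler above, the TDT/RDH/RDT handlers below
 * are read-only snapshots: they read the current register value and
 * report it through sysctl_handle_int(); a value written by the user
 * is accepted but ignored.
 */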
/************************************************************************
 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
 *
 * Retrieves the TDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
{
    struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!txr)
        return (0);

    val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_tdt_handler */

/************************************************************************
 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
 *
 * Retrieves the RDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
{
    struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!rxr)
        return (0);

    val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_rdh_handler */

/************************************************************************
 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
 *
 * Retrieves the RDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
{
    struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!rxr)
        return (0);

    val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_rdt_handler */

/************************************************************************
 * ixgbe_if_vlan_register
 *
 * Run via the vlan config EVENT; it lets us use the HW filter
 * table since we can get the vlan id. This just creates the
 * entry in the soft version of the VFTA; init will repopulate
 * the real table.
 ************************************************************************/
static void
ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    sc->shadow_vfta[index] |= (1 << bit);
    ++sc->num_vlans;
    ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_register */

/************************************************************************
 * ixgbe_if_vlan_unregister
 *
 * Run via the vlan unconfig EVENT; removes our entry from the
 * soft VFTA.
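 *
 * The shadow VFTA is an array of 32-bit words: VLAN id v maps to
 * word (v >> 5) & 0x7F and bit (v & 0x1F), so e.g. VLAN 100 lands
 * in word 3, bit 4.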
 ************************************************************************/
static void
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    sc->shadow_vfta[index] &= ~(1 << bit);
    --sc->num_vlans;
    /* Re-init to load the changes */
    ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_unregister */

/************************************************************************
 * ixgbe_setup_vlan_hw_support
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
    if_t ifp = iflib_get_ifp(ctx);
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct rx_ring *rxr;
    int i;
    u32 ctrl;

    /*
     * We get here through init_locked, meaning a soft reset;
     * that has already cleared the VFTA and other state, so if
     * no vlans have been registered, do nothing.
     */
    if (sc->num_vlans == 0 ||
        (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
        /* Clear the vlan hw flag */
        for (i = 0; i < sc->num_rx_queues; i++) {
            rxr = &sc->rx_queues[i].rxr;
            /* On 82599 the VLAN enable is per-queue in RXDCTL */
            if (hw->mac.type != ixgbe_mac_82598EB) {
                ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
                ctrl &= ~IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
            }
            rxr->vtag_strip = false;
        }
        ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        /* Disable the VLAN filter table */
        ctrl |= IXGBE_VLNCTRL_CFIEN;
        ctrl &= ~IXGBE_VLNCTRL_VFE;
        if (hw->mac.type == ixgbe_mac_82598EB)
            ctrl &= ~IXGBE_VLNCTRL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
        return;
    }

    /* Setup the queues for vlans */
    if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
        for (i = 0; i < sc->num_rx_queues; i++) {
            rxr = &sc->rx_queues[i].rxr;
            /* On 82599 the VLAN enable is per-queue in RXDCTL */
            if (hw->mac.type != ixgbe_mac_82598EB) {
                ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
                ctrl |= IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
            }
            rxr->vtag_strip = true;
        }
    }

    if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
        return;
    /*
     * A soft reset zeroes out the VFTA, so
     * we need to repopulate it now.
     */
    for (i = 0; i < IXGBE_VFTA_SIZE; i++)
        if (sc->shadow_vfta[i] != 0)
            IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
                sc->shadow_vfta[i]);

    ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
    /* Enable the filter table if HW filtering is enabled */
    if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
        ctrl &= ~IXGBE_VLNCTRL_CFIEN;
        ctrl |= IXGBE_VLNCTRL_VFE;
    }
    if (hw->mac.type == ixgbe_mac_82598EB)
        ctrl |= IXGBE_VLNCTRL_VME;
    IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */

/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
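 *
 * For most adapters this comes straight from the shared-code bus
 * info; devices behind an internal PCIe switch instead walk up the
 * PCI tree and decode the slot's Link Status register directly.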
 ************************************************************************/
static void
ixgbe_get_slot_info(struct ixgbe_softc *sc)
{
    device_t dev = iflib_get_dev(sc->ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int bus_info_valid = true;
    u32 offset;
    u16 link;

    /* Some devices are behind an internal bridge */
    switch (hw->device_id) {
    case IXGBE_DEV_ID_82599_SFP_SF_QP:
    case IXGBE_DEV_ID_82599_QSFP_SF_QP:
        goto get_parent_info;
    default:
        break;
    }

    ixgbe_get_bus_info(hw);

    /*
     * Some devices don't use PCI-E, but there is no need
     * to display "Unknown" for bus speed and width.
     */
    switch (hw->mac.type) {
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        return;
    default:
        goto display;
    }

get_parent_info:
    /*
     * For the Quad port adapter we need to parse back
     * up the PCI tree to find the speed of the expansion
     * slot into which this adapter is plugged. A bit more work.
     */
    dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
    device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
        pci_get_slot(dev), pci_get_function(dev));
#endif
    dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
    device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
        pci_get_slot(dev), pci_get_function(dev));
#endif
    /* Now get the PCI Express Capabilities offset */
    if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
        /*
         * Hmm...can't get PCI-Express capabilities.
         * Falling back to default method.
         */
        bus_info_valid = false;
        ixgbe_get_bus_info(hw);
        goto display;
    }
    /* ...and read the Link Status Register */
    link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
    ixgbe_set_pci_config_data_generic(hw, link);

display:
    device_printf(dev, "PCI Express Bus: Speed %s %s\n",
        ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
        (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
        (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
        "Unknown"),
        ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
        (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
        (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
        "Unknown"));

    if (bus_info_valid) {
        if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
            ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
            (hw->bus.speed == ixgbe_bus_speed_2500))) {
            device_printf(dev,
                "PCI-Express bandwidth available for this card "
                "is not sufficient for optimal performance.\n");
            device_printf(dev,
                "For optimal performance a x8 PCIE, or x4 PCIE "
                "Gen2 slot is required.\n");
        }
        if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
            ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
            (hw->bus.speed < ixgbe_bus_speed_8000))) {
            device_printf(dev,
                "PCI-Express bandwidth available for this card "
                "is not sufficient for optimal performance.\n");
            device_printf(dev,
                "For optimal performance a x8 PCIE Gen3 slot is "
                "required.\n");
        }
    } else
        device_printf(dev, "Unable to determine slot speed/width. "
            "The speed/width reported are that of the internal "
            "switch.\n");

    return;
} /* ixgbe_get_slot_info */

/************************************************************************
 * ixgbe_if_msix_intr_assign
 *
 * Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *rx_que = sc->rx_queues;
    struct ix_tx_queue *tx_que;
    int error, rid, vector = 0;
    char buf[16];

    /* The admin queue is vector 0 */
    rid = vector + 1;
    for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
        rid = vector + 1;

        snprintf(buf, sizeof(buf), "rxq%d", i);
        error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
            IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
            buf);

        if (error) {
            device_printf(iflib_get_dev(ctx),
                "Failed to allocate queue interrupt %d, error: %d\n",
                i, error);
            sc->num_rx_queues = i + 1;
            goto fail;
        }

        rx_que->msix = vector;
    }
    for (int i = 0; i < sc->num_tx_queues; i++) {
        snprintf(buf, sizeof(buf), "txq%d", i);
        tx_que = &sc->tx_queues[i];
        tx_que->msix = i % sc->num_rx_queues;
        iflib_softirq_alloc_generic(ctx,
            &sc->rx_queues[tx_que->msix].que_irq,
            IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
    }
    rid = vector + 1;
    error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
        IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
    if (error) {
        device_printf(iflib_get_dev(ctx),
            "Failed to register admin handler\n");
        return (error);
    }

    sc->vector = vector;

    return (0);
fail:
    iflib_irq_free(ctx, &sc->irq);
    rx_que = sc->rx_queues;
    for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
        iflib_irq_free(ctx, &rx_que->que_irq);

    return (error);
} /* ixgbe_if_msix_intr_assign */

static inline void
ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
{
    uint32_t newitr = 0;
    struct rx_ring *rxr = &que->rxr;

    /*
     * Do Adaptive Interrupt Moderation:
     * - Write out last calculated setting
     * - Calculate based on average size over
     *   the last interval.
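     *   (The new EITR value is roughly bytes/packets + 24 for frame
     *   and CRC overhead, capped at 3000, then scaled down: /3 in
     *   the 300-1200 midrange, /2 otherwise. E.g. an average of
     *   1500-byte frames gives 1524 -> 762.)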
     */
    if (que->eitr_setting) {
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
            que->eitr_setting);
    }

    que->eitr_setting = 0;
    /* Idle, do nothing */
    if (rxr->bytes == 0) {
        return;
    }

    if ((rxr->bytes) && (rxr->packets)) {
        newitr = (rxr->bytes / rxr->packets);
    }

    newitr += 24; /* account for hardware frame, crc */
    /* set an upper boundary */
    newitr = min(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200)) {
        newitr = (newitr / 3);
    } else {
        newitr = (newitr / 2);
    }

    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        newitr |= newitr << 16;
    } else {
        newitr |= IXGBE_EITR_CNT_WDIS;
    }

    /* save for next interrupt */
    que->eitr_setting = newitr;

    /* Reset state */
    rxr->bytes = 0;
    rxr->packets = 0;

    return;
} /* ixgbe_perform_aim */

/*********************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 **********************************************************************/
static int
ixgbe_msix_que(void *arg)
{
    struct ix_rx_queue *que = arg;
    struct ixgbe_softc *sc = que->sc;
    if_t ifp = iflib_get_ifp(que->sc->ctx);

    /* Protect against spurious interrupts */
    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
        return (FILTER_HANDLED);

    ixgbe_disable_queue(sc, que->msix);
    ++que->irqs;

    /* Check for AIM */
    if (sc->enable_aim) {
        ixgbe_perform_aim(sc, que);
    }

    return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_msix_que */

/************************************************************************
 * ixgbe_if_media_status - Media Ioctl callback
 *
 * Called whenever the user queries the status of
 * the interface using ifconfig.
 ************************************************************************/
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int layer;

    INIT_DEBUGOUT("ixgbe_if_media_status: begin");

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_active)
        return;

    ifmr->ifm_status |= IFM_ACTIVE;
    layer = sc->phy_layer;

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
        layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
        layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_100_FULL:
            ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_10_FULL:
            ifmr->ifm_active |= IFM_10_T | IFM_FDX;
            break;
        }
    if (hw->mac.type == ixgbe_mac_X550)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_5GB_FULL:
            ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
        layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
            break;
        }
    /*
     * XXX: These need to use the proper media types once
     * they're added.
     */
#ifndef IFM_ETH_XTYPE
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
            break;
        }
    else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
        layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
            break;
        }
#else
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
            break;
        }
    else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
        layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
            break;
        }
#endif

    /* If nothing is recognized... */
    if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
        ifmr->ifm_active |= IFM_UNKNOWN;

    /* Display current flow control setting used on link */
    if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
        hw->fc.current_mode == ixgbe_fc_full)
        ifmr->ifm_active |= IFM_ETH_RXPAUSE;
    if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
        hw->fc.current_mode == ixgbe_fc_full)
        ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_if_media_status */

/************************************************************************
 * ixgbe_if_media_change - Media Ioctl callback
 *
 * Called when the user changes speed/duplex using
 * the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ifmedia *ifm = iflib_get_media(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    ixgbe_link_speed speed = 0;

    INIT_DEBUGOUT("ixgbe_if_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    if (hw->phy.media_type == ixgbe_media_type_backplane)
        return (EPERM);

    /*
     * We don't actually need to check against the supported
     * media types of the adapter; ifmedia will take care of
     * that for us.
     */
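    /*
     * The speed mask built below is mirrored into sc->advertise
     * as a bitmask: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M,
     * 0x10 = 2.5G, 0x20 = 5G.
     */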
    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
    case IFM_10G_T:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
    case IFM_10G_LRM:
    case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
    case IFM_10G_SR: /* KR, too */
    case IFM_10G_CX4: /* KX4 */
#else
    case IFM_10G_KR:
    case IFM_10G_KX4:
#endif
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
#ifndef IFM_ETH_XTYPE
    case IFM_1000_CX: /* KX */
#else
    case IFM_1000_KX:
#endif
    case IFM_1000_LX:
    case IFM_1000_SX:
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        break;
    case IFM_1000_T:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        break;
    case IFM_10G_TWINAX:
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
    case IFM_5000_T:
        speed |= IXGBE_LINK_SPEED_5GB_FULL;
        break;
    case IFM_2500_T:
        speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
        break;
    case IFM_100_TX:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        break;
    case IFM_10_T:
        speed |= IXGBE_LINK_SPEED_10_FULL;
        break;
    default:
        goto invalid;
    }

    hw->mac.autotry_restart = true;
    hw->mac.ops.setup_link(hw, speed, true);
    sc->advertise =
        ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
        ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
        ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
        ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
        ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
        ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);

    return (0);

invalid:
    device_printf(iflib_get_dev(ctx), "Invalid media type!\n");

    return (EINVAL);
} /* ixgbe_if_media_change */

/************************************************************************
 * ixgbe_if_promisc_set - Set promiscuous mode
 ************************************************************************/
static int
ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    u32 rctl;
    int mcnt = 0;

    rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
    rctl &= (~IXGBE_FCTRL_UPE);
    if (if_getflags(ifp) & IFF_ALLMULTI)
        mcnt = MAX_NUM_MULTICAST_ADDRESSES;
    else {
        mcnt = min(if_llmaddr_count(ifp),
            MAX_NUM_MULTICAST_ADDRESSES);
    }
    if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
        rctl &= (~IXGBE_FCTRL_MPE);
    IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);

    if (if_getflags(ifp) & IFF_PROMISC) {
        rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
    } else if (if_getflags(ifp) & IFF_ALLMULTI) {
        rctl |= IXGBE_FCTRL_MPE;
        rctl &= ~IXGBE_FCTRL_UPE;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
    }
    return (0);
} /* ixgbe_if_promisc_set */

/************************************************************************
 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
    struct ixgbe_softc *sc = arg;
    struct ixgbe_hw *hw = &sc->hw;
    u32 eicr, eicr_mask;
    s32 retval;

    ++sc->link_irq;

    /* Pause other interrupts */
    IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

    /* First get the cause */
    eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
    /* Be sure the queue bits are not cleared */
    eicr &= ~IXGBE_EICR_RTX_QUEUE;
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

    /* Link status change */
    if (eicr & IXGBE_EICR_LSC) {
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
        sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
    }

    if (sc->hw.mac.type != ixgbe_mac_82598EB) {
        if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
            (eicr & IXGBE_EICR_FLOW_DIR)) {
            /* This is probably overkill :) */
            if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
                return (FILTER_HANDLED);
            /* Disable the interrupt */
            IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
            sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
        } else if (eicr & IXGBE_EICR_ECC) {
            device_printf(iflib_get_dev(sc->ctx),
                "Received ECC Err, initiating reset\n");
            hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
            ixgbe_reset_hw(hw);
            IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
        }

        /* Check for over temp condition */
        if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
            switch (sc->hw.mac.type) {
            case ixgbe_mac_X550EM_a:
                if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
                    break;
                IXGBE_WRITE_REG(hw, IXGBE_EIMC,
                    IXGBE_EICR_GPI_SDP0_X550EM_a);
                IXGBE_WRITE_REG(hw, IXGBE_EICR,
                    IXGBE_EICR_GPI_SDP0_X550EM_a);
                retval = hw->phy.ops.check_overtemp(hw);
                if (retval != IXGBE_ERR_OVERTEMP)
                    break;
                device_printf(iflib_get_dev(sc->ctx),
                    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
                device_printf(iflib_get_dev(sc->ctx),
                    "System shutdown required!\n");
                break;
            default:
                if (!(eicr & IXGBE_EICR_TS))
                    break;
                retval = hw->phy.ops.check_overtemp(hw);
                if (retval != IXGBE_ERR_OVERTEMP)
                    break;
                device_printf(iflib_get_dev(sc->ctx),
                    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
                device_printf(iflib_get_dev(sc->ctx),
                    "System shutdown required!\n");
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
                break;
            }
        }

        /* Check for VF message */
        if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
            (eicr & IXGBE_EICR_MAILBOX))
            sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
    }

    if (ixgbe_is_sfp(hw)) {
        /* Pluggable optics-related interrupt */
        if (hw->mac.type >= ixgbe_mac_X540)
            eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
        else
            eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

        if (eicr & eicr_mask) {
            IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
            sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
        }

        if ((hw->mac.type == ixgbe_mac_82599EB) &&
            (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
            IXGBE_WRITE_REG(hw, IXGBE_EICR,
                IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
            sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
        }
    }

    /* Check for fan failure */
    if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
        ixgbe_check_fan_failure(sc, eicr, true);
        IXGBE_WRITE_REG(hw, IXGBE_EICR,
            IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
    }

    /* External PHY interrupt */
    if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
        (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
        sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
    }

    return ((sc->task_requests != 0) ?
        FILTER_SCHEDULE_THREAD : FILTER_HANDLED);
} /* ixgbe_msix_link */

/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
    struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
    int error;
    unsigned int reg, usec, rate;

    reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
    usec = ((reg & 0x0FF8) >> 3);
    if (usec > 0)
        rate = 500000 / usec;
    else
        rate = 0;
    error = sysctl_handle_int(oidp, &rate, 0, req);
    if (error || !req->newptr)
        return (error);
    reg &= ~0xfff; /* default, no limitation */
    ixgbe_max_interrupt_rate = 0;
    if (rate > 0 && rate < 500000) {
        if (rate < 1000)
            rate = 1000;
        ixgbe_max_interrupt_rate = rate;
        reg |= ((4000000 / rate) & 0xff8);
    }
    IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);

    return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */

/************************************************************************
 * ixgbe_add_device_sysctls
 ************************************************************************/
static void
ixgbe_add_device_sysctls(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct sysctl_oid_list *child;
    struct sysctl_ctx_list *ctx_list;

    ctx_list = device_get_sysctl_ctx(dev);
    child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

    /* Sysctls for all devices */
    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, ixgbe_sysctl_flowcntl, "I",
        IXGBE_SYSCTL_DESC_SET_FC);

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, ixgbe_sysctl_advertise, "I",
        IXGBE_SYSCTL_DESC_ADV_SPEED);

    sc->enable_aim = ixgbe_enable_aim;
    SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
        &sc->enable_aim, 0, "Adaptive Interrupt Moderation");

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
        CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
        ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");

#ifdef IXGBE_DEBUG
    /* testing sysctls (for all devices) */
    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, ixgbe_sysctl_power_state,
        "I", "PCI Power State");

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
        CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
        ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
    /* for X550 series devices */
    if (hw->mac.type >= ixgbe_mac_X550)
        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
            CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
            sc, 0, ixgbe_sysctl_dmac,
            "I", "DMA Coalesce");

    /* for WoL-capable devices */
    if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
            ixgbe_sysctl_wol_enable, "I",
            "Enable/Disable Wake on LAN");

        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
            CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
            sc, 0, ixgbe_sysctl_wufc,
            "I", "Enable/Disable Wake Up Filters");
    }

    /* for X552/X557-AT devices */
    if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
        struct sysctl_oid *phy_node;
        struct sysctl_oid_list *phy_list;

        phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
        phy_list = SYSCTL_CHILDREN(phy_node);

        SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
            CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
            sc, 0, ixgbe_sysctl_phy_temp,
            "I", "Current External PHY Temperature (Celsius)");

        SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
            "overtemp_occurred",
            CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
            ixgbe_sysctl_phy_overtemp_occurred, "I",
            "External PHY High Temperature Event Occurred");
    }

    if (sc->feat_cap & IXGBE_FEATURE_EEE) {
        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
            ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
    }
} /* ixgbe_add_device_sysctls */

/************************************************************************
 * ixgbe_allocate_pci_resources
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    int rid;

    rid = PCIR_BAR(0);
    sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);

    if (!(sc->pci_mem)) {
        device_printf(dev,
            "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    /* Save bus_space values for READ/WRITE_REG macros */
    sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
    sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
    /* Set hw values for shared code */
    sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

    return (0);
} /* ixgbe_allocate_pci_resources */

/************************************************************************
 * ixgbe_if_detach - Device removal routine
 *
 * Called when the driver is being removed.
 * Stops the adapter and deallocates all the resources
 * that were allocated for driver operation.
 *
 * Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_detach(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    u32 ctrl_ext;

    INIT_DEBUGOUT("ixgbe_if_detach: begin");

    if (ixgbe_pci_iov_detach(dev) != 0) {
        device_printf(dev, "SR-IOV in use; detach first.\n");
        return (EBUSY);
    }

    ixgbe_setup_low_power_mode(ctx);

    /* Let hardware know the driver is unloading */
    ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
    ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);

    ixgbe_free_pci_resources(ctx);
    free(sc->mta, M_IXGBE);

    return (0);
} /* ixgbe_if_detach */

/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 * Prepare the adapter/port for LPLU and/or WoL
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = iflib_get_dev(ctx);
    s32 error = 0;

    if (!hw->wol_enabled)
        ixgbe_set_phy_power(hw, false);

    /* Limit power management flow to X550EM baseT */
    if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
        hw->phy.ops.enter_lplu) {
        /* Turn off support for APM wakeup. (Using ACPI instead) */
        IXGBE_WRITE_REG(hw, IXGBE_GRC,
            IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

        /*
         * Clear the Wake Up Status register to prevent any previous
         * wakeup events from waking us up immediately after we
         * suspend.
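         * (WUS bits are write-1-to-clear, hence the 0xffffffff
         * write below.)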
         */
        IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

        /*
         * Program the Wakeup Filter Control register with the user's
         * filter settings
         */
        IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);

        /* Enable wakeups and power management in Wakeup Control */
        IXGBE_WRITE_REG(hw, IXGBE_WUC,
            IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

        /* X550EM baseT adapters need a special LPLU flow */
        hw->phy.reset_disable = true;
        ixgbe_if_stop(ctx);
        error = hw->phy.ops.enter_lplu(hw);
        if (error)
            device_printf(dev, "Error entering LPLU: %d\n", error);
        hw->phy.reset_disable = false;
    } else {
        /* Just stop for other adapters */
        ixgbe_if_stop(ctx);
    }

    return (error);
} /* ixgbe_setup_low_power_mode */

/************************************************************************
 * ixgbe_if_shutdown - Shutdown entry point
 ************************************************************************/
static int
ixgbe_if_shutdown(if_ctx_t ctx)
{
    int error = 0;

    INIT_DEBUGOUT("ixgbe_if_shutdown: begin");

    error = ixgbe_setup_low_power_mode(ctx);

    return (error);
} /* ixgbe_if_shutdown */

/************************************************************************
 * ixgbe_if_suspend
 *
 * From D0 to D3
 ************************************************************************/
static int
ixgbe_if_suspend(if_ctx_t ctx)
{
    int error = 0;

    INIT_DEBUGOUT("ixgbe_if_suspend: begin");

    error = ixgbe_setup_low_power_mode(ctx);

    return (error);
} /* ixgbe_if_suspend */

/************************************************************************
 * ixgbe_if_resume
 *
 * From D3 to D0
 ************************************************************************/
static int
ixgbe_if_resume(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    u32 wus;

    INIT_DEBUGOUT("ixgbe_if_resume: begin");

    /* Read & clear the WUS register */
    wus = IXGBE_READ_REG(hw, IXGBE_WUS);
    if (wus)
        device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
    IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
    /* And clear WUFC until the next low-power transition */
    IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

    /*
     * Required after D3->D0 transition;
     * will re-advertise all previously advertised speeds
     */
    if (if_getflags(ifp) & IFF_UP)
        ixgbe_if_init(ctx);

    return (0);
} /* ixgbe_if_resume */

/************************************************************************
 * ixgbe_if_mtu_set - Ioctl MTU entry point
 *
 * Return 0 on success, EINVAL on failure
 ************************************************************************/
static int
ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    int error = 0;

    IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");

    if (mtu > IXGBE_MAX_MTU) {
        error = EINVAL;
    } else {
        sc->max_frame_size = mtu + IXGBE_MTU_HDR;
    }

    return (error);
} /* ixgbe_if_mtu_set */

/************************************************************************
 * ixgbe_if_crcstrip_set
 ************************************************************************/
static void
ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    uint32_t hl, rxc;

    /*
     * CRC stripping is set in two places:
     *   IXGBE_HLREG0 (modified on init_locked and hw reset)
     *   IXGBE_RDRXCTL (set by the original driver in
     *   ixgbe_setup_hw_rsc(), called in init_locked; we disable
     *   that setting when netmap is compiled in).
     * We update the values here, but also in ixgbe.c because
     * init_locked sometimes is called outside our control.
     */
    hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
#ifdef NETMAP
    if (netmap_verbose)
        D("%s read HLREG 0x%x rxc 0x%x",
            onoff ? "enter" : "exit", hl, rxc);
#endif
    /* hw requirements ... */
    rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
    rxc |= IXGBE_RDRXCTL_RSCACKC;
    if (onoff && !crcstrip) {
        /* Keep the CRC; fast rx */
        hl &= ~IXGBE_HLREG0_RXCRCSTRP;
        rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
    } else {
        /* Reset default mode */
        hl |= IXGBE_HLREG0_RXCRCSTRP;
        rxc |= IXGBE_RDRXCTL_CRCSTRIP;
    }
#ifdef NETMAP
    if (netmap_verbose)
        D("%s write HLREG 0x%x rxc 0x%x",
            onoff ? "enter" : "exit", hl, rxc);
#endif
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
    IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
} /* ixgbe_if_crcstrip_set */

/*********************************************************************
 * ixgbe_if_init - Init entry point
 *
 * Used in two ways: It is used by the stack as an init entry
 * point in the network interface structure. It is also used
 * by the driver as a hw/sw initialization routine to get to a
 * consistent state.
 **********************************************************************/
void
ixgbe_if_init(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct ix_rx_queue *rx_que;
    struct ix_tx_queue *tx_que;
    u32 txdctl, mhadd;
    u32 rxdctl, rxctrl;
    u32 ctrl_ext;
    int i, j, err;

    INIT_DEBUGOUT("ixgbe_if_init: begin");

    /* Queue indices may change with IOV mode */
    ixgbe_align_all_queue_indices(sc);
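
    /*
     * RAR[0] is the primary unicast address filter; the IXGBE_RAH_AV
     * bit marks the entry valid. Reprogramming it below also picks up
     * a locally administered address (LAA) the user may have set.
     */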
    /* Get the latest mac address; the user may have assigned a LAA */
    bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
    ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
    hw->addr_ctrl.rar_used_count = 1;

    ixgbe_init_hw(hw);

    ixgbe_initialize_iov(sc);

    ixgbe_initialize_transmit_units(ctx);

    /* Setup Multicast table */
    ixgbe_if_multi_set(ctx);

    /* Determine the correct mbuf pool, based on frame size */
    sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

    /* Configure RX settings */
    ixgbe_initialize_receive_units(ctx);

    /*
     * Initialize variable holding task enqueue requests
     * from MSI-X interrupts
     */
    sc->task_requests = 0;

    /* Enable SDP & MSI-X interrupts based on adapter */
    ixgbe_config_gpie(sc);

    /* Set MTU size */
    if (if_getmtu(ifp) > ETHERMTU) {
        /* aka IXGBE_MAXFRS on 82599 and newer */
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    }

    /* Now enable all the queues */
    for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
        i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;

        txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        /* Set WTHRESH to 8, burst writeback */
        txdctl |= (8 << 16);
        /*
         * When the internal queue falls below PTHRESH (32),
         * start prefetching as long as there are at least
         * HTHRESH (1) buffers ready. The values are taken
         * from the Intel linux driver 3.8.21.
         * Prefetching enables tx line rate even with 1 queue.
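         * (TXDCTL field layout: PTHRESH in bits 6:0, HTHRESH in
         * bits 14:8, WTHRESH in bits 22:16; hence the (8 << 16)
         * above and the (32 << 0) | (1 << 8) below.)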
3112 */ 3113 txdctl |= (32 << 0) | (1 << 8); 3114 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 3115 } 3116 3117 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 3118 struct rx_ring *rxr = &rx_que->rxr; 3119 3120 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3121 if (hw->mac.type == ixgbe_mac_82598EB) { 3122 /* 3123 * PTHRESH = 21 3124 * HTHRESH = 4 3125 * WTHRESH = 8 3126 */ 3127 rxdctl &= ~0x3FFFFF; 3128 rxdctl |= 0x080420; 3129 } 3130 rxdctl |= IXGBE_RXDCTL_ENABLE; 3131 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 3132 for (j = 0; j < 10; j++) { 3133 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 3134 IXGBE_RXDCTL_ENABLE) 3135 break; 3136 else 3137 msec_delay(1); 3138 } 3139 wmb(); 3140 } 3141 3142 /* Enable Receive engine */ 3143 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3144 if (hw->mac.type == ixgbe_mac_82598EB) 3145 rxctrl |= IXGBE_RXCTRL_DMBYPS; 3146 rxctrl |= IXGBE_RXCTRL_RXEN; 3147 ixgbe_enable_rx_dma(hw, rxctrl); 3148 3149 /* Set up MSI/MSI-X routing */ 3150 if (ixgbe_enable_msix) { 3151 ixgbe_configure_ivars(sc); 3152 /* Set up auto-mask */ 3153 if (hw->mac.type == ixgbe_mac_82598EB) 3154 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3155 else { 3156 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3157 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3158 } 3159 } else { /* Simple settings for Legacy/MSI */ 3160 ixgbe_set_ivar(sc, 0, 0, 0); 3161 ixgbe_set_ivar(sc, 0, 0, 1); 3162 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3163 } 3164 3165 ixgbe_init_fdir(sc); 3166 3167 /* 3168 * Check on any SFP devices that 3169 * need to be kick-started 3170 */ 3171 if (hw->phy.type == ixgbe_phy_none) { 3172 err = hw->phy.ops.identify(hw); 3173 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3174 device_printf(dev, 3175 "Unsupported SFP+ module type was detected.\n"); 3176 return; 3177 } 3178 } 3179 3180 /* Set moderation on the Link interrupt */ 3181 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR); 3182 3183 /* Enable power to the phy. 
    ixgbe_set_phy_power(hw, true);

    /* Config/Enable Link */
    ixgbe_config_link(ctx);

    /* Hardware Packet Buffer & Flow Control setup */
    ixgbe_config_delay_values(sc);

    /* Initialize the FC settings */
    ixgbe_start_hw(hw);

    /* Set up VLAN support and filter */
    ixgbe_setup_vlan_hw_support(ctx);

    /* Setup DMA Coalescing */
    ixgbe_config_dmac(sc);

    /* And now turn on interrupts */
    ixgbe_if_enable_intr(ctx);

    /* Enable the use of the MBX by the VFs */
    if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    }
} /* ixgbe_if_init */

/************************************************************************
 * ixgbe_set_ivar
 *
 * Setup the correct IVAR register for a particular MSI-X interrupt
 * (yes this is all very magic and confusing :)
 * - entry is the register array entry
 * - vector is the MSI-X vector for this queue
 * - type is RX/TX/MISC
 ************************************************************************/
static void
ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 ivar, index;

    vector |= IXGBE_IVAR_ALLOC_VAL;

    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        if (type == -1)
            entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
        else
            entry += (type * 64);
        index = (entry >> 2) & 0x1F;
        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
        ivar &= ~(0xFF << (8 * (entry & 0x3)));
        ivar |= (vector << (8 * (entry & 0x3)));
        IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
        break;
    case ixgbe_mac_82599EB:
    case ixgbe_mac_X540:
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        if (type == -1) { /* MISC IVAR */
            index = (entry & 1) * 8;
            ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
            ivar &= ~(0xFF << index);
            ivar |= (vector << index);
            IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
        } else { /* RX/TX IVARS */
            index = (16 * (entry & 1)) + (8 * type);
            ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
            ivar &= ~(0xFF << index);
            ivar |= (vector << index);
            IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
        }
        break;
    default:
        break;
    }
} /* ixgbe_set_ivar */

/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
    struct ix_rx_queue *rx_que = sc->rx_queues;
    struct ix_tx_queue *tx_que = sc->tx_queues;
    u32 newitr;

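    /*
     * EITR's interval field sits in bits 11:3 and counts in ~2 us
     * units, so 4000000 / rate is (500000 / rate) << 3, which drops
     * the interval into place; the 0x0FF8 mask keeps it there.
     */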
/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		sc->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		/* First the RX queue entry */
		ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);

		/* Set an Initial EITR value */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
	}
	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		/* ... and the TX */
		ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
	}
	/* For the Link interrupt */
	ixgbe_set_ivar(sc, 1, sc->vector, -1);
} /* ixgbe_configure_ivars */

/************************************************************************
 * ixgbe_config_gpie
 ************************************************************************/
static void
ixgbe_config_gpie(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (sc->intr_type == IFLIB_INTR_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		     |  IXGBE_GPIE_EIAME
		     |  IXGBE_GPIE_PBA_SUPPORT
		     |  IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
} /* ixgbe_config_gpie */

/************************************************************************
 * ixgbe_config_delay_values
 *
 *   Requires sc->max_frame_size to be set.
 ************************************************************************/
static void
ixgbe_config_delay_values(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 rxpb, frame, size, tmp;

	frame = sc->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = true;
} /* ixgbe_config_delay_values */

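/*
 * Sketch of the watermark arithmetic above (hedged; the exact DV()
 * macro expansions live in ixgbe.h and are not reproduced here):
 *
 *     high_water [KB] = (RXPBSIZE(0) in KB) - BT2KB(DV(frame, frame))
 *     low_water  [KB] = BT2KB(LOW_DV(frame))
 *
 * i.e. XOFF is requested once less than one worst-case delay value of
 * packet-buffer space remains, and XON is sent again when the buffer
 * drains back below the low-water mark.
 */
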
/************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 *   Called whenever the multicast address list is updated.
 ************************************************************************/
static u_int
ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_mc_addr *mta = sc->mta;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);
	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	mta[idx].vmdq = sc->pool;

	return (1);
} /* ixgbe_mc_filter_apply */

static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_mc_addr *mta;
	if_t ifp = iflib_get_ifp(ctx);
	u8 *update_ptr;
	u32 fctrl;
	u_int mcnt;

	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");

	mta = sc->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
	    sc);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, true);
	}

	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);

	if (if_getflags(ifp) & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    if_getflags(ifp) & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
} /* ixgbe_if_multi_set */

/************************************************************************
 * ixgbe_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixgbe_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8 *)(mta + 1);

	return (mta->addr);
} /* ixgbe_mc_array_itr */

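/*
 * Handoff sketch (illustrative): if_foreach_llmaddr() fills
 * sc->mta[0..mcnt-1] through ixgbe_mc_filter_apply(), then the shared
 * code walks that array by repeatedly calling ixgbe_mc_array_itr(),
 * which returns the current address and advances *update_ptr by one
 * whole struct ixgbe_mc_addr (address plus vmdq pool), not by just the
 * six address bytes, so the pool tag travels with each entry.
 */
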
/************************************************************************
 * ixgbe_if_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	/* Check for pluggable optics */
	if (sc->sfp_probe)
		if (!ixgbe_sfp_probe(ctx))
			return; /* Nothing to do */

	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixgbe_if_timer */

/************************************************************************
 * ixgbe_sfp_probe
 *
 *   Determine if a port has optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	bool result = false;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		sc->sfp_probe = false;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module detected!\n");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = true;
	}
out:
	return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 err, cage_full = 0;

	if (sc->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */

/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_physical_layer will call hw->phy.ops.identify_sfp() */
	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, true);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(sc->media);
	ixgbe_add_media_types(sc->ctx);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */

/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error;

	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(sc->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	/* Clear the stopped flag so that stop_adapter runs in full */
	hw->adapter_stopped = false;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	sc->link_up = false;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
} /* ixgbe_if_stop */

/************************************************************************
 * ixgbe_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);

	if (sc->link_up) {
		if (sc->link_active == false) {
			/* link_speed of 128 is IXGBE_LINK_SPEED_10GB_FULL */
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&sc->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(sc);
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    ixgbe_link_speed_to_baudrate(sc->link_speed));

			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	}

	/* Handle task requests from msix_link() */
	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	sc->task_requests = 0;

	ixgbe_update_stats_counters(sc);
} /* ixgbe_if_update_admin_status */

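/*
 * Flow sketch (illustrative): the interrupt handlers, including the
 * legacy ISR further below, only latch work by setting
 * IXGBE_REQUEST_TASK_* bits in sc->task_requests and deferring to the
 * iflib admin task; the actual ixgbe_handle_*() calls run above, in
 * the admin context, where it is safe to touch the PHY and
 * reconfigure the MAC.
 */
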
/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

	if (dcfg->watchdog_timer ^ sc->dmac ||
	    dcfg->link_speed ^ sc->link_speed) {
		dcfg->watchdog_timer = sc->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = sc->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */

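/*
 * Note on the XOR test above (illustrative): `a ^ b` is nonzero
 * exactly when a != b, so the block reprograms the hardware only when
 * either the watchdog value or the cached link speed actually changed
 * since the last call.
 */
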
/************************************************************************
 * ixgbe_if_enable_intr
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (sc->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixgbe_if_enable_intr */

/************************************************************************
 * ixgbe_if_disable_intr
 ************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (sc->intr_type == IFLIB_INTR_MSIX)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixgbe_if_disable_intr */

/************************************************************************
 * ixgbe_link_intr_enable
 ************************************************************************/
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
	struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
} /* ixgbe_link_intr_enable */

/************************************************************************
 * ixgbe_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixgbe_enable_queue(sc, que->msix);

	return (0);
} /* ixgbe_if_rx_queue_intr_enable */

/************************************************************************
 * ixgbe_enable_queue
 ************************************************************************/
static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
} /* ixgbe_enable_queue */

/************************************************************************
 * ixgbe_disable_queue
 ************************************************************************/
static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
} /* ixgbe_disable_queue */

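/*
 * Worked example for the EIMS_EX/EIMC_EX split above (illustrative):
 * MSI-X vector 37 gives queue = 1ULL << 37.  The low 32 bits are zero,
 * so register 0 is left untouched, while queue >> 32 = 0x20 sets bit 5
 * of register 1 -- i.e. vectors 0-31 live in EIMS_EX(0) and vectors
 * 32-63 in EIMS_EX(1).
 */
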
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ix_rx_queue *que = sc->rx_queues;
	struct ixgbe_hw *hw = &sc->hw;
	if_ctx_t ctx = sc->ctx;
	u32 eicr, eicr_mask;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 *   SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, fc;

	sc = (struct ixgbe_softc *)arg1;
	fc = sc->hw.fc.current_mode;

	error = sysctl_handle_int(oidp, &fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (fc == sc->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(sc, fc);
} /* ixgbe_sysctl_flowcntl */

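/*
 * Usage sketch (hypothetical unit number; the sysctl node name is
 * assumed to be "fc" as attached elsewhere in this file):
 *
 *     # sysctl dev.ix.0.fc=3      # request full RX/TX pause
 *     # sysctl dev.ix.0.fc=0      # disable flow control
 *
 * Values follow enum ixgbe_fc_mode: 0 none, 1 rx_pause, 2 tx_pause,
 * 3 full, as documented on ixgbe_set_flowcntl() below.
 */
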
/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 *   Flow control values:
 *     0 - off
 *     1 - rx pause
 *     2 - tx pause
 *     3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		sc->hw.fc.requested_mode = fc;
		if (sc->num_rx_queues > 1)
			ixgbe_disable_rx_drop(sc);
		break;
	case ixgbe_fc_none:
		sc->hw.fc.requested_mode = ixgbe_fc_none;
		if (sc->num_rx_queues > 1)
			ixgbe_enable_rx_drop(sc);
		break;
	default:
		return (EINVAL);
	}

	/* Don't autoneg if forcing a value */
	sc->hw.fc.disable_fc_autoneg = true;
	ixgbe_fc_enable(&sc->hw);

	return (0);
} /* ixgbe_set_flowcntl */

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 *   Enable the hardware to drop packets when the buffer is
 *   full. This is useful with multiqueue, so that no single
 *   queue being full stalls the entire RX engine. We only
 *   enable this when Multiqueue is enabled AND Flow Control
 *   is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 *   SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, advertise;

	sc = (struct ixgbe_softc *)arg1;
	advertise = sc->advertise;

	error = sysctl_handle_int(oidp, &advertise, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixgbe_set_advertise(sc, advertise);
} /* ixgbe_sysctl_advertise */

/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32 err = IXGBE_NOT_IMPLEMENTED;
	bool negotiate = false;

	/* Checks to validate new value */
	if (sc->advertise == advertise) /* no change */
		return (0);

	hw = &sc->hw;

	/* No speed changes for backplane media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber))) {
		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
		return (EINVAL);
	}

	if (advertise < 0x1 || advertise > 0x3F) {
		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0x3F\n");
		return (EINVAL);
	}

	if (hw->mac.ops.get_link_capabilities) {
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(dev, "Unable to determine supported advertise speeds\n");
			return (ENODEV);
		}
	}

	/* Set new value and report new advertised mode */
	if (advertise & 0x1) {
		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_100_FULL;
	}
	if (advertise & 0x2) {
		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	}
	if (advertise & 0x4) {
		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	}
	if (advertise & 0x8) {
		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10_FULL;
	}
	if (advertise & 0x10) {
		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
			device_printf(dev, "Interface does not support 2.5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
	}
	if (advertise & 0x20) {
		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
			device_printf(dev, "Interface does not support 5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(hw, speed, true);
	sc->advertise = advertise;

	return (0);
} /* ixgbe_set_advertise */

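/*
 * Worked example (hypothetical unit number; the sysctl node name is
 * assumed to be "advertise_speed" as attached elsewhere in this file):
 *
 *     # sysctl dev.ix.0.advertise_speed=0x6
 *
 * 0x6 = 0x2 | 0x4, i.e. advertise 1G and 10G only.  Each flag bit is
 * validated against get_link_capabilities() before being translated
 * into an IXGBE_LINK_SPEED_* mask for setup_link().
 */
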
/************************************************************************
 * ixgbe_get_default_advertise - Get default advertised speed settings
 *
 *   Formatted for sysctl usage.
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_get_default_advertise(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int speed;
	ixgbe_link_speed link_caps = 0;
	s32 err;
	bool negotiate = false;

	/*
	 * Advertised speed means nothing unless it's copper or
	 * multi-speed fiber
	 */
	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
	    !(hw->phy.multispeed_fiber))
		return (0);

	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
	if (err != IXGBE_SUCCESS)
		return (0);

	if (hw->mac.type == ixgbe_mac_X550) {
		/*
		 * 2.5G and 5G autonegotiation speeds on X550
		 * are disabled by default due to reported
		 * interoperability issues with some switches.
		 */
		link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
		    IXGBE_LINK_SPEED_5GB_FULL);
	}

	speed =
	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);

	return speed;
} /* ixgbe_get_default_advertise */

/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 *   Control values:
 *     0/1 - off / on (use default value of 1000)
 *
 *   Legal timer values are:
 *     50,100,250,500,1000,2000,5000,10000
 *
 *   Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	if_t ifp = iflib_get_ifp(sc->ctx);
	int error;
	u16 newval;

	newval = sc->dmac;
	error = sysctl_handle_16(oidp, &newval, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		sc->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		sc->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		sc->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		if_init(ifp, ifp);

	return (0);
} /* ixgbe_sysctl_dmac */

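/*
 * Usage sketch (hypothetical unit number; the sysctl node name is
 * assumed to be "dmac" as attached elsewhere in this file):
 *
 *     # sysctl dev.ix.0.dmac=1      # enable with the default watchdog
 *     # sysctl dev.ix.0.dmac=250    # enable with a 250 watchdog value
 *     # sysctl dev.ix.0.dmac=0      # disable DMA coalescing
 *
 * The new value only reaches the hardware through ixgbe_config_dmac()
 * on the next re-init, which is why a running interface is bounced.
 */
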
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 *   Sysctl to test power states
 *   Values:
 *     0      - set device to D0
 *     3      - set device to D3
 *     (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_handle_int(oidp, &new_ps, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
} /* ixgbe_sysctl_power_state */
#endif

/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 *   Sysctl to enable/disable the WoL capability,
 *   if supported by the adapter.
 *
 *   Values:
 *     0 - disabled
 *     1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	int new_wol_enabled;
	int error = 0;

	new_wol_enabled = hw->wol_enabled;
	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	new_wol_enabled = !!(new_wol_enabled);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	if (new_wol_enabled > 0 && !sc->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */

/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 *   Sysctl to enable/disable the types of packets that the
 *   adapter will wake up upon receipt.
 *   Flags:
 *     0x1  - Link Status Change
 *     0x2  - Magic Packet
 *     0x4  - Direct Exact
 *     0x8  - Directed Multicast
 *     0x10 - Broadcast
 *     0x20 - ARP/IPv4 Request Packet
 *     0x40 - Direct IPv4 Packet
 *     0x80 - Direct IPv6 Packet
 *
 *   Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	int error = 0;
	u32 new_wufc;

	new_wufc = sc->wufc;

	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == sc->wufc)
		return (0);

	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/* Merge the new low byte with the preserved upper bits */
	new_wufc &= 0xff;
	new_wufc |= (sc->wufc & 0xffffff00);
	sc->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */

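/*
 * Usage sketch (hypothetical unit number; the sysctl node name is
 * assumed to be "wufc"):
 *
 *     # sysctl dev.ix.0.wufc=0x12
 *
 * 0x12 = 0x2 | 0x10, i.e. wake on Magic Packet or Broadcast.  Any bits
 * above 0xff are rejected with EINVAL before the low byte is merged
 * into sc->wufc.
 */
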
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */

/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 *   For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	/* Shift temp for output */
	reg = reg >> 8;

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 *   Reports (directly from the PHY) whether the current PHY
 *   temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Get occurrence bit */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */

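/*
 * Register decode sketch (hedged; the vendor-specific temperature
 * register appears to keep the reading in its high byte, which is why
 * the temperature sysctl shifts by 8):
 *
 *     raw = 0x1A00  ->  raw >> 8 = 0x1A = 26
 *
 * and bit 14 (0x4000) of the overtemp status register latches the
 * over-temperature event reported just above.
 */
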
/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 *   Sysctl to set EEE power saving feature
 *   Values:
 *     0      - disable EEE
 *     1      - enable EEE
 *     (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	if_t ifp = iflib_get_ifp(sc->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = ixgbe_setup_eee(&sc->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	if_init(ifp, ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	else
		sc->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */

/************************************************************************
 * ixgbe_init_device_features
 ************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
	sc->feat_cap = IXGBE_FEATURE_NETMAP
	             | IXGBE_FEATURE_RSS
	             | IXGBE_FEATURE_MSI
	             | IXGBE_FEATURE_MSIX
	             | IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (sc->feat_cap & IXGBE_FEATURE_EEE)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled.\n");
	}
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		sc->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		sc->feat_en &= ~IXGBE_FEATURE_RSS;
		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */

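/*
 * Dependency sketch for the routine above (illustrative): feat_cap is
 * "what the silicon can do", feat_en is "what this boot will use".  A
 * feature is only enabled when its capability bit survived the
 * MAC-type switch, so e.g. losing MSIX in the final pass drags RSS
 * and SRIOV down with it.
 */
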
/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	uint16_t phyfw = 0;
	int status;
	const char *space = "";

	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
	status = ixgbe_get_phy_firmware_version(hw, &phyfw);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, nvm_ver.or_major, nvm_ver.or_build,
		    nvm_ver.or_patch);
		space = " ";
	}

	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
		space = " ";
	}

	if (phyfw != 0 && status == IXGBE_SUCCESS)
		sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
} /* ixgbe_sbuf_fw_version */

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */
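
/*
 * Example of the assembled version string (illustrative values only,
 * following the sbuf_printf() formats above):
 *
 *     NVM OEM V1.2 R3 Option ROM V1-b4-p2 eTrack 0x80000abc PHY FW V5
 *
 * Each segment is emitted only when its version source validated, and
 * `space` keeps exactly one separator between the segments present.
 */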