/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, Description String }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
    /* required last entry */
    PVID_END
};

static void *ixgbe_register(device_t);
static int  ixgbe_if_attach_pre(if_ctx_t);
static int  ixgbe_if_attach_post(if_ctx_t);
static int  ixgbe_if_detach(if_ctx_t);
static int  ixgbe_if_shutdown(if_ctx_t);
static int  ixgbe_if_suspend(if_ctx_t);
static int  ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixgbe_if_media_change(if_ctx_t);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int  ixgbe_if_promisc_set(if_ctx_t, int);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int  ixgbe_allocate_pci_resources(if_ctx_t);
static int  ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int  ixgbe_msix_link(void *);
static int  ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int  ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
static int  ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, ixgbe_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD(device_suspend, iflib_device_suspend),
    DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
    DEVMETHOD(pci_iov_init, iflib_device_iov_init),
    DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
    DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

static driver_t ix_driver = {
    "ix", ix_methods, sizeof(struct ixgbe_softc),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
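/*
 * Note: "ix" is an iflib driver; the generic iflib_device_* methods above
 * implement probe/attach/detach and call back into this file through the
 * ifdi_* methods registered below.  On a typical FreeBSD system the driver
 * can be loaded at runtime with `kldload if_ix` or from /boot/loader.conf
 * with if_ix_load="YES" (see ixgbe(4)).
 */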
static device_method_t ixgbe_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
    DEVMETHOD(ifdi_detach, ixgbe_if_detach),
    DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
    DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
    DEVMETHOD(ifdi_resume, ixgbe_if_resume),
    DEVMETHOD(ifdi_init, ixgbe_if_init),
    DEVMETHOD(ifdi_stop, ixgbe_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
    DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
    DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
    DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
    DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
    DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
    DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
    DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
    DEVMETHOD(ifdi_timer, ixgbe_if_timer),
    DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
    DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
    DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
    DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
    DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
    DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
    DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
    DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
    "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.  This only works as a compile-time
 * option right now since it is set during attach; set this to
 * 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Enabling this allows the use of unsupported SFP+ modules;
 * note that by doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation.
 * The interrupt rate is varied over time based on the
 * traffic seen on that interrupt vector.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;
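/*
 * Example (values are illustrative only): the CTLFLAG_RDTUN tunables above
 * can be set from /boot/loader.conf before the driver attaches, e.g.:
 *
 *   hw.ix.enable_msix="0"            # force non-MSI-X interrupts for testing
 *   hw.ix.flow_control="0"           # ixgbe_fc_none
 *   hw.ix.max_interrupt_rate="31250"
 *
 * Entries marked CTLFLAG_RWTUN, such as hw.ix.enable_aim, can additionally
 * be changed at runtime with sysctl(8).
 */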
extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
    .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = PAGE_SIZE,
    .isc_rx_maxsize = PAGE_SIZE*4,
    .isc_rx_nsegments = 1,
    .isc_rx_maxsegsize = PAGE_SIZE*4,
    .isc_nfl = 1,
    .isc_ntxqs = 1,
    .isc_nrxqs = 1,

    .isc_admin_intrcnt = 1,
    .isc_vendor_info = ixgbe_vendor_info_array,
    .isc_driver_version = ixgbe_driver_version,
    .isc_driver = &ixgbe_if_driver,
    .isc_flags = IFLIB_TSO_INIT_IP,

    .isc_nrxd_min = {MIN_RXD},
    .isc_ntxd_min = {MIN_TXD},
    .isc_nrxd_max = {MAX_RXD},
    .isc_ntxd_max = {MAX_TXD},
    .isc_nrxd_default = {DEFAULT_RXD},
    .isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t     scctx = sc->shared;
    struct ix_tx_queue *que;
    int                i, j, error;

    MPASS(sc->num_tx_queues > 0);
    MPASS(sc->num_tx_queues == ntxqsets);
    MPASS(ntxqs == 1);

    /* Allocate queue structure memory */
    sc->tx_queues =
        (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!sc->tx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate TX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
        struct tx_ring *txr = &que->txr;

        /* In case SR-IOV is enabled, align the index properly */
        txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

        txr->sc = que->sc = sc;

        /* Allocate report status array */
        txr->tx_rsq = (qidx_t *)malloc(
            sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE,
            M_NOWAIT | M_ZERO);
        if (txr->tx_rsq == NULL) {
            error = ENOMEM;
            goto fail;
        }
        for (j = 0; j < scctx->isc_ntxd[0]; j++)
            txr->tx_rsq[j] = QIDX_INVALID;
        /* get the virtual and physical address of the hardware queues */
        txr->tail = IXGBE_TDT(txr->me);
        txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
        txr->tx_paddr = paddrs[i];

        txr->bytes = 0;
        txr->total_packets = 0;

        /* Set the rate at which we sample packets */
        if (sc->feat_en & IXGBE_FEATURE_FDIR)
            txr->atr_sample = atr_sample_rate;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
        sc->num_tx_queues);

    return (0);

fail:
    ixgbe_if_queues_free(ctx);

    return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que;
    int                i;

    MPASS(sc->num_rx_queues > 0);
    MPASS(sc->num_rx_queues == nrxqsets);
    MPASS(nrxqs == 1);

    /* Allocate queue structure memory */
    sc->rx_queues =
        (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!sc->rx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate RX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
        struct rx_ring *rxr = &que->rxr;

        /* In case SR-IOV is enabled, align the index properly */
        rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

        rxr->sc = que->sc = sc;

        /* get the virtual and physical address of the hw queues */
        rxr->tail = IXGBE_RDT(rxr->me);
        rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
        rxr->rx_paddr = paddrs[i];
        rxr->bytes = 0;
        rxr->que = que;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
        sc->num_rx_queues);

    return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_tx_queue *tx_que = sc->tx_queues;
    struct ix_rx_queue *rx_que = sc->rx_queues;
    int                i;

    if (tx_que != NULL) {
        for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
            struct tx_ring *txr = &tx_que->txr;
            if (txr->tx_rsq == NULL)
                break;

            free(txr->tx_rsq, M_IXGBE);
            txr->tx_rsq = NULL;
        }

        free(sc->tx_queues, M_IXGBE);
        sc->tx_queues = NULL;
    }
    if (rx_que != NULL) {
        free(sc->rx_queues, M_IXGBE);
        sc->rx_queues = NULL;
    }
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32             reta = 0, mrqc, rss_key[10];
    int             queue_id, table_size, index_mult;
    int             i, j;
    u32             rss_hash_config;

    if (sc->feat_en & IXGBE_FEATURE_RSS) {
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *)&rss_key);
    } else {
        /* set up random bits */
        arc4rand(&rss_key, sizeof(rss_key), 0);
    }

    /* Set multiplier for RETA setup and table size based on MAC */
    index_mult = 0x1;
    table_size = 128;
    switch (sc->hw.mac.type) {
    case ixgbe_mac_82598EB:
        index_mult = 0x11;
        break;
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        table_size = 512;
        break;
    default:
        break;
    }

    /* Set up the redirection table */
    for (i = 0, j = 0; i < table_size; i++, j++) {
        if (j == sc->num_rx_queues)
            j = 0;

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
            /*
             * Fetch the RSS bucket id for the given indirection
             * entry. Cap it at the number of configured buckets
             * (which is num_rx_queues.)
             */
            queue_id = rss_get_indirection_to_bucket(i);
            queue_id = queue_id % sc->num_rx_queues;
        } else
            queue_id = (j * index_mult);

        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
        reta = reta >> 8;
        reta = reta | (((uint32_t)queue_id) << 24);
        if ((i & 3) == 3) {
            if (i < 128)
                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
            else
                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                    reta);
            reta = 0;
        }
    }

    /* Now fill our hash function seeds */
    for (i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

    /* Perform hash on these packet types */
    if (sc->feat_en & IXGBE_FEATURE_RSS)
        rss_hash_config = rss_gethashconfig();
    else {
        /*
         * Disable UDP - IP fragments aren't currently being handled
         * and so we end up with a mix of 2-tuple and 4-tuple
         * traffic.
         */
        rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                        | RSS_HASHTYPE_RSS_TCP_IPV4
                        | RSS_HASHTYPE_RSS_IPV6
                        | RSS_HASHTYPE_RSS_TCP_IPV6
                        | RSS_HASHTYPE_RSS_IPV6_EX
                        | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    }

    mrqc = IXGBE_MRQC_RSSEN;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    mrqc |= ixgbe_get_mrqc(sc->iov_mode);
    IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
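/*
 * Worked example for the RETA fill above: each 32-bit RETA register holds
 * four 8-bit queue indices, filled lowest byte first (reta is shifted right
 * by 8 and the new entry inserted at bits 31:24).  With four RX queues and
 * index_mult == 1, entries 0..3 yield reta == 0x03020100 in RETA(0), and
 * the pattern repeats for RETA(1) and onward.
 */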
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
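/*
 * BSIZEPKT_ROUNDUP rounds rx_mbuf_sz up to the next SRRCTL.BSIZEPKT unit
 * (1 KB units when IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10); e.g. a 2048-byte
 * receive buffer yields bufsz == 2 below.
 */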
static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t     scctx = sc->shared;
    struct ixgbe_hw    *hw = &sc->hw;
    struct ifnet       *ifp = iflib_get_ifp(ctx);
    struct ix_rx_queue *que;
    int                i, j;
    u32                bufsz, fctrl, srrctl, rxcsum;
    u32                hlreg;

    /*
     * Make sure receives are disabled while
     * setting up the descriptor ring
     */
    ixgbe_disable_rx(hw);

    /* Enable broadcasts */
    fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    fctrl |= IXGBE_FCTRL_BAM;
    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        fctrl |= IXGBE_FCTRL_DPF;
        fctrl |= IXGBE_FCTRL_PMCF;
    }
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    /* Set for Jumbo Frames? */
    hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    if (ifp->if_mtu > ETHERMTU)
        hlreg |= IXGBE_HLREG0_JUMBOEN;
    else
        hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

    bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
        IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    /* Setup the Base and Length of the Rx Descriptor Ring */
    for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        u64            rdba = rxr->rx_paddr;

        j = rxr->me;

        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
            scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

        /* Set up the SRRCTL register */
        srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        srrctl |= bufsz;
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /*
         * Set DROP_EN iff we have no flow control and >1 queue.
         * Note that srrctl was cleared shortly before during reset,
         * so we do not need to clear the bit, but do it just in case
         * this code is moved elsewhere.
         */
        if (sc->num_rx_queues > 1 &&
            sc->hw.fc.requested_mode == ixgbe_fc_none) {
            srrctl |= IXGBE_SRRCTL_DROP_EN;
        } else {
            srrctl &= ~IXGBE_SRRCTL_DROP_EN;
        }

        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

        /* Set the driver rx tail address */
        rxr->tail = IXGBE_RDT(rxr->me);
    }

    if (sc->hw.mac.type != ixgbe_mac_82598EB) {
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR
                    | IXGBE_PSRTYPE_UDPHDR
                    | IXGBE_PSRTYPE_IPV4HDR
                    | IXGBE_PSRTYPE_IPV6HDR;
        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    }

    rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

    ixgbe_initialize_rss_mapping(sc);

    if (sc->num_rx_queues > 1) {
        /* RSS and RX IPP Checksum are mutually exclusive */
        rxcsum |= IXGBE_RXCSUM_PCSD;
    }

    if (ifp->if_capenable & IFCAP_RXCSUM)
        rxcsum |= IXGBE_RXCSUM_PCSD;

    /* This is useful for calculating UDP/IP fragment checksums */
    if (!(rxcsum & IXGBE_RXCSUM_PCSD))
        rxcsum |= IXGBE_RXCSUM_IPPCSE;

    IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw    *hw = &sc->hw;
    if_softc_ctx_t     scctx = sc->shared;
    struct ix_tx_queue *que;
    int                i;

    /* Setup the Base and Length of the Tx Descriptor Ring */
    for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
        i++, que++) {
        struct tx_ring *txr = &que->txr;
        u64            tdba = txr->tx_paddr;
        u32            txctrl = 0;
        int            j = txr->me;

        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
            scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

        /* Setup the HW Tx Head and Tail descriptor pointers */
        IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

        /* Cache the tail address */
        txr->tail = IXGBE_TDT(txr->me);

        txr->tx_rs_cidx = txr->tx_rs_pidx;
        txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
        for (int k = 0; k < scctx->isc_ntxd[0]; k++)
            txr->tx_rsq[k] = QIDX_INVALID;

        /* Disable Head Writeback */
        /*
         * Note: for X550 series devices, these registers are actually
         * prefixed with TPH_ instead of DCA_, but the addresses and
         * fields remain the same.
         */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
            break;
        default:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
            break;
        }
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
            break;
        default:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
            break;
        }
    }

    if (hw->mac.type != ixgbe_mac_82598EB) {
        u32 dmatxctl, rttdcs;

        dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
        dmatxctl |= IXGBE_DMATXCTL_TE;
        IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        /* Disable arbiter to set MTQC */
        rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        rttdcs |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        IXGBE_WRITE_REG(hw, IXGBE_MTQC,
            ixgbe_get_mtqc(sc->iov_mode));
        rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    }
} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
    return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
    struct ixgbe_softc *sc;
    device_t           dev;
    if_softc_ctx_t     scctx;
    struct ixgbe_hw    *hw;
    int                error = 0;
    u32                ctrl_ext;

    INIT_DEBUGOUT("ixgbe_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);
    sc->hw.back = sc;
    sc->ctx = ctx;
    sc->dev = dev;
    scctx = sc->shared = iflib_get_softc_ctx(ctx);
    sc->media = iflib_get_media(ctx);
    hw = &sc->hw;

    /* Determine hardware revision */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_get_revid(dev);
    hw->subsystem_vendor_id = pci_get_subvendor(dev);
    hw->subsystem_device_id = pci_get_subdevice(dev);

    /* Do base PCI setup - map BAR0 */
    if (ixgbe_allocate_pci_resources(ctx)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        return (ENXIO);
    }

    /* let hardware know driver is loaded */
    ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

    /*
     * Initialize the shared code
     */
    if (ixgbe_init_shared_code(hw) != 0) {
        device_printf(dev, "Unable to initialize the shared code\n");
        error = ENXIO;
        goto err_pci;
    }

    if (hw->mbx.ops.init_params)
        hw->mbx.ops.init_params(hw);

    hw->allow_unsupported_sfp = allow_unsupported_sfp;

    if (hw->mac.type != ixgbe_mac_82598EB)
        hw->phy.smart_speed = ixgbe_smart_speed;

    ixgbe_init_device_features(sc);

    /* Enable WoL (if supported) */
    ixgbe_check_wol_support(sc);
    /* Verify adapter fan is still functional (if applicable) */
    if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
        u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
        ixgbe_check_fan_failure(sc, esdp, false);
    }

    /* Ensure SW/FW semaphore is free */
    ixgbe_init_swfw_semaphore(hw);

    /* Set an initial default flow control value */
    hw->fc.requested_mode = ixgbe_flow_control;

    hw->phy.reset_if_overtemp = true;
    error = ixgbe_reset_hw(hw);
    hw->phy.reset_if_overtemp = false;
    if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
        /*
         * No optics in this port, set up
         * so the timer routine will probe
         * for later insertion.
         */
        sc->sfp_probe = true;
        error = 0;
    } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
        device_printf(dev, "Unsupported SFP+ module detected!\n");
        error = EIO;
        goto err_pci;
    } else if (error) {
        device_printf(dev, "Hardware initialization failed\n");
        error = EIO;
        goto err_pci;
    }

    /* Make sure we have a good EEPROM before we read from it */
    if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
        device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
        error = EIO;
        goto err_pci;
    }

    error = ixgbe_start_hw(hw);
    switch (error) {
    case IXGBE_ERR_EEPROM_VERSION:
        device_printf(dev,
            "This device is a pre-production adapter/LOM. Please be aware"
            " there may be issues associated with your hardware.\n"
            "If you are experiencing problems please contact your Intel or"
            " hardware representative who provided you with this"
            " hardware.\n");
        break;
    case IXGBE_ERR_SFP_NOT_SUPPORTED:
        device_printf(dev, "Unsupported SFP+ Module\n");
        error = EIO;
        goto err_pci;
    case IXGBE_ERR_SFP_NOT_PRESENT:
        device_printf(dev, "No SFP+ Module found\n");
        /* falls thru */
    default:
        break;
    }

    /* Most of the iflib initialization... */
    iflib_set_mac(ctx, hw->mac.addr);
    switch (sc->hw.mac.type) {
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        scctx->isc_rss_table_size = 512;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
        break;
    default:
        scctx->isc_rss_table_size = 128;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
    }

    /* Allow legacy interrupts */
    ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

    scctx->isc_txqsizes[0] =
        roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
        sizeof(u32), DBA_ALIGN);
    scctx->isc_rxqsizes[0] =
        roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
        DBA_ALIGN);

    /* XXX */
    scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
        CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
    } else {
        scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
        scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
    }

    scctx->isc_msix_bar = pci_msix_table_bar(dev);

    scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
    scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

    scctx->isc_txrx = &ixgbe_txrx;

    scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

    return (0);

err_pci:
    ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
    ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
    ixgbe_free_pci_resources(ctx);

    return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
    device_t           dev;
    struct ixgbe_softc *sc;
    struct ixgbe_hw    *hw;
    int                error = 0;

    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);
    hw = &sc->hw;

    if (sc->intr_type == IFLIB_INTR_LEGACY &&
        (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
        device_printf(dev, "Device does not support legacy interrupts");
        error = ENXIO;
        goto err;
    }

    /* Allocate multicast array memory. */
    sc->mta = malloc(sizeof(*sc->mta) *
        MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
    if (sc->mta == NULL) {
        device_printf(dev, "Can not allocate multicast setup array\n");
        error = ENOMEM;
        goto err;
    }

    /* hw.ix defaults init */
    ixgbe_set_advertise(sc, ixgbe_advertise_speed);

    /* Enable the optics for 82599 SFP+ fiber */
    ixgbe_enable_tx_laser(hw);

    /* Enable power to the phy. */
    ixgbe_set_phy_power(hw, true);

    ixgbe_initialize_iov(sc);

    error = ixgbe_setup_interface(ctx);
    if (error) {
        device_printf(dev, "Interface setup failed: %d\n", error);
        goto err;
    }

    ixgbe_if_update_admin_status(ctx);

    /* Initialize statistics */
    ixgbe_update_stats_counters(sc);
    ixgbe_add_hw_stats(sc);

    /* Check PCIE slot type/speed/width */
    ixgbe_get_slot_info(sc);

    /*
     * Do time init and sysctl init here, but
     * only on the first port of a bypass sc.
     */
    ixgbe_bypass_init(sc);

    /* Display NVM and Option ROM versions */
    ixgbe_print_fw_version(ctx);

    /* Set an initial dmac value */
    sc->dmac = 0;
    /* Set initial advertised speeds (if applicable) */
    sc->advertise = ixgbe_get_default_advertise(sc);

    if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
        ixgbe_define_iov_schemas(dev, &error);

    /* Add sysctls */
    ixgbe_add_device_sysctls(ctx);

    return (0);
err:
    return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u16             dev_caps = 0;

    /* Find out WoL support for port */
    sc->wol_support = hw->wol_enabled = 0;
    ixgbe_get_device_caps(hw, &dev_caps);
    if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
        ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
        hw->bus.func == 0))
        sc->wol_support = hw->wol_enabled = 1;

    /* Save initial wake up filter configuration */
    sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

    return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
    struct ifnet       *ifp = iflib_get_ifp(ctx);
    struct ixgbe_softc *sc = iflib_get_softc(ctx);

    INIT_DEBUGOUT("ixgbe_setup_interface: begin");

    if_setbaudrate(ifp, IF_Gbps(10));

    sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

    ixgbe_add_media_types(ctx);

    /* Autoselect media by default */
    ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

    return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t               ifp = iflib_get_ifp(ctx);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (sc->ipackets);
    case IFCOUNTER_OPACKETS:
        return (sc->opackets);
    case IFCOUNTER_IBYTES:
        return (sc->ibytes);
    case IFCOUNTER_OBYTES:
        return (sc->obytes);
    case IFCOUNTER_IMCASTS:
        return (sc->imcasts);
    case IFCOUNTER_OMCASTS:
        return (sc->omcasts);
    case IFCOUNTER_COLLISIONS:
        return (0);
    case IFCOUNTER_IQDROPS:
        return (sc->iqdrops);
    case IFCOUNTER_OQDROPS:
        return (0);
    case IFCOUNTER_IERRORS:
        return (sc->ierrors);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw    *hw = &sc->hw;
    int                i;

    if (hw->phy.ops.read_i2c_byte == NULL)
        return (ENXIO);
    for (i = 0; i < req->len; i++)
        hw->phy.ops.read_i2c_byte(hw, req->offset + i,
            req->dev_addr, &req->data[i]);
    return (0);
} /* ixgbe_if_i2c_req */

/*
 * ixgbe_if_needs_restart - Tell iflib when the driver needs to be
 * reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
    switch (event) {
    case IFLIB_RESTART_VLAN_CONFIG:
        return (false);
    default:
        return (true);
    }
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw    *hw = &sc->hw;
    device_t           dev = iflib_get_dev(ctx);
    u64                layer;

    layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

    /* Media types with matching FreeBSD media defines */
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

    if (hw->mac.type == ixgbe_mac_X550) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
        ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
    }

    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
        layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
            NULL);

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
                NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
                NULL);
    } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
        device_printf(dev, "Media supported: 10GbaseKR\n");
        device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
        device_printf(dev, "Media supported: 10GbaseKX4\n");
        device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
        device_printf(dev, "Media supported: 1000baseKX\n");
        device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
        device_printf(dev, "Media supported: 2500baseKX\n");
        device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
    }
#endif
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
        device_printf(dev, "Media supported: 1000baseBX\n");

    if (hw->device_id == IXGBE_DEV_ID_82598AT) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
            0, NULL);
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    }

    ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        if (hw->phy.type == ixgbe_phy_nl)
            return (true);
        return (false);
    case ixgbe_mac_82599EB:
        switch (hw->mac.ops.get_media_type(hw)) {
        case ixgbe_media_type_fiber:
        case ixgbe_media_type_fiber_qsfp:
            return (true);
        default:
            return (false);
        }
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
            return (true);
        return (false);
    default:
        return (false);
    }
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw    *hw = &sc->hw;
    u32                autoneg, err = 0;
    bool               sfp, negotiate;

    sfp = ixgbe_is_sfp(hw);

    if (sfp) {
        sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
        iflib_admin_intr_deferred(ctx);
    } else {
        if (hw->mac.ops.check_link)
            err = ixgbe_check_link(hw, &sc->link_speed,
                &sc->link_up, false);
        if (err)
            return;
        autoneg = hw->phy.autoneg_advertised;
        if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
            err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                &negotiate);
        if (err)
            return;

        if (hw->mac.type == ixgbe_mac_X550 &&
            hw->phy.autoneg_advertised == 0) {
            /*
             * 2.5G and 5G autonegotiation speeds on X550
             * are disabled by default due to reported
             * interoperability issues with some switches.
             *
             * The second condition checks whether any
             * operations involving setting autonegotiation
             * speeds have been performed prior to this
             * ixgbe_config_link() call.
             *
             * If hw->phy.autoneg_advertised does not
             * equal 0, this means that the user might have
             * set autonegotiation speeds via the sysctl
             * before bringing the interface up.  In this
             * case, we should not disable 2.5G and 5G
             * since those speeds might have been selected
             * by the user.
             *
             * Otherwise (i.e. if hw->phy.autoneg_advertised
             * is set to 0), it is the first time we set
             * autonegotiation preferences and the default
             * set of speeds should exclude 2.5G and 5G.
             */
            autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
                IXGBE_LINK_SPEED_5GB_FULL);
        }

        if (hw->mac.ops.setup_link)
            err = hw->mac.ops.setup_link(hw, autoneg,
                sc->link_up);
    }
} /* ixgbe_config_link */
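/*
 * Note: the hardware statistics registers read in
 * ixgbe_update_stats_counters() below are generally clear-on-read, which
 * is why each value is accumulated into the softc with "+=" rather than
 * stored directly.
 */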
/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
    struct ixgbe_hw       *hw = &sc->hw;
    struct ixgbe_hw_stats *stats = &sc->stats.pf;
    u32                   missed_rx = 0, bprc, lxon, lxoff, total;
    u32                   lxoffrxc;
    u64                   total_missed_rx = 0;

    stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
    stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
    stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
    stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
    stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

    for (int i = 0; i < 16; i++) {
        stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
        stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
        stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
    }
    stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
    stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
    stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

    /* Hardware workaround, gprc counts missed packets */
    stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
    stats->gprc -= missed_rx;

    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
        lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        stats->lxoffrxc += lxoffrxc;
    } else {
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
        lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
        stats->lxoffrxc += lxoffrxc;
        /* 82598 only has a counter in the high register */
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
    }

    /*
     * For watchdog management we need to know if we have been paused
     * during the last interval, so capture that here.
     */
    if (lxoffrxc)
        sc->shared->isc_pause_frames = 1;

    /*
     * Workaround: mprc hardware is incorrectly counting
     * broadcasts, so for now we subtract those.
     */
    bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
    stats->bprc += bprc;
    stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
    if (hw->mac.type == ixgbe_mac_82598EB)
        stats->mprc -= bprc;

    stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
    stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
    stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
    stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
    stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
    stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

    lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
    stats->lxontxc += lxon;
    lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
    stats->lxofftxc += lxoff;
    total = lxon + lxoff;

    stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
    stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
    stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
    stats->gptc -= total;
    stats->mptc -= total;
    stats->ptc64 -= total;
    stats->gotc -= total * ETHER_MIN_LEN;

    stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
    stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
    stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
    stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
    stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
    stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
    stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
    stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
    stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
    stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
    stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
    stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
    stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
    stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
    stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
    stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
    stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
    stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    /* Only read FCOE on 82599 */
    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
        stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
        stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
        stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
        stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
    }

    /* Fill out the OS statistics structure */
    IXGBE_SET_IPACKETS(sc, stats->gprc);
    IXGBE_SET_OPACKETS(sc, stats->gptc);
    IXGBE_SET_IBYTES(sc, stats->gorc);
    IXGBE_SET_OBYTES(sc, stats->gotc);
    IXGBE_SET_IMCASTS(sc, stats->mprc);
    IXGBE_SET_OMCASTS(sc, stats->mptc);
    IXGBE_SET_COLLISIONS(sc, 0);
    IXGBE_SET_IQDROPS(sc, total_missed_rx);

    /*
     * Aggregate following types of errors as RX errors:
     * - CRC error count,
     * - illegal byte error count,
     * - checksum error count,
     * - missed packets count,
     * - length error count,
     * - undersized packets count,
     * - fragmented packets count,
     * - oversized packets count,
     * - jabber count.
     */
    IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
        stats->xec + stats->mpc[0] + stats->rlec + stats->ruc +
        stats->rfc + stats->roc + stats->rjc);
} /* ixgbe_update_stats_counters */

/************************************************************************
 * ixgbe_add_hw_stats
 *
 *   Add sysctl variables, one per statistic, to the system.
1598 ************************************************************************/ 1599 static void 1600 ixgbe_add_hw_stats(struct ixgbe_softc *sc) 1601 { 1602 device_t dev = iflib_get_dev(sc->ctx); 1603 struct ix_rx_queue *rx_que; 1604 struct ix_tx_queue *tx_que; 1605 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1606 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1607 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1608 struct ixgbe_hw_stats *stats = &sc->stats.pf; 1609 struct sysctl_oid *stat_node, *queue_node; 1610 struct sysctl_oid_list *stat_list, *queue_list; 1611 int i; 1612 1613 #define QUEUE_NAME_LEN 32 1614 char namebuf[QUEUE_NAME_LEN]; 1615 1616 /* Driver Statistics */ 1617 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1618 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets"); 1619 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1620 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); 1621 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1622 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); 1623 1624 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 1625 struct tx_ring *txr = &tx_que->txr; 1626 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1627 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1628 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1629 queue_list = SYSCTL_CHILDREN(queue_node); 1630 1631 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1632 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1633 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1634 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1635 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1636 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1637 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1638 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1639 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1640 CTLFLAG_RD, &txr->total_packets, 1641 "Queue Packets Transmitted"); 1642 } 1643 1644 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 1645 struct rx_ring *rxr = &rx_que->rxr; 1646 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1647 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1648 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1649 queue_list = SYSCTL_CHILDREN(queue_node); 1650 1651 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1652 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 1653 &sc->rx_queues[i], 0, 1654 ixgbe_sysctl_interrupt_rate_handler, "IU", 1655 "Interrupt Rate"); 1656 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1657 CTLFLAG_RD, &(sc->rx_queues[i].irqs), 1658 "irqs on this queue"); 1659 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1660 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1661 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1662 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1663 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1664 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1665 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1666 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1667 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1668 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1669 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1670 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1671 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1672 CTLFLAG_RD, 
&rxr->rx_discarded, "Discarded RX packets"); 1673 } 1674 1675 /* MAC stats get their own sub node */ 1676 1677 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 1678 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics"); 1679 stat_list = SYSCTL_CHILDREN(stat_node); 1680 1681 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs", 1682 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS); 1683 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 1684 CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); 1685 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", 1686 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); 1687 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", 1688 CTLFLAG_RD, &stats->errbc, "Byte Errors"); 1689 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", 1690 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); 1691 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", 1692 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults"); 1693 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", 1694 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults"); 1695 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", 1696 CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); 1697 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets", 1698 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count"); 1699 1700 /* Flow Control stats */ 1701 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 1702 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted"); 1703 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 1704 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received"); 1705 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 1706 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted"); 1707 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 1708 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received"); 1709 1710 /* Packet Reception Stats */ 1711 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", 1712 CTLFLAG_RD, &stats->tor, "Total Octets Received"); 1713 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 1714 CTLFLAG_RD, &stats->gorc, "Good Octets Received"); 1715 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", 1716 CTLFLAG_RD, &stats->tpr, "Total Packets Received"); 1717 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 1718 CTLFLAG_RD, &stats->gprc, "Good Packets Received"); 1719 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 1720 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); 1721 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", 1722 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); 1723 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 1724 CTLFLAG_RD, &stats->prc64, "64 byte frames received"); 1725 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 1726 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); 1727 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 1728 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); 1729 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 1730 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); 1731 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 1732 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); 1733 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 1734 CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received"); 1735 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 1736 CTLFLAG_RD,
&stats->ruc, "Receive Undersized"); 1737 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 1738 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received"); 1739 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 1740 CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); 1741 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 1742 CTLFLAG_RD, &stats->rjc, "Received Jabber"); 1743 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 1744 CTLFLAG_RD, &stats->mngprc, "Management Packets Received"); 1745 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 1746 CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped"); 1747 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 1748 CTLFLAG_RD, &stats->xec, "Checksum Errors"); 1749 1750 /* Packet Transmission Stats */ 1751 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 1752 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); 1753 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 1754 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); 1755 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 1756 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); 1757 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 1758 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); 1759 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 1760 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); 1761 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 1762 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted"); 1763 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 1764 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted"); 1765 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 1766 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); 1767 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 1768 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); 1769 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 1770 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); 1771 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 1772 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); 1773 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 1774 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); 1775 } /* ixgbe_add_hw_stats */ 1776 1777 /************************************************************************ 1778 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function 1779 * 1780 * Retrieves the TDH value from the hardware 1781 ************************************************************************/ 1782 static int 1783 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 1784 { 1785 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1786 int error; 1787 unsigned int val; 1788 1789 if (!txr) 1790 return (0); 1791 1792 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me)); 1793 error = sysctl_handle_int(oidp, &val, 0, req); 1794 if (error || !req->newptr) 1795 return error; 1796 1797 return (0); 1798 } /* ixgbe_sysctl_tdh_handler */ 1799 1800 /************************************************************************ 1801 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1802 * 1803 * Retrieves the TDT value from the hardware 1804 ************************************************************************/ 1805 static int 1806
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1807 { 1808 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1809 int error; 1810 unsigned int val; 1811 1812 if (!txr) 1813 return (0); 1814 1815 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me)); 1816 error = sysctl_handle_int(oidp, &val, 0, req); 1817 if (error || !req->newptr) 1818 return error; 1819 1820 return (0); 1821 } /* ixgbe_sysctl_tdt_handler */ 1822 1823 /************************************************************************ 1824 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1825 * 1826 * Retrieves the RDH value from the hardware 1827 ************************************************************************/ 1828 static int 1829 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1830 { 1831 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1832 int error; 1833 unsigned int val; 1834 1835 if (!rxr) 1836 return (0); 1837 1838 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me)); 1839 error = sysctl_handle_int(oidp, &val, 0, req); 1840 if (error || !req->newptr) 1841 return error; 1842 1843 return (0); 1844 } /* ixgbe_sysctl_rdh_handler */ 1845 1846 /************************************************************************ 1847 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1848 * 1849 * Retrieves the RDT value from the hardware 1850 ************************************************************************/ 1851 static int 1852 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1853 { 1854 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1855 int error; 1856 unsigned int val; 1857 1858 if (!rxr) 1859 return (0); 1860 1861 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me)); 1862 error = sysctl_handle_int(oidp, &val, 0, req); 1863 if (error || !req->newptr) 1864 return error; 1865 1866 return (0); 1867 } /* ixgbe_sysctl_rdt_handler */ 1868 1869 /************************************************************************ 1870 * ixgbe_if_vlan_register 1871 * 1872 * Run via vlan config EVENT, it enables us to use the 1873 * HW Filter table since we can get the vlan id. This 1874 * just creates the entry in the soft version of the 1875 * VFTA, init will repopulate the real table. 1876 ************************************************************************/ 1877 static void 1878 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1879 { 1880 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1881 u16 index, bit; 1882 1883 index = (vtag >> 5) & 0x7F; 1884 bit = vtag & 0x1F; 1885 sc->shadow_vfta[index] |= (1 << bit); 1886 ++sc->num_vlans; 1887 ixgbe_setup_vlan_hw_support(ctx); 1888 } /* ixgbe_if_vlan_register */ 1889 1890 /************************************************************************ 1891 * ixgbe_if_vlan_unregister 1892 * 1893 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 
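 * For example, vtag 100 maps to word 3 (100 >> 5) and
 * bit 4 (100 & 0x1F) of the shadow VFTA.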
1894 ************************************************************************/ 1895 static void 1896 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) 1897 { 1898 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1899 u16 index, bit; 1900 1901 index = (vtag >> 5) & 0x7F; 1902 bit = vtag & 0x1F; 1903 sc->shadow_vfta[index] &= ~(1 << bit); 1904 --sc->num_vlans; 1905 /* Re-init to load the changes */ 1906 ixgbe_setup_vlan_hw_support(ctx); 1907 } /* ixgbe_if_vlan_unregister */ 1908 1909 /************************************************************************ 1910 * ixgbe_setup_vlan_hw_support 1911 ************************************************************************/ 1912 static void 1913 ixgbe_setup_vlan_hw_support(if_ctx_t ctx) 1914 { 1915 struct ifnet *ifp = iflib_get_ifp(ctx); 1916 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1917 struct ixgbe_hw *hw = &sc->hw; 1918 struct rx_ring *rxr; 1919 int i; 1920 u32 ctrl; 1921 1922 1923 /* 1924 * We get here through init_locked, meaning 1925 * a soft reset; this has already cleared 1926 * the VFTA and other state, so if there 1927 * have been no VLANs registered, do nothing. 1928 */ 1929 if (sc->num_vlans == 0) 1930 return; 1931 1932 /* Set up the queues for VLANs */ 1933 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1934 for (i = 0; i < sc->num_rx_queues; i++) { 1935 rxr = &sc->rx_queues[i].rxr; 1936 /* On 82599 the VLAN enable is per-queue in RXDCTL */ 1937 if (hw->mac.type != ixgbe_mac_82598EB) { 1938 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 1939 ctrl |= IXGBE_RXDCTL_VME; 1940 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 1941 } 1942 rxr->vtag_strip = true; 1943 } 1944 } 1945 1946 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 1947 return; 1948 /* 1949 * A soft reset zeroes out the VFTA, so 1950 * we need to repopulate it now. 1951 */ 1952 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1953 if (sc->shadow_vfta[i] != 0) 1954 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 1955 sc->shadow_vfta[i]); 1956 1957 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1958 /* Enable the filter table if VLAN_HWFILTER is enabled */ 1959 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 1960 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1961 ctrl |= IXGBE_VLNCTRL_VFE; 1962 } 1963 if (hw->mac.type == ixgbe_mac_82598EB) 1964 ctrl |= IXGBE_VLNCTRL_VME; 1965 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1966 } /* ixgbe_setup_vlan_hw_support */ 1967 1968 /************************************************************************ 1969 * ixgbe_get_slot_info 1970 * 1971 * Get the width and transaction speed of 1972 * the slot this adapter is plugged into. 1973 ************************************************************************/ 1974 static void 1975 ixgbe_get_slot_info(struct ixgbe_softc *sc) 1976 { 1977 device_t dev = iflib_get_dev(sc->ctx); 1978 struct ixgbe_hw *hw = &sc->hw; 1979 int bus_info_valid = true; 1980 u32 offset; 1981 u16 link; 1982 1983 /* Some devices are behind an internal bridge */ 1984 switch (hw->device_id) { 1985 case IXGBE_DEV_ID_82599_SFP_SF_QP: 1986 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 1987 goto get_parent_info; 1988 default: 1989 break; 1990 } 1991 1992 ixgbe_get_bus_info(hw); 1993 1994 /* 1995 * Some devices don't use PCI-E, but there is no need 1996 * to display "Unknown" for bus speed and width.
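 * (The X550EM cases below simply return rather than print.)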
1997 */ 1998 switch (hw->mac.type) { 1999 case ixgbe_mac_X550EM_x: 2000 case ixgbe_mac_X550EM_a: 2001 return; 2002 default: 2003 goto display; 2004 } 2005 2006 get_parent_info: 2007 /* 2008 * For the Quad port adapter we need to parse back 2009 * up the PCI tree to find the speed of the expansion 2010 * slot into which this adapter is plugged. A bit more work. 2011 */ 2012 dev = device_get_parent(device_get_parent(dev)); 2013 #ifdef IXGBE_DEBUG 2014 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 2015 pci_get_slot(dev), pci_get_function(dev)); 2016 #endif 2017 dev = device_get_parent(device_get_parent(dev)); 2018 #ifdef IXGBE_DEBUG 2019 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 2020 pci_get_slot(dev), pci_get_function(dev)); 2021 #endif 2022 /* Now get the PCI Express Capabilities offset */ 2023 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 2024 /* 2025 * Hmm...can't get PCI-Express capabilities. 2026 * Falling back to default method. 2027 */ 2028 bus_info_valid = false; 2029 ixgbe_get_bus_info(hw); 2030 goto display; 2031 } 2032 /* ...and read the Link Status Register */ 2033 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2034 ixgbe_set_pci_config_data_generic(hw, link); 2035 2036 display: 2037 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 2038 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 2039 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 2040 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 2041 "Unknown"), 2042 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 2043 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 2044 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 2045 "Unknown")); 2046 2047 if (bus_info_valid) { 2048 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 2049 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 2050 (hw->bus.speed == ixgbe_bus_speed_2500))) { 2051 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2052 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 2053 } 2054 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 2055 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 2056 (hw->bus.speed < ixgbe_bus_speed_8000))) { 2057 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2058 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 2059 } 2060 } else 2061 device_printf(dev, "Unable to determine slot speed/width. 
The speed/width reported are that of the internal switch.\n"); 2062 2063 return; 2064 } /* ixgbe_get_slot_info */ 2065 2066 /************************************************************************ 2067 * ixgbe_if_msix_intr_assign 2068 * 2069 * Set up MSI-X interrupt resources and handlers 2070 ************************************************************************/ 2071 static int 2072 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 2073 { 2074 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2075 struct ix_rx_queue *rx_que = sc->rx_queues; 2076 struct ix_tx_queue *tx_que; 2077 int error, rid, vector = 0; 2078 int cpu_id = 0; 2079 char buf[16]; 2080 2081 /* Admin Queue is vector 0 */ 2082 rid = vector + 1; 2083 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) { 2084 rid = vector + 1; 2085 2086 snprintf(buf, sizeof(buf), "rxq%d", i); 2087 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 2088 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 2089 2090 if (error) { 2091 device_printf(iflib_get_dev(ctx), 2092 "Failed to allocate queue interrupt %d, err: %d", i, error); 2093 sc->num_rx_queues = i + 1; 2094 goto fail; 2095 } 2096 2097 rx_que->msix = vector; 2098 if (sc->feat_en & IXGBE_FEATURE_RSS) { 2099 /* 2100 * The queue ID is used as the RSS layer bucket ID. 2101 * We look up the queue ID -> RSS CPU ID and select 2102 * that. 2103 */ 2104 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2105 } else { 2106 /* 2107 * Bind the MSI-X vector, and thus the 2108 * rings, to the corresponding CPU. 2109 * 2110 * This just happens to match the default RSS 2111 * round-robin bucket -> queue -> CPU allocation. 2112 */ 2113 if (sc->num_rx_queues > 1) 2114 cpu_id = i; 2115 } 2116 2117 } 2118 for (int i = 0; i < sc->num_tx_queues; i++) { 2119 snprintf(buf, sizeof(buf), "txq%d", i); 2120 tx_que = &sc->tx_queues[i]; 2121 tx_que->msix = i % sc->num_rx_queues; 2122 iflib_softirq_alloc_generic(ctx, 2123 &sc->rx_queues[tx_que->msix].que_irq, 2124 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2125 } 2126 rid = vector + 1; 2127 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, 2128 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq"); 2129 if (error) { 2130 device_printf(iflib_get_dev(ctx), 2131 "Failed to register admin handler"); 2132 return (error); 2133 } 2134 2135 sc->vector = vector; 2136 2137 return (0); 2138 fail: 2139 iflib_irq_free(ctx, &sc->irq); 2140 rx_que = sc->rx_queues; 2141 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) 2142 iflib_irq_free(ctx, &rx_que->que_irq); 2143 2144 return (error); 2145 } /* ixgbe_if_msix_intr_assign */ 2146 2147 static inline void 2148 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que) 2149 { 2150 uint32_t newitr = 0; 2151 struct rx_ring *rxr = &que->rxr; 2152 2153 /* 2154 * Do Adaptive Interrupt Moderation: 2155 * - Write out last calculated setting 2156 * - Calculate based on average size over 2157 * the last interval.
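 * For example, an average of 1500 bytes/packet becomes 1524
 * after the 24-byte frame/CRC adjustment; that is above the
 * 300-1200 midrange, so the value saved for the next interrupt
 * is 1524 / 2 = 762.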
2158 */ 2159 if (que->eitr_setting) { 2160 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix), 2161 que->eitr_setting); 2162 } 2163 2164 que->eitr_setting = 0; 2165 /* Idle, do nothing */ 2166 if (rxr->bytes == 0) { 2167 return; 2168 } 2169 2170 if ((rxr->bytes) && (rxr->packets)) { 2171 newitr = (rxr->bytes / rxr->packets); 2172 } 2173 2174 newitr += 24; /* account for hardware frame, crc */ 2175 /* set an upper boundary */ 2176 newitr = min(newitr, 3000); 2177 2178 /* Be nice to the mid range */ 2179 if ((newitr > 300) && (newitr < 1200)) { 2180 newitr = (newitr / 3); 2181 } else { 2182 newitr = (newitr / 2); 2183 } 2184 2185 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 2186 newitr |= newitr << 16; 2187 } else { 2188 newitr |= IXGBE_EITR_CNT_WDIS; 2189 } 2190 2191 /* save for next interrupt */ 2192 que->eitr_setting = newitr; 2193 2194 /* Reset state */ 2195 rxr->bytes = 0; 2196 rxr->packets = 0; 2197 2198 return; 2199 } 2200 2201 /********************************************************************* 2202 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2203 **********************************************************************/ 2204 static int 2205 ixgbe_msix_que(void *arg) 2206 { 2207 struct ix_rx_queue *que = arg; 2208 struct ixgbe_softc *sc = que->sc; 2209 struct ifnet *ifp = iflib_get_ifp(que->sc->ctx); 2210 2211 /* Protect against spurious interrupts */ 2212 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2213 return (FILTER_HANDLED); 2214 2215 ixgbe_disable_queue(sc, que->msix); 2216 ++que->irqs; 2217 2218 /* Check for AIM */ 2219 if (sc->enable_aim) { 2220 ixgbe_perform_aim(sc, que); 2221 } 2222 2223 return (FILTER_SCHEDULE_THREAD); 2224 } /* ixgbe_msix_que */ 2225 2226 /************************************************************************ 2227 * ixgbe_media_status - Media Ioctl callback 2228 * 2229 * Called whenever the user queries the status of 2230 * the interface using ifconfig. 
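 * Reports the active media subtype, duplex and pause state,
 * derived from sc->phy_layer and the current link speed.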
2231 ************************************************************************/ 2232 static void 2233 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2234 { 2235 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2236 struct ixgbe_hw *hw = &sc->hw; 2237 int layer; 2238 2239 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2240 2241 ifmr->ifm_status = IFM_AVALID; 2242 ifmr->ifm_active = IFM_ETHER; 2243 2244 if (!sc->link_active) 2245 return; 2246 2247 ifmr->ifm_status |= IFM_ACTIVE; 2248 layer = sc->phy_layer; 2249 2250 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2251 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2252 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2253 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2254 switch (sc->link_speed) { 2255 case IXGBE_LINK_SPEED_10GB_FULL: 2256 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2257 break; 2258 case IXGBE_LINK_SPEED_1GB_FULL: 2259 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2260 break; 2261 case IXGBE_LINK_SPEED_100_FULL: 2262 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2263 break; 2264 case IXGBE_LINK_SPEED_10_FULL: 2265 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2266 break; 2267 } 2268 if (hw->mac.type == ixgbe_mac_X550) 2269 switch (sc->link_speed) { 2270 case IXGBE_LINK_SPEED_5GB_FULL: 2271 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 2272 break; 2273 case IXGBE_LINK_SPEED_2_5GB_FULL: 2274 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 2275 break; 2276 } 2277 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2278 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2279 switch (sc->link_speed) { 2280 case IXGBE_LINK_SPEED_10GB_FULL: 2281 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2282 break; 2283 } 2284 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2285 switch (sc->link_speed) { 2286 case IXGBE_LINK_SPEED_10GB_FULL: 2287 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2288 break; 2289 case IXGBE_LINK_SPEED_1GB_FULL: 2290 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2291 break; 2292 } 2293 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2294 switch (sc->link_speed) { 2295 case IXGBE_LINK_SPEED_10GB_FULL: 2296 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2297 break; 2298 case IXGBE_LINK_SPEED_1GB_FULL: 2299 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2300 break; 2301 } 2302 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2303 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2304 switch (sc->link_speed) { 2305 case IXGBE_LINK_SPEED_10GB_FULL: 2306 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2307 break; 2308 case IXGBE_LINK_SPEED_1GB_FULL: 2309 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2310 break; 2311 } 2312 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2313 switch (sc->link_speed) { 2314 case IXGBE_LINK_SPEED_10GB_FULL: 2315 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2316 break; 2317 } 2318 /* 2319 * XXX: These need to use the proper media types once 2320 * they're added. 
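 * Until then, builds without IFM_ETH_XTYPE report KR/KX4/KX
 * backplane links with the closest legacy subtypes (SR/CX4/CX).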
2321 */ 2322 #ifndef IFM_ETH_XTYPE 2323 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2324 switch (sc->link_speed) { 2325 case IXGBE_LINK_SPEED_10GB_FULL: 2326 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2327 break; 2328 case IXGBE_LINK_SPEED_2_5GB_FULL: 2329 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2330 break; 2331 case IXGBE_LINK_SPEED_1GB_FULL: 2332 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2333 break; 2334 } 2335 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2336 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2337 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2338 switch (sc->link_speed) { 2339 case IXGBE_LINK_SPEED_10GB_FULL: 2340 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2341 break; 2342 case IXGBE_LINK_SPEED_2_5GB_FULL: 2343 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2344 break; 2345 case IXGBE_LINK_SPEED_1GB_FULL: 2346 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2347 break; 2348 } 2349 #else 2350 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2351 switch (sc->link_speed) { 2352 case IXGBE_LINK_SPEED_10GB_FULL: 2353 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 2354 break; 2355 case IXGBE_LINK_SPEED_2_5GB_FULL: 2356 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2357 break; 2358 case IXGBE_LINK_SPEED_1GB_FULL: 2359 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2360 break; 2361 } 2362 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2363 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2364 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2365 switch (sc->link_speed) { 2366 case IXGBE_LINK_SPEED_10GB_FULL: 2367 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 2368 break; 2369 case IXGBE_LINK_SPEED_2_5GB_FULL: 2370 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2371 break; 2372 case IXGBE_LINK_SPEED_1GB_FULL: 2373 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2374 break; 2375 } 2376 #endif 2377 2378 /* If nothing is recognized... */ 2379 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 2380 ifmr->ifm_active |= IFM_UNKNOWN; 2381 2382 /* Display current flow control setting used on link */ 2383 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 2384 hw->fc.current_mode == ixgbe_fc_full) 2385 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2386 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 2387 hw->fc.current_mode == ixgbe_fc_full) 2388 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2389 } /* ixgbe_if_media_status */ 2390 2391 /************************************************************************ 2392 * ixgbe_if_media_change - Media Ioctl callback 2393 * 2394 * Called when the user changes speed/duplex using 2395 * media/mediaopt option with ifconfig. 2396 ************************************************************************/ 2397 static int 2398 ixgbe_if_media_change(if_ctx_t ctx) 2399 { 2400 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2401 struct ifmedia *ifm = iflib_get_media(ctx); 2402 struct ixgbe_hw *hw = &sc->hw; 2403 ixgbe_link_speed speed = 0; 2404 2405 INIT_DEBUGOUT("ixgbe_if_media_change: begin"); 2406 2407 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2408 return (EINVAL); 2409 2410 if (hw->phy.media_type == ixgbe_media_type_backplane) 2411 return (EPERM); 2412 2413 /* 2414 * We don't actually need to check against the supported 2415 * media types of the adapter; ifmedia will take care of 2416 * that for us.
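 * The selected subtype is translated into a link-speed mask
 * below and recorded in sc->advertise, one bit per speed
 * (0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M, 0x10 = 2.5G,
 * 0x20 = 5G).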
2417 */ 2418 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2419 case IFM_AUTO: 2420 case IFM_10G_T: 2421 speed |= IXGBE_LINK_SPEED_100_FULL; 2422 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2423 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2424 break; 2425 case IFM_10G_LRM: 2426 case IFM_10G_LR: 2427 #ifndef IFM_ETH_XTYPE 2428 case IFM_10G_SR: /* KR, too */ 2429 case IFM_10G_CX4: /* KX4 */ 2430 #else 2431 case IFM_10G_KR: 2432 case IFM_10G_KX4: 2433 #endif 2434 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2435 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2436 break; 2437 #ifndef IFM_ETH_XTYPE 2438 case IFM_1000_CX: /* KX */ 2439 #else 2440 case IFM_1000_KX: 2441 #endif 2442 case IFM_1000_LX: 2443 case IFM_1000_SX: 2444 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2445 break; 2446 case IFM_1000_T: 2447 speed |= IXGBE_LINK_SPEED_100_FULL; 2448 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2449 break; 2450 case IFM_10G_TWINAX: 2451 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2452 break; 2453 case IFM_5000_T: 2454 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2455 break; 2456 case IFM_2500_T: 2457 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2458 break; 2459 case IFM_100_TX: 2460 speed |= IXGBE_LINK_SPEED_100_FULL; 2461 break; 2462 case IFM_10_T: 2463 speed |= IXGBE_LINK_SPEED_10_FULL; 2464 break; 2465 default: 2466 goto invalid; 2467 } 2468 2469 hw->mac.autotry_restart = true; 2470 hw->mac.ops.setup_link(hw, speed, true); 2471 sc->advertise = 2472 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) | 2473 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) | 2474 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | 2475 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) | 2476 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) | 2477 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0); 2478 2479 return (0); 2480 2481 invalid: 2482 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2483 2484 return (EINVAL); 2485 } /* ixgbe_if_media_change */ 2486 2487 /************************************************************************ 2488 * ixgbe_set_promisc 2489 ************************************************************************/ 2490 static int 2491 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2492 { 2493 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2494 struct ifnet *ifp = iflib_get_ifp(ctx); 2495 u32 rctl; 2496 int mcnt = 0; 2497 2498 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); 2499 rctl &= (~IXGBE_FCTRL_UPE); 2500 if (ifp->if_flags & IFF_ALLMULTI) 2501 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2502 else { 2503 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); 2504 } 2505 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2506 rctl &= (~IXGBE_FCTRL_MPE); 2507 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2508 2509 if (ifp->if_flags & IFF_PROMISC) { 2510 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2511 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2512 } else if (ifp->if_flags & IFF_ALLMULTI) { 2513 rctl |= IXGBE_FCTRL_MPE; 2514 rctl &= ~IXGBE_FCTRL_UPE; 2515 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2516 } 2517 return (0); 2518 } /* ixgbe_if_promisc_set */ 2519 2520 /************************************************************************ 2521 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2522 ************************************************************************/ 2523 static int 2524 ixgbe_msix_link(void *arg) 2525 { 2526 struct ixgbe_softc *sc = arg; 2527 struct ixgbe_hw *hw = &sc->hw; 2528 u32 eicr, eicr_mask; 2529 s32 retval; 2530 2531 ++sc->link_irq; 2532 2533 /* Pause other interrupts */ 2534 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2535 2536 /* First 
get the cause */ 2537 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2538 /* Be sure the queue bits are not cleared */ 2539 eicr &= ~IXGBE_EICR_RTX_QUEUE; 2540 /* Clear interrupt with write */ 2541 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2542 2543 /* Link status change */ 2544 if (eicr & IXGBE_EICR_LSC) { 2545 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2546 sc->task_requests |= IXGBE_REQUEST_TASK_LSC; 2547 } 2548 2549 if (sc->hw.mac.type != ixgbe_mac_82598EB) { 2550 if ((sc->feat_en & IXGBE_FEATURE_FDIR) && 2551 (eicr & IXGBE_EICR_FLOW_DIR)) { 2552 /* This is probably overkill :) */ 2553 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1)) 2554 return (FILTER_HANDLED); 2555 /* Disable the interrupt */ 2556 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); 2557 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR; 2558 } else 2559 if (eicr & IXGBE_EICR_ECC) { 2560 device_printf(iflib_get_dev(sc->ctx), 2561 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n"); 2562 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2563 } 2564 2565 /* Check for over temp condition */ 2566 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 2567 switch (sc->hw.mac.type) { 2568 case ixgbe_mac_X550EM_a: 2569 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 2570 break; 2571 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 2572 IXGBE_EICR_GPI_SDP0_X550EM_a); 2573 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2574 IXGBE_EICR_GPI_SDP0_X550EM_a); 2575 retval = hw->phy.ops.check_overtemp(hw); 2576 if (retval != IXGBE_ERR_OVERTEMP) 2577 break; 2578 device_printf(iflib_get_dev(sc->ctx), 2579 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2580 device_printf(iflib_get_dev(sc->ctx), 2581 "System shutdown required!\n"); 2582 break; 2583 default: 2584 if (!(eicr & IXGBE_EICR_TS)) 2585 break; 2586 retval = hw->phy.ops.check_overtemp(hw); 2587 if (retval != IXGBE_ERR_OVERTEMP) 2588 break; 2589 device_printf(iflib_get_dev(sc->ctx), 2590 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2591 device_printf(iflib_get_dev(sc->ctx), 2592 "System shutdown required!\n"); 2593 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 2594 break; 2595 } 2596 } 2597 2598 /* Check for VF message */ 2599 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) && 2600 (eicr & IXGBE_EICR_MAILBOX)) 2601 sc->task_requests |= IXGBE_REQUEST_TASK_MBX; 2602 } 2603 2604 if (ixgbe_is_sfp(hw)) { 2605 /* Pluggable optics-related interrupt */ 2606 if (hw->mac.type >= ixgbe_mac_X540) 2607 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 2608 else 2609 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 2610 2611 if (eicr & eicr_mask) { 2612 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2613 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; 2614 } 2615 2616 if ((hw->mac.type == ixgbe_mac_82599EB) && 2617 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 2618 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2619 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2620 sc->task_requests |= IXGBE_REQUEST_TASK_MSF; 2621 } 2622 } 2623 2624 /* Check for fan failure */ 2625 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) { 2626 ixgbe_check_fan_failure(sc, eicr, true); 2627 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2628 } 2629 2630 /* External PHY interrupt */ 2631 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 2632 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2633 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 2634 sc->task_requests |= IXGBE_REQUEST_TASK_PHY; 2635 } 2636 2637 return (sc->task_requests != 0) ? 
FILTER_SCHEDULE_THREAD : FILTER_HANDLED; 2638 } /* ixgbe_msix_link */ 2639 2640 /************************************************************************ 2641 * ixgbe_sysctl_interrupt_rate_handler 2642 ************************************************************************/ 2643 static int 2644 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2645 { 2646 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2647 int error; 2648 unsigned int reg, usec, rate; 2649 2650 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix)); 2651 usec = ((reg & 0x0FF8) >> 3); 2652 if (usec > 0) 2653 rate = 500000 / usec; 2654 else 2655 rate = 0; 2656 error = sysctl_handle_int(oidp, &rate, 0, req); 2657 if (error || !req->newptr) 2658 return error; 2659 reg &= ~0xfff; /* default, no limitation */ 2660 ixgbe_max_interrupt_rate = 0; 2661 if (rate > 0 && rate < 500000) { 2662 if (rate < 1000) 2663 rate = 1000; 2664 ixgbe_max_interrupt_rate = rate; 2665 reg |= ((4000000/rate) & 0xff8); 2666 } 2667 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg); 2668 2669 return (0); 2670 } /* ixgbe_sysctl_interrupt_rate_handler */ 2671 2672 /************************************************************************ 2673 * ixgbe_add_device_sysctls 2674 ************************************************************************/ 2675 static void 2676 ixgbe_add_device_sysctls(if_ctx_t ctx) 2677 { 2678 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2679 device_t dev = iflib_get_dev(ctx); 2680 struct ixgbe_hw *hw = &sc->hw; 2681 struct sysctl_oid_list *child; 2682 struct sysctl_ctx_list *ctx_list; 2683 2684 ctx_list = device_get_sysctl_ctx(dev); 2685 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2686 2687 /* Sysctls for all devices */ 2688 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2689 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2690 sc, 0, ixgbe_sysctl_flowcntl, "I", 2691 IXGBE_SYSCTL_DESC_SET_FC); 2692 2693 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2694 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2695 sc, 0, ixgbe_sysctl_advertise, "I", 2696 IXGBE_SYSCTL_DESC_ADV_SPEED); 2697 2698 sc->enable_aim = ixgbe_enable_aim; 2699 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW, 2700 &sc->enable_aim, 0, "Interrupt Moderation"); 2701 2702 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version", 2703 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2704 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions"); 2705 2706 #ifdef IXGBE_DEBUG 2707 /* testing sysctls (for all devices) */ 2708 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2709 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2710 sc, 0, ixgbe_sysctl_power_state, 2711 "I", "PCI Power State"); 2712 2713 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2714 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2715 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2716 #endif 2717 /* for X550 series devices */ 2718 if (hw->mac.type >= ixgbe_mac_X550) 2719 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2720 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2721 sc, 0, ixgbe_sysctl_dmac, 2722 "I", "DMA Coalesce"); 2723 2724 /* for WoL-capable devices */ 2725 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2726 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2727 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2728 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2729 2730 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 
2731 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2732 sc, 0, ixgbe_sysctl_wufc, 2733 "I", "Enable/Disable Wake Up Filters"); 2734 } 2735 2736 /* for X552/X557-AT devices */ 2737 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2738 struct sysctl_oid *phy_node; 2739 struct sysctl_oid_list *phy_list; 2740 2741 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2742 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls"); 2743 phy_list = SYSCTL_CHILDREN(phy_node); 2744 2745 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2746 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2747 sc, 0, ixgbe_sysctl_phy_temp, 2748 "I", "Current External PHY Temperature (Celsius)"); 2749 2750 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2751 "overtemp_occurred", 2752 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2753 ixgbe_sysctl_phy_overtemp_occurred, "I", 2754 "External PHY High Temperature Event Occurred"); 2755 } 2756 2757 if (sc->feat_cap & IXGBE_FEATURE_EEE) { 2758 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2759 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2760 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2761 } 2762 } /* ixgbe_add_device_sysctls */ 2763 2764 /************************************************************************ 2765 * ixgbe_allocate_pci_resources 2766 ************************************************************************/ 2767 static int 2768 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2769 { 2770 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2771 device_t dev = iflib_get_dev(ctx); 2772 int rid; 2773 2774 rid = PCIR_BAR(0); 2775 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2776 RF_ACTIVE); 2777 2778 if (!(sc->pci_mem)) { 2779 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2780 return (ENXIO); 2781 } 2782 2783 /* Save bus_space values for READ/WRITE_REG macros */ 2784 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); 2785 sc->osdep.mem_bus_space_handle = 2786 rman_get_bushandle(sc->pci_mem); 2787 /* Set hw values for shared code */ 2788 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; 2789 2790 return (0); 2791 } /* ixgbe_allocate_pci_resources */ 2792 2793 /************************************************************************ 2794 * ixgbe_detach - Device removal routine 2795 * 2796 * Called when the driver is being removed. 2797 * Stops the adapter and deallocates all the resources 2798 * that were allocated for driver operation. 
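 * Fails with EBUSY while SR-IOV is still active; the VFs must
 * be detached first.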
2799 * 2800 * return 0 on success, positive on failure 2801 ************************************************************************/ 2802 static int 2803 ixgbe_if_detach(if_ctx_t ctx) 2804 { 2805 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2806 device_t dev = iflib_get_dev(ctx); 2807 u32 ctrl_ext; 2808 2809 INIT_DEBUGOUT("ixgbe_detach: begin"); 2810 2811 if (ixgbe_pci_iov_detach(dev) != 0) { 2812 device_printf(dev, "SR-IOV in use; detach first.\n"); 2813 return (EBUSY); 2814 } 2815 2816 ixgbe_setup_low_power_mode(ctx); 2817 2818 /* let hardware know driver is unloading */ 2819 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); 2820 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2821 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); 2822 2823 ixgbe_free_pci_resources(ctx); 2824 free(sc->mta, M_IXGBE); 2825 2826 return (0); 2827 } /* ixgbe_if_detach */ 2828 2829 /************************************************************************ 2830 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2831 * 2832 * Prepare the adapter/port for LPLU and/or WoL 2833 ************************************************************************/ 2834 static int 2835 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2836 { 2837 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2838 struct ixgbe_hw *hw = &sc->hw; 2839 device_t dev = iflib_get_dev(ctx); 2840 s32 error = 0; 2841 2842 if (!hw->wol_enabled) 2843 ixgbe_set_phy_power(hw, false); 2844 2845 /* Limit power management flow to X550EM baseT */ 2846 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2847 hw->phy.ops.enter_lplu) { 2848 /* Turn off support for APM wakeup. (Using ACPI instead) */ 2849 IXGBE_WRITE_REG(hw, IXGBE_GRC, 2850 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 2851 2852 /* 2853 * Clear Wake Up Status register to prevent any previous wakeup 2854 * events from waking us up immediately after we suspend. 
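 * WUS bits are write-one-to-clear, hence the 0xffffffff write
 * below.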
2855 */ 2856 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2857 2858 /* 2859 * Program the Wakeup Filter Control register with user filter 2860 * settings 2861 */ 2862 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc); 2863 2864 /* Enable wakeups and power management in Wakeup Control */ 2865 IXGBE_WRITE_REG(hw, IXGBE_WUC, 2866 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 2867 2868 /* X550EM baseT adapters need a special LPLU flow */ 2869 hw->phy.reset_disable = true; 2870 ixgbe_if_stop(ctx); 2871 error = hw->phy.ops.enter_lplu(hw); 2872 if (error) 2873 device_printf(dev, "Error entering LPLU: %d\n", error); 2874 hw->phy.reset_disable = false; 2875 } else { 2876 /* Just stop for other adapters */ 2877 ixgbe_if_stop(ctx); 2878 } 2879 2880 return error; 2881 } /* ixgbe_setup_low_power_mode */ 2882 2883 /************************************************************************ 2884 * ixgbe_shutdown - Shutdown entry point 2885 ************************************************************************/ 2886 static int 2887 ixgbe_if_shutdown(if_ctx_t ctx) 2888 { 2889 int error = 0; 2890 2891 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 2892 2893 error = ixgbe_setup_low_power_mode(ctx); 2894 2895 return (error); 2896 } /* ixgbe_if_shutdown */ 2897 2898 /************************************************************************ 2899 * ixgbe_suspend 2900 * 2901 * From D0 to D3 2902 ************************************************************************/ 2903 static int 2904 ixgbe_if_suspend(if_ctx_t ctx) 2905 { 2906 int error = 0; 2907 2908 INIT_DEBUGOUT("ixgbe_suspend: begin"); 2909 2910 error = ixgbe_setup_low_power_mode(ctx); 2911 2912 return (error); 2913 } /* ixgbe_if_suspend */ 2914 2915 /************************************************************************ 2916 * ixgbe_resume 2917 * 2918 * From D3 to D0 2919 ************************************************************************/ 2920 static int 2921 ixgbe_if_resume(if_ctx_t ctx) 2922 { 2923 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2924 device_t dev = iflib_get_dev(ctx); 2925 struct ifnet *ifp = iflib_get_ifp(ctx); 2926 struct ixgbe_hw *hw = &sc->hw; 2927 u32 wus; 2928 2929 INIT_DEBUGOUT("ixgbe_resume: begin"); 2930 2931 /* Read & clear WUS register */ 2932 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2933 if (wus) 2934 device_printf(dev, "Woken up by (WUS): %#010x\n", 2935 IXGBE_READ_REG(hw, IXGBE_WUS)); 2936 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2937 /* And clear WUFC until next low-power transition */ 2938 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2939 2940 /* 2941 * Required after D3->D0 transition; 2942 * will re-advertise all previous advertised speeds 2943 */ 2944 if (ifp->if_flags & IFF_UP) 2945 ixgbe_if_init(ctx); 2946 2947 return (0); 2948 } /* ixgbe_if_resume */ 2949 2950 /************************************************************************ 2951 * ixgbe_if_mtu_set - Ioctl mtu entry point 2952 * 2953 * Return 0 on success, EINVAL on failure 2954 ************************************************************************/ 2955 static int 2956 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 2957 { 2958 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2959 int error = 0; 2960 2961 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); 2962 2963 if (mtu > IXGBE_MAX_MTU) { 2964 error = EINVAL; 2965 } else { 2966 sc->max_frame_size = mtu + IXGBE_MTU_HDR; 2967 } 2968 2969 return error; 2970 } /* ixgbe_if_mtu_set */ 2971 2972 /************************************************************************ 2973 * ixgbe_if_crcstrip_set 2974 
************************************************************************/ 2975 static void 2976 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip) 2977 { 2978 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2979 struct ixgbe_hw *hw = &sc->hw; 2980 /* crc stripping is set in two places: 2981 * IXGBE_HLREG0 (modified on init_locked and hw reset) 2982 * IXGBE_RDRXCTL (set by the original driver in 2983 * ixgbe_setup_hw_rsc() called in init_locked. 2984 * We disable the setting when netmap is compiled in). 2985 * We update the values here, but also in ixgbe.c because 2986 * init_locked sometimes is called outside our control. 2987 */ 2988 uint32_t hl, rxc; 2989 2990 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2991 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2992 #ifdef NETMAP 2993 if (netmap_verbose) 2994 D("%s read HLREG 0x%x rxc 0x%x", 2995 onoff ? "enter" : "exit", hl, rxc); 2996 #endif 2997 /* hw requirements ... */ 2998 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2999 rxc |= IXGBE_RDRXCTL_RSCACKC; 3000 if (onoff && !crcstrip) { 3001 /* keep the crc. Fast rx */ 3002 hl &= ~IXGBE_HLREG0_RXCRCSTRP; 3003 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; 3004 } else { 3005 /* reset default mode */ 3006 hl |= IXGBE_HLREG0_RXCRCSTRP; 3007 rxc |= IXGBE_RDRXCTL_CRCSTRIP; 3008 } 3009 #ifdef NETMAP 3010 if (netmap_verbose) 3011 D("%s write HLREG 0x%x rxc 0x%x", 3012 onoff ? "enter" : "exit", hl, rxc); 3013 #endif 3014 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); 3015 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); 3016 } /* ixgbe_if_crcstrip_set */ 3017 3018 /********************************************************************* 3019 * ixgbe_if_init - Init entry point 3020 * 3021 * Used in two ways: It is used by the stack as an init 3022 * entry point in network interface structure. It is also 3023 * used by the driver as a hw/sw initialization routine to 3024 * get to a consistent state. 3025 * 3026 * Return 0 on success, positive on failure 3027 **********************************************************************/ 3028 void 3029 ixgbe_if_init(if_ctx_t ctx) 3030 { 3031 struct ixgbe_softc *sc = iflib_get_softc(ctx); 3032 struct ifnet *ifp = iflib_get_ifp(ctx); 3033 device_t dev = iflib_get_dev(ctx); 3034 struct ixgbe_hw *hw = &sc->hw; 3035 struct ix_rx_queue *rx_que; 3036 struct ix_tx_queue *tx_que; 3037 u32 txdctl, mhadd; 3038 u32 rxdctl, rxctrl; 3039 u32 ctrl_ext; 3040 3041 int i, j, err; 3042 3043 INIT_DEBUGOUT("ixgbe_if_init: begin"); 3044 3045 /* Queue indices may change with IOV mode */ 3046 ixgbe_align_all_queue_indices(sc); 3047 3048 /* reprogram the RAR[0] in case user changed it. 
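(RAR[0] holds the adapter's primary unicast address.)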
*/ 3049 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV); 3050 3051 /* Get the latest MAC address; the user may have set a LAA */ 3052 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3053 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1); 3054 hw->addr_ctrl.rar_used_count = 1; 3055 3056 ixgbe_init_hw(hw); 3057 3058 ixgbe_initialize_iov(sc); 3059 3060 ixgbe_initialize_transmit_units(ctx); 3061 3062 /* Set up the multicast table */ 3063 ixgbe_if_multi_set(ctx); 3064 3065 /* Determine the correct mbuf pool based on frame size */ 3066 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 3067 3068 /* Configure RX settings */ 3069 ixgbe_initialize_receive_units(ctx); 3070 3071 /* 3072 * Initialize variable holding task enqueue requests 3073 * from MSI-X interrupts 3074 */ 3075 sc->task_requests = 0; 3076 3077 /* Enable SDP & MSI-X interrupts based on adapter */ 3078 ixgbe_config_gpie(sc); 3079 3080 /* Set MTU size */ 3081 if (ifp->if_mtu > ETHERMTU) { 3082 /* aka IXGBE_MAXFRS on 82599 and newer */ 3083 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3084 mhadd &= ~IXGBE_MHADD_MFS_MASK; 3085 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 3086 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 3087 } 3088 3089 /* Now enable all the queues */ 3090 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 3091 struct tx_ring *txr = &tx_que->txr; 3092 3093 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 3094 txdctl |= IXGBE_TXDCTL_ENABLE; 3095 /* Set WTHRESH to 8, burst writeback */ 3096 txdctl |= (8 << 16); 3097 /* 3098 * When the internal queue falls below PTHRESH (32), 3099 * start prefetching as long as there are at least 3100 * HTHRESH (1) buffers ready. The values are taken 3101 * from the Intel Linux driver 3.8.21. 3102 * Prefetching enables tx line rate even with 1 queue.
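 * In TXDCTL, PTHRESH occupies bits 6:0, HTHRESH bits 14:8 and
 * WTHRESH bits 22:16, which is what (32 << 0) | (1 << 8) below
 * and (8 << 16) above encode.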
3103 */ 3104 txdctl |= (32 << 0) | (1 << 8); 3105 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 3106 } 3107 3108 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 3109 struct rx_ring *rxr = &rx_que->rxr; 3110 3111 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3112 if (hw->mac.type == ixgbe_mac_82598EB) { 3113 /* 3114 * PTHRESH = 21 3115 * HTHRESH = 4 3116 * WTHRESH = 8 3117 */ 3118 rxdctl &= ~0x3FFFFF; 3119 rxdctl |= 0x080420; 3120 } 3121 rxdctl |= IXGBE_RXDCTL_ENABLE; 3122 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 3123 for (j = 0; j < 10; j++) { 3124 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 3125 IXGBE_RXDCTL_ENABLE) 3126 break; 3127 else 3128 msec_delay(1); 3129 } 3130 wmb(); 3131 } 3132 3133 /* Enable Receive engine */ 3134 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3135 if (hw->mac.type == ixgbe_mac_82598EB) 3136 rxctrl |= IXGBE_RXCTRL_DMBYPS; 3137 rxctrl |= IXGBE_RXCTRL_RXEN; 3138 ixgbe_enable_rx_dma(hw, rxctrl); 3139 3140 /* Set up MSI/MSI-X routing */ 3141 if (ixgbe_enable_msix) { 3142 ixgbe_configure_ivars(sc); 3143 /* Set up auto-mask */ 3144 if (hw->mac.type == ixgbe_mac_82598EB) 3145 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3146 else { 3147 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3148 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3149 } 3150 } else { /* Simple settings for Legacy/MSI */ 3151 ixgbe_set_ivar(sc, 0, 0, 0); 3152 ixgbe_set_ivar(sc, 0, 0, 1); 3153 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3154 } 3155 3156 ixgbe_init_fdir(sc); 3157 3158 /* 3159 * Check on any SFP devices that 3160 * need to be kick-started 3161 */ 3162 if (hw->phy.type == ixgbe_phy_none) { 3163 err = hw->phy.ops.identify(hw); 3164 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3165 device_printf(dev, 3166 "Unsupported SFP+ module type was detected.\n"); 3167 return; 3168 } 3169 } 3170 3171 /* Set moderation on the Link interrupt */ 3172 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR); 3173 3174 /* Enable power to the phy. 
*/ 3175 ixgbe_set_phy_power(hw, true); 3176 3177 /* Config/Enable Link */ 3178 ixgbe_config_link(ctx); 3179 3180 /* Hardware Packet Buffer & Flow Control setup */ 3181 ixgbe_config_delay_values(sc); 3182 3183 /* Initialize the FC settings */ 3184 ixgbe_start_hw(hw); 3185 3186 /* Set up VLAN support and filter */ 3187 ixgbe_setup_vlan_hw_support(ctx); 3188 3189 /* Set up DMA Coalescing */ 3190 ixgbe_config_dmac(sc); 3191 3192 /* And now turn on interrupts */ 3193 ixgbe_if_enable_intr(ctx); 3194 3195 /* Enable the use of the MBX by the VFs */ 3196 if (sc->feat_en & IXGBE_FEATURE_SRIOV) { 3197 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3198 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 3199 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3200 } 3201 3202 } /* ixgbe_if_init */ 3203 3204 /************************************************************************ 3205 * ixgbe_set_ivar 3206 * 3207 * Set up the correct IVAR register for a particular MSI-X interrupt 3208 * (yes this is all very magic and confusing :) 3209 * - entry is the register array entry 3210 * - vector is the MSI-X vector for this queue 3211 * - type is RX/TX/MISC 3212 ************************************************************************/ 3213 static void 3214 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type) 3215 { 3216 struct ixgbe_hw *hw = &sc->hw; 3217 u32 ivar, index; 3218 3219 vector |= IXGBE_IVAR_ALLOC_VAL; 3220 3221 switch (hw->mac.type) { 3222 case ixgbe_mac_82598EB: 3223 if (type == -1) 3224 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3225 else 3226 entry += (type * 64); 3227 index = (entry >> 2) & 0x1F; 3228 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3229 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3230 ivar |= (vector << (8 * (entry & 0x3))); 3231 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar); 3232 break; 3233 case ixgbe_mac_82599EB: 3234 case ixgbe_mac_X540: 3235 case ixgbe_mac_X550: 3236 case ixgbe_mac_X550EM_x: 3237 case ixgbe_mac_X550EM_a: 3238 if (type == -1) { /* MISC IVAR */ 3239 index = (entry & 1) * 8; 3240 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3241 ivar &= ~(0xFF << index); 3242 ivar |= (vector << index); 3243 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3244 } else { /* RX/TX IVARS */ 3245 index = (16 * (entry & 1)) + (8 * type); 3246 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3247 ivar &= ~(0xFF << index); 3248 ivar |= (vector << index); 3249 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3250 } break; 3251 default: 3252 break; 3253 } 3254 } /* ixgbe_set_ivar */ 3255 3256 /************************************************************************ 3257 * ixgbe_configure_ivars 3258 ************************************************************************/ 3259 static void 3260 ixgbe_configure_ivars(struct ixgbe_softc *sc) 3261 { 3262 struct ix_rx_queue *rx_que = sc->rx_queues; 3263 struct ix_tx_queue *tx_que = sc->tx_queues; 3264 u32 newitr; 3265 3266 if (ixgbe_max_interrupt_rate > 0) 3267 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3268 else { 3269 /* 3270 * Disable DMA coalescing if interrupt moderation is 3271 * disabled.
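 * Otherwise the initial interval is (4000000 / rate) masked to
 * the EITR field, e.g. ixgbe_max_interrupt_rate = 31250 yields
 * 4000000 / 31250 = 128 (0x80).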
/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		sc->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		/* First the RX queue entry */
		ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);

		/* Set an Initial EITR value */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
	}
	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		/* ... and the TX */
		ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
	}
	/* For the Link interrupt */
	ixgbe_set_ivar(sc, 1, sc->vector, -1);
} /* ixgbe_configure_ivars */

/************************************************************************
 * ixgbe_config_gpie
 ************************************************************************/
static void
ixgbe_config_gpie(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (sc->intr_type == IFLIB_INTR_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		     |  IXGBE_GPIE_EIAME
		     |  IXGBE_GPIE_PBA_SUPPORT
		     |  IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
} /* ixgbe_config_gpie */

/************************************************************************
 * ixgbe_config_delay_values
 *
 * Requires sc->max_frame_size to be set.
 ************************************************************************/
static void
ixgbe_config_delay_values(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 rxpb, frame, size, tmp;

	frame = sc->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = true;
} /* ixgbe_config_delay_values */
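/*
 * Worked sketch for ixgbe_config_delay_values() above (illustrative; the
 * exact byte counts depend on the IXGBE_DV, IXGBE_LOW_DV and IXGBE_BT2KB
 * macro definitions): rxpb = RXPBSIZE(0) >> 10 is the packet buffer size
 * in KB, and the high water mark is that size minus the delay allowance
 * for one max-sized frame in flight, so an XOFF pause frame is sent while
 * enough buffer remains to absorb data already on the wire.  The low
 * water mark is the drain point at which XON is sent again.
 */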
/************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 * Called whenever the multicast address list is updated.
 ************************************************************************/
static u_int
ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_mc_addr *mta = sc->mta;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);
	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	mta[idx].vmdq = sc->pool;

	return (1);
} /* ixgbe_mc_filter_apply */

static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	u8 *update_ptr;
	u32 fctrl;
	u_int mcnt;

	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");

	mta = sc->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(ifp, ixgbe_mc_filter_apply, sc);

	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);

	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, true);
	}
} /* ixgbe_if_multi_set */

/************************************************************************
 * ixgbe_mc_array_itr
 *
 * An iterator function needed by the multicast shared code.
 * It feeds the shared code routine the addresses in the
 * array of ixgbe_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8 *)(mta + 1);

	return (mta->addr);
} /* ixgbe_mc_array_itr */

/************************************************************************
 * ixgbe_if_timer - Timer routine
 *
 * Checks for link status, updates statistics,
 * and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	/* Check for pluggable optics */
	if (sc->sfp_probe)
		if (!ixgbe_sfp_probe(ctx))
			return; /* Nothing to do */

	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixgbe_if_timer */
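/*
 * Note: sc->sfp_probe is set during attach when an SFP cage is found
 * empty, so ixgbe_if_timer() above keeps polling ixgbe_sfp_probe()
 * below until a module is inserted.
 */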
/************************************************************************
 * ixgbe_sfp_probe
 *
 * Determine if a port had optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	bool result = false;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		sc->sfp_probe = false;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module detected!\n");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = true;
	}
out:
	return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 err, cage_full = 0;

	if (sc->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */

/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, true);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(sc->media);
	ixgbe_add_media_types(sc->ctx);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
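/*
 * Ordering note: ixgbe_handle_mod() above only requests the MSF task
 * (or cancels the request on failure); ixgbe_if_update_admin_status()
 * runs the MOD handler before the MSF handler, so a newly identified
 * module is set up before link is renegotiated for it.
 */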
/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error;

	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(sc->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!!"
		    " PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(sc->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 * Disables all traffic on the adapter by issuing a
 * global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	/* Clear the stopped flag so the shared stop path runs in full */
	hw->adapter_stopped = false;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	sc->link_up = false;
	ixgbe_if_update_admin_status(ctx);

	/* Reprogram RAR[0] in case the user changed it. */
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
} /* ixgbe_if_stop */
/************************************************************************
 * ixgbe_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);

	if (sc->link_up) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s\n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&sc->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(sc);
			/* should actually be negotiated value */
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    IF_Gbps(10));

			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	}

	/* Handle task requests from msix_link() */
	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	sc->task_requests = 0;

	ixgbe_update_stats_counters(sc);
} /* ixgbe_if_update_admin_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

	if (dcfg->watchdog_timer != sc->dmac ||
	    dcfg->link_speed != sc->link_speed) {
		dcfg->watchdog_timer = sc->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = sc->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */
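/*
 * Usage note (illustrative): DMA coalescing is driven by the
 * dev.ix.N.dmac sysctl handled in ixgbe_sysctl_dmac() further down;
 * e.g., assuming unit 0:
 *
 *   # sysctl dev.ix.0.dmac=1000    (enable, 1000us watchdog)
 *   # sysctl dev.ix.0.dmac=0       (disable)
 *
 * The value cached in sc->dmac is only pushed to hardware here, when
 * the link speed or timer value actually changes.
 */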
/************************************************************************
 * ixgbe_if_enable_intr
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (sc->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixgbe_if_enable_intr */

/************************************************************************
 * ixgbe_if_disable_intr
 ************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (sc->intr_type == IFLIB_INTR_MSIX)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixgbe_if_disable_intr */

/************************************************************************
 * ixgbe_link_intr_enable
 ************************************************************************/
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
	struct ixgbe_hw *hw =
	    &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
} /* ixgbe_link_intr_enable */

/************************************************************************
 * ixgbe_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixgbe_enable_queue(sc, que->msix);

	return (0);
} /* ixgbe_if_rx_queue_intr_enable */
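/*
 * Worked example for the queue enable/disable helpers below: the 64-bit
 * 'queue' bit is split across EIMS_EX(0)/EIMS_EX(1) (and EIMC_EX on
 * disable), so MSI-X vector 37 maps to bit 5 of the second register
 * (1ULL << 37 becomes (1 << 5) after the >> 32).
 */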
/************************************************************************
 * ixgbe_enable_queue
 ************************************************************************/
static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
} /* ixgbe_enable_queue */

/************************************************************************
 * ixgbe_disable_queue
 ************************************************************************/
static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
} /* ixgbe_disable_queue */
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ix_rx_queue *que = sc->rx_queues;
	struct ixgbe_hw *hw = &sc->hw;
	if_ctx_t ctx = sc->ctx;
	u32 eicr, eicr_mask;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 * SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, fc;

	sc = (struct ixgbe_softc *)arg1;
	fc = sc->hw.fc.current_mode;

	error = sysctl_handle_int(oidp, &fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (fc == sc->hw.fc.current_mode)
		return (0);

	return (ixgbe_set_flowcntl(sc, fc));
} /* ixgbe_sysctl_flowcntl */
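/*
 * Usage note (illustrative, assuming the handler above is attached as
 * dev.ix.N.fc): flow control can be changed at runtime, e.g.
 *
 *   # sysctl dev.ix.0.fc=3    (full RX+TX pause)
 *   # sysctl dev.ix.0.fc=0    (off, re-enabling per-queue RX drop)
 *
 * The mode values are decoded by ixgbe_set_flowcntl() below.
 */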
/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 * Flow control values:
 *   0 - off
 *   1 - rx pause
 *   2 - tx pause
 *   3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		sc->hw.fc.requested_mode = fc;
		if (sc->num_rx_queues > 1)
			ixgbe_disable_rx_drop(sc);
		break;
	case ixgbe_fc_none:
		sc->hw.fc.requested_mode = ixgbe_fc_none;
		if (sc->num_rx_queues > 1)
			ixgbe_enable_rx_drop(sc);
		break;
	default:
		return (EINVAL);
	}

	/* Don't autoneg if forcing a value */
	sc->hw.fc.disable_fc_autoneg = true;
	ixgbe_fc_enable(&sc->hw);

	return (0);
} /* ixgbe_set_flowcntl */

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 * Enable the hardware to drop packets when the buffer is
 * full. This is useful with multiqueue, so that no single
 * queue being full stalls the entire RX engine. We only
 * enable this when Multiqueue is enabled AND Flow Control
 * is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* Enable drop for each VF */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* Disable drop for each VF */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 * SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, advertise;

	sc = (struct ixgbe_softc *)arg1;
	advertise = sc->advertise;

	error = sysctl_handle_int(oidp, &advertise, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return (ixgbe_set_advertise(sc, advertise));
} /* ixgbe_sysctl_advertise */
/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 * Flags:
 *   0x1  - advertise 100 Mb
 *   0x2  - advertise 1G
 *   0x4  - advertise 10G
 *   0x8  - advertise 10 Mb (yes, Mb)
 *   0x10 - advertise 2.5G (disabled by default)
 *   0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32 err = IXGBE_NOT_IMPLEMENTED;
	bool negotiate = false;

	/* Checks to validate new value */
	if (sc->advertise == advertise) /* no change */
		return (0);

	hw = &sc->hw;

	/* No speed changes for backplane media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber))) {
		device_printf(dev,
		    "Advertised speed can only be set on copper or "
		    "multispeed fiber media types.\n");
		return (EINVAL);
	}

	if (advertise < 0x1 || advertise > 0x3F) {
		device_printf(dev,
		    "Invalid advertised speed; valid modes are 0x1 through "
		    "0x3F\n");
		return (EINVAL);
	}

	if (hw->mac.ops.get_link_capabilities) {
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(dev,
			    "Unable to determine supported advertise speeds\n");
			return (ENODEV);
		}
	}

	/* Set new value and report new advertised mode */
	if (advertise & 0x1) {
		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
			device_printf(dev,
			    "Interface does not support 100Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_100_FULL;
	}
	if (advertise & 0x2) {
		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 1Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	}
	if (advertise & 0x4) {
		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	}
	if (advertise & 0x8) {
		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10_FULL;
	}
	if (advertise & 0x10) {
		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 2.5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
	}
	if (advertise & 0x20) {
		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(hw, speed, true);
	sc->advertise = advertise;

	return (0);
} /* ixgbe_set_advertise */
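/*
 * Usage note (illustrative, assuming the wrapper is attached as
 * dev.ix.N.advertise_speed): the flags above OR together, so
 * 0x6 = 0x2 | 0x4 advertises both 1G and 10G:
 *
 *   # sysctl dev.ix.0.advertise_speed=0x6
 */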
/************************************************************************
 * ixgbe_get_default_advertise - Get default advertised speed settings
 *
 * Formatted for sysctl usage.
 * Flags:
 *   0x1  - advertise 100 Mb
 *   0x2  - advertise 1G
 *   0x4  - advertise 10G
 *   0x8  - advertise 10 Mb (yes, Mb)
 *   0x10 - advertise 2.5G (disabled by default)
 *   0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_get_default_advertise(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int speed;
	ixgbe_link_speed link_caps = 0;
	s32 err;
	bool negotiate = false;

	/*
	 * Advertised speed means nothing unless it's copper or
	 * multi-speed fiber
	 */
	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
	    !(hw->phy.multispeed_fiber))
		return (0);

	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
	if (err != IXGBE_SUCCESS)
		return (0);

	if (hw->mac.type == ixgbe_mac_X550) {
		/*
		 * 2.5G and 5G autonegotiation speeds on X550
		 * are disabled by default due to reported
		 * interoperability issues with some switches.
		 */
		link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
		    IXGBE_LINK_SPEED_5GB_FULL);
	}

	speed =
	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);

	return (speed);
} /* ixgbe_get_default_advertise */
/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 * Control values:
 *   0/1 - off / on (use default value of 1000)
 *
 * Legal timer values are:
 *   50, 100, 250, 500, 1000, 2000, 5000, 10000
 *
 * Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ifnet *ifp = iflib_get_ifp(sc->ctx);
	int error;
	u16 newval;

	newval = sc->dmac;
	error = sysctl_handle_16(oidp, &newval, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		sc->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		sc->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		sc->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 * Sysctl to test power states
 * Values:
 *   0      - set device to D0
 *   3      - set device to D3
 *   (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_handle_int(oidp, &new_ps, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
} /* ixgbe_sysctl_power_state */
#endif

/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 * Sysctl to enable/disable the WoL capability,
 * if supported by the adapter.
 *
 * Values:
 *   0 - disabled
 *   1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	int new_wol_enabled;
	int error = 0;

	new_wol_enabled = hw->wol_enabled;
	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	new_wol_enabled = !!(new_wol_enabled);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	if (new_wol_enabled > 0 && !sc->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
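/*
 * Usage note (illustrative, assuming the handlers are attached as
 * dev.ix.N.wol_enable and dev.ix.N.wufc): enable WoL first, then pick
 * the wake-up packet types with the filter flags documented below,
 * e.g. magic packet only:
 *
 *   # sysctl dev.ix.0.wol_enable=1
 *   # sysctl dev.ix.0.wufc=0x2
 */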
/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 * Sysctl to enable/disable the types of packets that the
 * adapter will wake up on upon receipt.
 * Flags:
 *   0x1  - Link Status Change
 *   0x2  - Magic Packet
 *   0x4  - Direct Exact
 *   0x8  - Directed Multicast
 *   0x10 - Broadcast
 *   0x20 - ARP/IPv4 Request Packet
 *   0x40 - Direct IPv4 Packet
 *   0x80 - Direct IPv6 Packet
 *
 * Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	int error = 0;
	u32 new_wufc;

	new_wufc = sc->wufc;

	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == sc->wufc)
		return (0);

	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * Replace the flag byte but preserve the upper bits of the old
	 * value; OR-ing the old flag byte back in would make it
	 * impossible to clear a previously set flag.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff00 & sc->wufc);
	sc->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
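/*
 * Decoding note for the RETA dump above: each 32-bit RETA/ERETA word
 * holds four table entries, one per byte (the low bits of each byte
 * select the RX queue), lowest hash bucket in the lowest byte.  So a
 * word of 0x03020100 spreads four consecutive hash buckets over RX
 * queues 0 through 3.
 */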
/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 * For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	/* Shift temp for output */
	reg = reg >> 8;

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 * Reports (directly from the PHY) whether the current PHY
 * temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Get occurrence bit */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */

/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 * Sysctl to set EEE power saving feature
 * Values:
 *   0      - disable EEE
 *   1      - enable EEE
 *   (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	struct ifnet *ifp = iflib_get_ifp(sc->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = ixgbe_setup_eee(&sc->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	else
		sc->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
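/*
 * Usage note (illustrative, assuming the handler above is attached as
 * dev.ix.N.eee_state): reading returns the cached state; writing 0 or 1
 * reconfigures EEE and restarts autonegotiation:
 *
 *   # sysctl dev.ix.0.eee_state=1
 */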
/************************************************************************
 * ixgbe_init_device_features
 ************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
	sc->feat_cap = IXGBE_FEATURE_NETMAP
	             | IXGBE_FEATURE_RSS
	             | IXGBE_FEATURE_MSI
	             | IXGBE_FEATURE_MSIX
	             | IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (sc->feat_cap & IXGBE_FEATURE_EEE)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev,
			    "Device does not support Flow Director."
			    " Leaving disabled.\n");
	}
	/*
	 * Message Signaled Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		sc->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		sc->feat_en &= ~IXGBE_FEATURE_RSS;
		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
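/*
 * Usage note (illustrative): the global knobs consulted above
 * (ixgbe_enable_fdir, ixgbe_enable_msix, ixgbe_enable_rss) are exposed
 * as loader tunables under hw.ix in this driver's option handling, so
 * Flow Director can be requested from /boot/loader.conf with e.g.:
 *
 *   hw.ix.enable_fdir=1
 */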
/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	uint16_t phyfw = 0;
	int status;
	const char *space = "";

	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_orom_version(hw, &nvm_ver);     /* Option ROM */
	/* eTrack identifies a build in Intel's SCM */
	ixgbe_get_etk_id(hw, &nvm_ver);
	status = ixgbe_get_phy_firmware_version(hw, &phyfw);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", space,
		    nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
		space = " ";
	}

	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
		space = " ";
	}

	if (phyfw != 0 && status == IXGBE_SUCCESS)
		sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
} /* ixgbe_sbuf_fw_version */

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */