/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
    /* required last entry */
    PVID_END
};

static void *ixgbe_register(device_t);
static int  ixgbe_if_attach_pre(if_ctx_t);
static int  ixgbe_if_attach_post(if_ctx_t);
static int  ixgbe_if_detach(if_ctx_t);
static int  ixgbe_if_shutdown(if_ctx_t);
static int  ixgbe_if_suspend(if_ctx_t);
static int  ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixgbe_if_media_change(if_ctx_t);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int  ixgbe_if_promisc_set(if_ctx_t, int);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int  ixgbe_allocate_pci_resources(if_ctx_t);
static int  ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int  ixgbe_msix_link(void *);
static int  ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int  ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
static int  ixgbe_get_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 *  FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, ixgbe_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD(device_suspend, iflib_device_suspend),
    DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
    DEVMETHOD(pci_iov_init, iflib_device_iov_init),
    DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
    DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

static driver_t ix_driver = {
    "ix", ix_methods, sizeof(struct ixgbe_softc),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
    DEVMETHOD(ifdi_detach, ixgbe_if_detach),
    DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
    DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
    DEVMETHOD(ifdi_resume, ixgbe_if_resume),
    DEVMETHOD(ifdi_init, ixgbe_if_init),
    DEVMETHOD(ifdi_stop, ixgbe_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
    DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
    DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
    DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
    DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
    DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
    DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
    DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
    DEVMETHOD(ifdi_timer, ixgbe_if_timer),
    DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
    DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
    DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
    DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
    DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
    DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
    DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
    DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
    "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};
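
/*
 * Note on the default below: the 4000000 constant appears to correspond to
 * an EITR interval granularity of roughly 0.25 us (1,000,000 us/s divided
 * by 0.25 us); that reading of the constant is an assumption, not a
 * datasheet citation.  If IXGBE_LOW_LATENCY is 128, as defined in ixgbe.h,
 * the default works out to 4000000 / 128 = 31250 interrupts per second per
 * vector.
 */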

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.  This only works as a
 * compile-time option right now, since it is set during attach;
 * set this to 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Enabling this allows the use of unsupported SFP+ modules;
 * note that if you do so, you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation,
 * which means that the interrupt rate is varied over time
 * based on the traffic for that interrupt vector.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;
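
/*
 * Template shared context handed back to iflib from ixgbe_register().
 * Roughly speaking, iflib uses these fields as follows: the
 * isc_n{t,r}xd_{min,max,default} arrays bound and seed the descriptor
 * ring size tunables, isc_q_align constrains the alignment of the ring
 * allocations, and the isc_*_maxsize/maxsegsize values shape the DMA
 * tags created for TX, TSO, and RX buffers.
 */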
static struct if_shared_ctx ixgbe_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
    .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = PAGE_SIZE,
    .isc_rx_maxsize = PAGE_SIZE*4,
    .isc_rx_nsegments = 1,
    .isc_rx_maxsegsize = PAGE_SIZE*4,
    .isc_nfl = 1,
    .isc_ntxqs = 1,
    .isc_nrxqs = 1,

    .isc_admin_intrcnt = 1,
    .isc_vendor_info = ixgbe_vendor_info_array,
    .isc_driver_version = ixgbe_driver_version,
    .isc_driver = &ixgbe_if_driver,
    .isc_flags = IFLIB_TSO_INIT_IP,

    .isc_nrxd_min = {MIN_RXD},
    .isc_ntxd_min = {MIN_TXD},
    .isc_nrxd_max = {MAX_RXD},
    .isc_ntxd_max = {MAX_TXD},
    .isc_nrxd_default = {DEFAULT_RXD},
    .isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = sc->shared;
    struct ix_tx_queue *que;
    int i, j, error;

    MPASS(sc->num_tx_queues > 0);
    MPASS(sc->num_tx_queues == ntxqsets);
    MPASS(ntxqs == 1);

    /* Allocate queue structure memory */
    sc->tx_queues =
        (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!sc->tx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate TX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
        struct tx_ring *txr = &que->txr;

        /* In case SR-IOV is enabled, align the index properly */
        txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

        txr->sc = que->sc = sc;

        /* Allocate report status array */
        txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0],
            M_IXGBE, M_NOWAIT | M_ZERO);
        if (txr->tx_rsq == NULL) {
            error = ENOMEM;
            goto fail;
        }
        for (j = 0; j < scctx->isc_ntxd[0]; j++)
            txr->tx_rsq[j] = QIDX_INVALID;
        /* get the virtual and physical address of the hardware queues */
        txr->tail = IXGBE_TDT(txr->me);
        txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
        txr->tx_paddr = paddrs[i];

        txr->bytes = 0;
        txr->total_packets = 0;

        /* Set the rate at which we sample packets */
        if (sc->feat_en & IXGBE_FEATURE_FDIR)
            txr->atr_sample = atr_sample_rate;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
        sc->num_tx_queues);

    return (0);

fail:
    ixgbe_if_queues_free(ctx);

    return (error);
} /* ixgbe_if_tx_queues_alloc */
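
/*
 * A short note on the tx_rsq ("report status queue") array allocated
 * above: it is sized to one entry per TX descriptor and initialized to
 * QIDX_INVALID.  The TX path records in it the producer indices of
 * descriptors queued with the report-status bit set, and the
 * credits-update path consumes those entries to learn how far the
 * hardware has progressed; this is a summary of how the companion txrx
 * code uses the field, not of logic in this file.
 */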

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que;
    int i;

    MPASS(sc->num_rx_queues > 0);
    MPASS(sc->num_rx_queues == nrxqsets);
    MPASS(nrxqs == 1);

    /* Allocate queue structure memory */
    sc->rx_queues =
        (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!sc->rx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate RX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
        struct rx_ring *rxr = &que->rxr;

        /* In case SR-IOV is enabled, align the index properly */
        rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

        rxr->sc = que->sc = sc;

        /* get the virtual and physical address of the hw queues */
        rxr->tail = IXGBE_RDT(rxr->me);
        rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
        rxr->rx_paddr = paddrs[i];
        rxr->bytes = 0;
        rxr->que = que;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
        sc->num_rx_queues);

    return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_tx_queue *tx_que = sc->tx_queues;
    struct ix_rx_queue *rx_que = sc->rx_queues;
    int i;

    if (tx_que != NULL) {
        for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
            struct tx_ring *txr = &tx_que->txr;
            if (txr->tx_rsq == NULL)
                break;

            free(txr->tx_rsq, M_IXGBE);
            txr->tx_rsq = NULL;
        }

        free(sc->tx_queues, M_IXGBE);
        sc->tx_queues = NULL;
    }
    if (rx_que != NULL) {
        free(sc->rx_queues, M_IXGBE);
        sc->rx_queues = NULL;
    }
} /* ixgbe_if_queues_free */
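
/*
 * Worked example of the RETA packing performed below: each 32-bit
 * RETA/ERETA register holds four one-byte redirection entries.  The loop
 * shifts the accumulator right by 8 and ORs the next queue id into the
 * top byte, so after four iterations entry (n+0) ends up in bits 7:0,
 * (n+1) in bits 15:8, and so on.  With four RX queues and no kernel RSS
 * remapping, the entries cycle 0,1,2,3 and every register is written as
 * 0x03020100.
 */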

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 reta = 0, mrqc, rss_key[10];
    int queue_id, table_size, index_mult;
    int i, j;
    u32 rss_hash_config;

    if (sc->feat_en & IXGBE_FEATURE_RSS) {
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *)&rss_key);
    } else {
        /* set up random bits */
        arc4rand(&rss_key, sizeof(rss_key), 0);
    }

    /* Set multiplier for RETA setup and table size based on MAC */
    index_mult = 0x1;
    table_size = 128;
    switch (sc->hw.mac.type) {
    case ixgbe_mac_82598EB:
        index_mult = 0x11;
        break;
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        table_size = 512;
        break;
    default:
        break;
    }

    /* Set up the redirection table */
    for (i = 0, j = 0; i < table_size; i++, j++) {
        if (j == sc->num_rx_queues)
            j = 0;

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
            /*
             * Fetch the RSS bucket id for the given indirection
             * entry. Cap it at the number of configured buckets
             * (which is num_rx_queues.)
             */
            queue_id = rss_get_indirection_to_bucket(i);
            queue_id = queue_id % sc->num_rx_queues;
        } else
            queue_id = (j * index_mult);

        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
        reta = reta >> 8;
        reta = reta | (((uint32_t)queue_id) << 24);
        if ((i & 3) == 3) {
            if (i < 128)
                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
            else
                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                    reta);
            reta = 0;
        }
    }

    /* Now fill our hash function seeds */
    for (i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

    /* Perform hash on these packet types */
    if (sc->feat_en & IXGBE_FEATURE_RSS)
        rss_hash_config = rss_gethashconfig();
    else {
        /*
         * Disable UDP - IP fragments aren't currently being handled
         * and so we end up with a mix of 2-tuple and 4-tuple
         * traffic.
         */
        rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                        | RSS_HASHTYPE_RSS_TCP_IPV4
                        | RSS_HASHTYPE_RSS_IPV6
                        | RSS_HASHTYPE_RSS_TCP_IPV6
                        | RSS_HASHTYPE_RSS_IPV6_EX
                        | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    }

    mrqc = IXGBE_MRQC_RSSEN;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    mrqc |= ixgbe_get_mrqc(sc->iov_mode);
    IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
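
/*
 * BSIZEPKT_ROUNDUP rounds the receive buffer size up to the granularity
 * of the SRRCTL BSIZEPKT field before the shift converts it to register
 * units.  Assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 (i.e. 1 KB units),
 * a 2048-byte mbuf cluster gives bufsz = (2048 + 1023) >> 10 = 2, and
 * any size that is not an exact 1 KB multiple rounds up to the next unit.
 */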

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = sc->shared;
    struct ixgbe_hw *hw = &sc->hw;
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct ix_rx_queue *que;
    int i, j;
    u32 bufsz, fctrl, srrctl, rxcsum;
    u32 hlreg;

    /*
     * Make sure receives are disabled while
     * setting up the descriptor ring
     */
    ixgbe_disable_rx(hw);

    /* Enable broadcasts */
    fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    fctrl |= IXGBE_FCTRL_BAM;
    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        fctrl |= IXGBE_FCTRL_DPF;
        fctrl |= IXGBE_FCTRL_PMCF;
    }
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    /* Set for Jumbo Frames? */
    hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    if (ifp->if_mtu > ETHERMTU)
        hlreg |= IXGBE_HLREG0_JUMBOEN;
    else
        hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

    bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
        IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    /* Setup the Base and Length of the Rx Descriptor Ring */
    for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        u64 rdba = rxr->rx_paddr;

        j = rxr->me;

        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
            scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

        /* Set up the SRRCTL register */
        srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        srrctl |= bufsz;
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /*
         * Set DROP_EN iff we have no flow control and >1 queue.
         * Note that srrctl was cleared shortly before during reset,
         * so we do not need to clear the bit, but do it just in case
         * this code is moved elsewhere.
         */
        if (sc->num_rx_queues > 1 &&
            sc->hw.fc.requested_mode == ixgbe_fc_none) {
            srrctl |= IXGBE_SRRCTL_DROP_EN;
        } else {
            srrctl &= ~IXGBE_SRRCTL_DROP_EN;
        }

        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

        /* Set the driver rx tail address */
        rxr->tail = IXGBE_RDT(rxr->me);
    }

    if (sc->hw.mac.type != ixgbe_mac_82598EB) {
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR
                    | IXGBE_PSRTYPE_UDPHDR
                    | IXGBE_PSRTYPE_IPV4HDR
                    | IXGBE_PSRTYPE_IPV6HDR;
        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    }

    rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

    ixgbe_initialize_rss_mapping(sc);

    if (sc->num_rx_queues > 1) {
        /* RSS and RX IPP Checksum are mutually exclusive */
        rxcsum |= IXGBE_RXCSUM_PCSD;
    }

    if (ifp->if_capenable & IFCAP_RXCSUM)
        rxcsum |= IXGBE_RXCSUM_PCSD;

    /* This is useful for calculating UDP/IP fragment checksums */
    if (!(rxcsum & IXGBE_RXCSUM_PCSD))
        rxcsum |= IXGBE_RXCSUM_IPPCSE;

    IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    if_softc_ctx_t scctx = sc->shared;
    struct ix_tx_queue *que;
    int i;

    /* Setup the Base and Length of the Tx Descriptor Ring */
    for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
        i++, que++) {
        struct tx_ring *txr = &que->txr;
        u64 tdba = txr->tx_paddr;
        u32 txctrl = 0;
        int j = txr->me;

        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
            scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

        /* Setup the HW Tx Head and Tail descriptor pointers */
        IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

        /* Cache the tail address */
        txr->tail = IXGBE_TDT(txr->me);

        txr->tx_rs_cidx = txr->tx_rs_pidx;
        txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
        for (int k = 0; k < scctx->isc_ntxd[0]; k++)
            txr->tx_rsq[k] = QIDX_INVALID;

        /* Disable Head Writeback */
        /*
         * Note: for X550 series devices, these registers are actually
         * prefixed with TPH_ instead of DCA_, but the addresses and
         * fields remain the same.
         */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
            break;
        default:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
            break;
        }
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
            break;
        default:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
            break;
        }
    }

    if (hw->mac.type != ixgbe_mac_82598EB) {
        u32 dmatxctl, rttdcs;

        dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
        dmatxctl |= IXGBE_DMATXCTL_TE;
        IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        /* Disable arbiter to set MTQC */
        rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        rttdcs |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        IXGBE_WRITE_REG(hw, IXGBE_MTQC,
            ixgbe_get_mtqc(sc->iov_mode));
        rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    }
} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
    return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
    struct ixgbe_softc *sc;
    device_t dev;
    if_softc_ctx_t scctx;
    struct ixgbe_hw *hw;
    int error = 0;
    u32 ctrl_ext;

    INIT_DEBUGOUT("ixgbe_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);
    sc->hw.back = sc;
    sc->ctx = ctx;
    sc->dev = dev;
    scctx = sc->shared = iflib_get_softc_ctx(ctx);
    sc->media = iflib_get_media(ctx);
    hw = &sc->hw;

    /* Determine hardware revision */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_get_revid(dev);
    hw->subsystem_vendor_id = pci_get_subvendor(dev);
    hw->subsystem_device_id = pci_get_subdevice(dev);

    /* Do base PCI setup - map BAR0 */
    if (ixgbe_allocate_pci_resources(ctx)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        return (ENXIO);
    }

    /* let hardware know driver is loaded */
    ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

    /*
     * Initialize the shared code
     */
    if (ixgbe_init_shared_code(hw) != 0) {
        device_printf(dev, "Unable to initialize the shared code\n");
        error = ENXIO;
        goto err_pci;
    }

    if (hw->mbx.ops.init_params)
        hw->mbx.ops.init_params(hw);

    hw->allow_unsupported_sfp = allow_unsupported_sfp;

    if (hw->mac.type != ixgbe_mac_82598EB)
        hw->phy.smart_speed = ixgbe_smart_speed;

    ixgbe_init_device_features(sc);

    /* Enable WoL (if supported) */
    ixgbe_check_wol_support(sc);

    /* Verify adapter fan is still functional (if applicable) */
    if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
        u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
        ixgbe_check_fan_failure(sc, esdp, false);
    }

    /* Ensure SW/FW semaphore is free */
    ixgbe_init_swfw_semaphore(hw);

    /* Set an initial default flow control value */
    hw->fc.requested_mode = ixgbe_flow_control;

    hw->phy.reset_if_overtemp = true;
    error = ixgbe_reset_hw(hw);
    hw->phy.reset_if_overtemp = false;
    if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
        /*
         * No optics in this port; set up
         * so the timer routine will probe
         * for later insertion.
         */
        sc->sfp_probe = true;
        error = 0;
    } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
        device_printf(dev, "Unsupported SFP+ module detected!\n");
        error = EIO;
        goto err_pci;
    } else if (error) {
        device_printf(dev, "Hardware initialization failed\n");
        error = EIO;
        goto err_pci;
    }

    /* Make sure we have a good EEPROM before we read from it */
    if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
        device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
        error = EIO;
        goto err_pci;
    }

    error = ixgbe_start_hw(hw);
    switch (error) {
    case IXGBE_ERR_EEPROM_VERSION:
        device_printf(dev,
            "This device is a pre-production adapter/LOM. Please be"
            " aware there may be issues associated with your"
            " hardware.\nIf you are experiencing problems please"
            " contact your Intel or hardware representative who"
            " provided you with this hardware.\n");
        break;
    case IXGBE_ERR_SFP_NOT_SUPPORTED:
        device_printf(dev, "Unsupported SFP+ Module\n");
        error = EIO;
        goto err_pci;
    case IXGBE_ERR_SFP_NOT_PRESENT:
        device_printf(dev, "No SFP+ Module found\n");
        /* falls thru */
    default:
        break;
    }

    /* Most of the iflib initialization... */

    iflib_set_mac(ctx, hw->mac.addr);
    switch (sc->hw.mac.type) {
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        scctx->isc_rss_table_size = 512;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
        break;
    default:
        scctx->isc_rss_table_size = 128;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
    }

    /* Allow legacy interrupts */
    ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

    scctx->isc_txqsizes[0] =
        roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
        sizeof(u32), DBA_ALIGN),
    scctx->isc_rxqsizes[0] =
        roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
        DBA_ALIGN);

    /* XXX */
    scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
        CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
    } else {
        scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
        scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
    }

    scctx->isc_msix_bar = pci_msix_table_bar(dev);

    scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
    scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

    scctx->isc_txrx = &ixgbe_txrx;

    scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

    return (0);

err_pci:
    ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
    ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
    ixgbe_free_pci_resources(ctx);

    return (error);
} /* ixgbe_if_attach_pre */
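
/*
 * Attach is split in two phases around iflib's own setup: the _pre
 * routine above runs before iflib allocates interrupts and queue memory
 * (hardware identification, shared code init, first reset, shared
 * context sizing), while the _post routine below runs once those
 * resources exist and finishes the software state: multicast array,
 * optics and PHY power, interface registration, statistics, and sysctls.
 */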

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
    device_t dev;
    struct ixgbe_softc *sc;
    struct ixgbe_hw *hw;
    int error = 0;

    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);
    hw = &sc->hw;

    if (sc->intr_type == IFLIB_INTR_LEGACY &&
        (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
        device_printf(dev, "Device does not support legacy interrupts");
        error = ENXIO;
        goto err;
    }

    /* Allocate multicast array memory. */
    sc->mta = malloc(sizeof(*sc->mta) *
        MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
    if (sc->mta == NULL) {
        device_printf(dev, "Cannot allocate multicast setup array\n");
        error = ENOMEM;
        goto err;
    }

    /* hw.ix defaults init */
    ixgbe_set_advertise(sc, ixgbe_advertise_speed);

    /* Enable the optics for 82599 SFP+ fiber */
    ixgbe_enable_tx_laser(hw);

    /* Enable power to the phy. */
    ixgbe_set_phy_power(hw, true);

    ixgbe_initialize_iov(sc);

    error = ixgbe_setup_interface(ctx);
    if (error) {
        device_printf(dev, "Interface setup failed: %d\n", error);
        goto err;
    }

    ixgbe_if_update_admin_status(ctx);

    /* Initialize statistics */
    ixgbe_update_stats_counters(sc);
    ixgbe_add_hw_stats(sc);

    /* Check PCIE slot type/speed/width */
    ixgbe_get_slot_info(sc);

    /*
     * Do time init and sysctl init here, but
     * only on the first port of a bypass sc.
     */
    ixgbe_bypass_init(sc);

    /* Display NVM and Option ROM versions */
    ixgbe_print_fw_version(ctx);

    /* Set an initial dmac value */
    sc->dmac = 0;
    /* Set initial advertised speeds (if applicable) */
    sc->advertise = ixgbe_get_advertise(sc);

    if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
        ixgbe_define_iov_schemas(dev, &error);

    /* Add sysctls */
    ixgbe_add_device_sysctls(ctx);

    return (0);
err:
    return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u16 dev_caps = 0;

    /* Find out WoL support for port */
    sc->wol_support = hw->wol_enabled = 0;
    ixgbe_get_device_caps(hw, &dev_caps);
    if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
        ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
        hw->bus.func == 0))
        sc->wol_support = hw->wol_enabled = 1;

    /* Save initial wake up filter configuration */
    sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

    return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct ixgbe_softc *sc = iflib_get_softc(ctx);

    INIT_DEBUGOUT("ixgbe_setup_interface: begin");

    if_setbaudrate(ifp, IF_Gbps(10));

    sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

    ixgbe_add_media_types(ctx);

    /* Autoselect media by default */
    ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

    return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (sc->ipackets);
    case IFCOUNTER_OPACKETS:
        return (sc->opackets);
    case IFCOUNTER_IBYTES:
        return (sc->ibytes);
    case IFCOUNTER_OBYTES:
        return (sc->obytes);
    case IFCOUNTER_IMCASTS:
        return (sc->imcasts);
    case IFCOUNTER_OMCASTS:
        return (sc->omcasts);
    case IFCOUNTER_COLLISIONS:
        return (0);
    case IFCOUNTER_IQDROPS:
        return (sc->iqdrops);
    case IFCOUNTER_OQDROPS:
        return (0);
    case IFCOUNTER_IERRORS:
        return (sc->ierrors);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int i;

    if (hw->phy.ops.read_i2c_byte == NULL)
        return (ENXIO);
    for (i = 0; i < req->len; i++)
        hw->phy.ops.read_i2c_byte(hw, req->offset + i,
            req->dev_addr, &req->data[i]);
    return (0);
} /* ixgbe_if_i2c_req */

/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
 * reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
    switch (event) {
    case IFLIB_RESTART_VLAN_CONFIG:
        return (false);
    default:
        return (true);
    }
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = iflib_get_dev(ctx);
    u64 layer;

    layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

    /* Media types with matching FreeBSD media defines */
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
        layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
        ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
        device_printf(dev, "Media supported: 10GbaseKR\n");
        device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
        device_printf(dev, "Media supported: 10GbaseKX4\n");
        device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
        device_printf(dev, "Media supported: 1000baseKX\n");
        device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
        device_printf(dev, "Media supported: 2500baseKX\n");
        device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
        ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
    }
#endif
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
        device_printf(dev, "Media supported: 1000baseBX\n");

    if (hw->device_id == IXGBE_DEV_ID_82598AT) {
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
            0, NULL);
        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    }

    ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        if (hw->phy.type == ixgbe_phy_nl)
            return (true);
        return (false);
    case ixgbe_mac_82599EB:
        switch (hw->mac.ops.get_media_type(hw)) {
        case ixgbe_media_type_fiber:
        case ixgbe_media_type_fiber_qsfp:
            return (true);
        default:
            return (false);
        }
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
            return (true);
        return (false);
    default:
        return (false);
    }
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    u32 autoneg, err = 0;
    bool sfp, negotiate;

    sfp = ixgbe_is_sfp(hw);

    if (sfp) {
        sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
        iflib_admin_intr_deferred(ctx);
    } else {
        if (hw->mac.ops.check_link)
            err = ixgbe_check_link(hw, &sc->link_speed,
                &sc->link_up, false);
        if (err)
            return;
        autoneg = hw->phy.autoneg_advertised;
        if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
            err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                &negotiate);
        if (err)
            return;
        if (hw->mac.ops.setup_link)
            err = hw->mac.ops.setup_link(hw, autoneg,
                sc->link_up);
    }
} /* ixgbe_config_link */
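
/*
 * The statistics registers read in ixgbe_update_stats_counters() below
 * are clear-on-read, so each call accumulates the deltas into the 64-bit
 * soft copies in sc->stats.pf.  Note the 82598-specific quirks handled
 * inline: it lacks the split low/high octet counters (only the high
 * register is read) and its MPRC over-counts broadcasts, which is why
 * BPRC is subtracted for that MAC.
 */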

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    struct ixgbe_hw_stats *stats = &sc->stats.pf;
    u32 missed_rx = 0, bprc, lxon, lxoff, total;
    u32 lxoffrxc;
    u64 total_missed_rx = 0;

    stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
    stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
    stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
    stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
    stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

    for (int i = 0; i < 16; i++) {
        stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
        stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
        stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
    }
    stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
    stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
    stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

    /* Hardware workaround, gprc counts missed packets */
    stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
    stats->gprc -= missed_rx;

    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
        lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        stats->lxoffrxc += lxoffrxc;
    } else {
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
        lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
        stats->lxoffrxc += lxoffrxc;
        /* 82598 only has a counter in the high register */
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
    }

    /*
     * For watchdog management we need to know if we have been paused
     * during the last interval, so capture that here.
     */
    if (lxoffrxc)
        sc->shared->isc_pause_frames = 1;

    /*
     * Workaround: mprc hardware is incorrectly counting
     * broadcasts, so for now we subtract those.
     */
    bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
    stats->bprc += bprc;
    stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
    if (hw->mac.type == ixgbe_mac_82598EB)
        stats->mprc -= bprc;

    stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
    stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
    stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
    stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
    stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
    stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

    lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
    stats->lxontxc += lxon;
    lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
    stats->lxofftxc += lxoff;
    total = lxon + lxoff;

    stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
    stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
    stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
    stats->gptc -= total;
    stats->mptc -= total;
    stats->ptc64 -= total;
    stats->gotc -= total * ETHER_MIN_LEN;

    stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
    stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
    stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
    stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
    stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
    stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
    stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
    stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
    stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
    stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
    stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
    stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
    stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
    stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
    stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
    stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
    stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
    stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    /* Only read FCOE on 82599 */
    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
        stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
        stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
        stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
        stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
    }

    /* Fill out the OS statistics structure */
    IXGBE_SET_IPACKETS(sc, stats->gprc);
    IXGBE_SET_OPACKETS(sc, stats->gptc);
    IXGBE_SET_IBYTES(sc, stats->gorc);
    IXGBE_SET_OBYTES(sc, stats->gotc);
    IXGBE_SET_IMCASTS(sc, stats->mprc);
    IXGBE_SET_OMCASTS(sc, stats->mptc);
    IXGBE_SET_COLLISIONS(sc, 0);
    IXGBE_SET_IQDROPS(sc, total_missed_rx);

    /*
     * Aggregate following types of errors as RX errors:
     * - CRC error count,
     * - illegal byte error count,
     * - checksum error count,
     * - missed packets count,
     * - length error count,
     * - undersized packets count,
     * - fragmented packets count,
     * - oversized packets count,
     * - jabber count.
     */
    IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
        stats->xec + stats->mpc[0] + stats->rlec + stats->ruc +
        stats->rfc + stats->roc + stats->rjc);
} /* ixgbe_update_stats_counters */
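
/*
 * ixgbe_add_hw_stats() below publishes everything under the device's
 * sysctl tree: per-ring "queue%d" nodes plus a "mac_stats" node.  On a
 * typical system the result is readable as, for example,
 * "sysctl dev.ix.0.mac_stats.crc_errs" or
 * "sysctl dev.ix.0.queue0.tx_packets" (illustrative paths; the prefix
 * depends on the device unit number).
 */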
1563 ************************************************************************/ 1564 static void 1565 ixgbe_add_hw_stats(struct ixgbe_softc *sc) 1566 { 1567 device_t dev = iflib_get_dev(sc->ctx); 1568 struct ix_rx_queue *rx_que; 1569 struct ix_tx_queue *tx_que; 1570 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1571 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1572 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1573 struct ixgbe_hw_stats *stats = &sc->stats.pf; 1574 struct sysctl_oid *stat_node, *queue_node; 1575 struct sysctl_oid_list *stat_list, *queue_list; 1576 int i; 1577 1578 #define QUEUE_NAME_LEN 32 1579 char namebuf[QUEUE_NAME_LEN]; 1580 1581 /* Driver Statistics */ 1582 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1583 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets"); 1584 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1585 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); 1586 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1587 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); 1588 1589 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 1590 struct tx_ring *txr = &tx_que->txr; 1591 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1592 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1593 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1594 queue_list = SYSCTL_CHILDREN(queue_node); 1595 1596 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1597 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1598 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1599 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1600 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1601 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1602 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1603 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1604 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1605 CTLFLAG_RD, &txr->total_packets, 1606 "Queue Packets Transmitted"); 1607 } 1608 1609 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 1610 struct rx_ring *rxr = &rx_que->rxr; 1611 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1612 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1613 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1614 queue_list = SYSCTL_CHILDREN(queue_node); 1615 1616 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1617 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 1618 &sc->rx_queues[i], 0, 1619 ixgbe_sysctl_interrupt_rate_handler, "IU", 1620 "Interrupt Rate"); 1621 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1622 CTLFLAG_RD, &(sc->rx_queues[i].irqs), 1623 "irqs on this queue"); 1624 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1625 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1626 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1627 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1628 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1629 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1630 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1631 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1632 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1633 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1634 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1635 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1636 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1637 CTLFLAG_RD, 
	    &rxr->rx_discarded, "Discarded RX packets");
	}

	/* MAC stats get their own sub node */

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
	    CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");

	/* Flow Control stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
	    CTLFLAG_RD, &stats->xec, "Checksum Errors");

	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 *   Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if (!txr)
		return (0);

	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;

	return (0);
} /* ixgbe_sysctl_tdh_handler */

/************************************************************************
 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
 *
 *   Retrieves the TDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1772 { 1773 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1774 int error; 1775 unsigned int val; 1776 1777 if (!txr) 1778 return (0); 1779 1780 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me)); 1781 error = sysctl_handle_int(oidp, &val, 0, req); 1782 if (error || !req->newptr) 1783 return error; 1784 1785 return (0); 1786 } /* ixgbe_sysctl_tdt_handler */ 1787 1788 /************************************************************************ 1789 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1790 * 1791 * Retrieves the RDH value from the hardware 1792 ************************************************************************/ 1793 static int 1794 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1795 { 1796 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1797 int error; 1798 unsigned int val; 1799 1800 if (!rxr) 1801 return (0); 1802 1803 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me)); 1804 error = sysctl_handle_int(oidp, &val, 0, req); 1805 if (error || !req->newptr) 1806 return error; 1807 1808 return (0); 1809 } /* ixgbe_sysctl_rdh_handler */ 1810 1811 /************************************************************************ 1812 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1813 * 1814 * Retrieves the RDT value from the hardware 1815 ************************************************************************/ 1816 static int 1817 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1818 { 1819 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1820 int error; 1821 unsigned int val; 1822 1823 if (!rxr) 1824 return (0); 1825 1826 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me)); 1827 error = sysctl_handle_int(oidp, &val, 0, req); 1828 if (error || !req->newptr) 1829 return error; 1830 1831 return (0); 1832 } /* ixgbe_sysctl_rdt_handler */ 1833 1834 /************************************************************************ 1835 * ixgbe_if_vlan_register 1836 * 1837 * Run via vlan config EVENT, it enables us to use the 1838 * HW Filter table since we can get the vlan id. This 1839 * just creates the entry in the soft version of the 1840 * VFTA, init will repopulate the real table. 1841 ************************************************************************/ 1842 static void 1843 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1844 { 1845 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1846 u16 index, bit; 1847 1848 index = (vtag >> 5) & 0x7F; 1849 bit = vtag & 0x1F; 1850 sc->shadow_vfta[index] |= (1 << bit); 1851 ++sc->num_vlans; 1852 ixgbe_setup_vlan_hw_support(ctx); 1853 } /* ixgbe_if_vlan_register */ 1854 1855 /************************************************************************ 1856 * ixgbe_if_vlan_unregister 1857 * 1858 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 
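 *
 *   The shadow VFTA mirrors the hardware table: each 32-bit word
 *   covers 32 VLAN IDs, so index = vtag / 32 (the >> 5) and
 *   bit = vtag % 32 (the & 0x1F).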
 ************************************************************************/
static void
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16 index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] &= ~(1 << bit);
	--sc->num_vlans;
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_unregister */

/************************************************************************
 * ixgbe_setup_vlan_hw_support
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	int i;
	u32 ctrl;

	/*
	 * We get here through init_locked, meaning a soft reset has
	 * already cleared the VFTA and other state, so if no VLANs
	 * have been registered there is nothing to do.
	 */
	if (sc->num_vlans == 0)
		return;

	/* Setup the queues for vlans */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		for (i = 0; i < sc->num_rx_queues; i++) {
			rxr = &sc->rx_queues[i].rxr;
			/* On 82599 the VLAN enable is per-queue in RXDCTL */
			if (hw->mac.type != ixgbe_mac_82598EB) {
				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
				ctrl |= IXGBE_RXDCTL_VME;
				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
			}
			rxr->vtag_strip = true;
		}
	}

	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (sc->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    sc->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */

/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct ixgbe_softc *sc)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int bus_info_valid = true;
	u32 offset;
	u16 link;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
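	 * The X550EM_x/X550EM_a MACs below are such devices, so simply
	 * return for them instead of falling through to the display code.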
1962 */ 1963 switch (hw->mac.type) { 1964 case ixgbe_mac_X550EM_x: 1965 case ixgbe_mac_X550EM_a: 1966 return; 1967 default: 1968 goto display; 1969 } 1970 1971 get_parent_info: 1972 /* 1973 * For the Quad port adapter we need to parse back 1974 * up the PCI tree to find the speed of the expansion 1975 * slot into which this adapter is plugged. A bit more work. 1976 */ 1977 dev = device_get_parent(device_get_parent(dev)); 1978 #ifdef IXGBE_DEBUG 1979 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 1980 pci_get_slot(dev), pci_get_function(dev)); 1981 #endif 1982 dev = device_get_parent(device_get_parent(dev)); 1983 #ifdef IXGBE_DEBUG 1984 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 1985 pci_get_slot(dev), pci_get_function(dev)); 1986 #endif 1987 /* Now get the PCI Express Capabilities offset */ 1988 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 1989 /* 1990 * Hmm...can't get PCI-Express capabilities. 1991 * Falling back to default method. 1992 */ 1993 bus_info_valid = false; 1994 ixgbe_get_bus_info(hw); 1995 goto display; 1996 } 1997 /* ...and read the Link Status Register */ 1998 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 1999 ixgbe_set_pci_config_data_generic(hw, link); 2000 2001 display: 2002 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 2003 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 2004 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 2005 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 2006 "Unknown"), 2007 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 2008 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 2009 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 2010 "Unknown")); 2011 2012 if (bus_info_valid) { 2013 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 2014 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 2015 (hw->bus.speed == ixgbe_bus_speed_2500))) { 2016 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2017 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 2018 } 2019 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 2020 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 2021 (hw->bus.speed < ixgbe_bus_speed_8000))) { 2022 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2023 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 2024 } 2025 } else 2026 device_printf(dev, "Unable to determine slot speed/width. 
The speed/width reported are that of the internal switch.\n"); 2027 2028 return; 2029 } /* ixgbe_get_slot_info */ 2030 2031 /************************************************************************ 2032 * ixgbe_if_msix_intr_assign 2033 * 2034 * Setup MSI-X Interrupt resources and handlers 2035 ************************************************************************/ 2036 static int 2037 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 2038 { 2039 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2040 struct ix_rx_queue *rx_que = sc->rx_queues; 2041 struct ix_tx_queue *tx_que; 2042 int error, rid, vector = 0; 2043 int cpu_id = 0; 2044 char buf[16]; 2045 2046 /* Admin Que is vector 0*/ 2047 rid = vector + 1; 2048 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) { 2049 rid = vector + 1; 2050 2051 snprintf(buf, sizeof(buf), "rxq%d", i); 2052 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 2053 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 2054 2055 if (error) { 2056 device_printf(iflib_get_dev(ctx), 2057 "Failed to allocate que int %d err: %d", i, error); 2058 sc->num_rx_queues = i + 1; 2059 goto fail; 2060 } 2061 2062 rx_que->msix = vector; 2063 if (sc->feat_en & IXGBE_FEATURE_RSS) { 2064 /* 2065 * The queue ID is used as the RSS layer bucket ID. 2066 * We look up the queue ID -> RSS CPU ID and select 2067 * that. 2068 */ 2069 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2070 } else { 2071 /* 2072 * Bind the MSI-X vector, and thus the 2073 * rings to the corresponding cpu. 2074 * 2075 * This just happens to match the default RSS 2076 * round-robin bucket -> queue -> CPU allocation. 2077 */ 2078 if (sc->num_rx_queues > 1) 2079 cpu_id = i; 2080 } 2081 2082 } 2083 for (int i = 0; i < sc->num_tx_queues; i++) { 2084 snprintf(buf, sizeof(buf), "txq%d", i); 2085 tx_que = &sc->tx_queues[i]; 2086 tx_que->msix = i % sc->num_rx_queues; 2087 iflib_softirq_alloc_generic(ctx, 2088 &sc->rx_queues[tx_que->msix].que_irq, 2089 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2090 } 2091 rid = vector + 1; 2092 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, 2093 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq"); 2094 if (error) { 2095 device_printf(iflib_get_dev(ctx), 2096 "Failed to register admin handler"); 2097 return (error); 2098 } 2099 2100 sc->vector = vector; 2101 2102 return (0); 2103 fail: 2104 iflib_irq_free(ctx, &sc->irq); 2105 rx_que = sc->rx_queues; 2106 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) 2107 iflib_irq_free(ctx, &rx_que->que_irq); 2108 2109 return (error); 2110 } /* ixgbe_if_msix_intr_assign */ 2111 2112 static inline void 2113 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que) 2114 { 2115 uint32_t newitr = 0; 2116 struct rx_ring *rxr = &que->rxr; 2117 2118 /* 2119 * Do Adaptive Interrupt Moderation: 2120 * - Write out last calculated setting 2121 * - Calculate based on average size over 2122 * the last interval. 
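	 * The average bytes-per-packet over the last interval serves as
	 * a rough load estimate: larger frames earn a longer moderation
	 * interval. The computed value is saved in que->eitr_setting and
	 * written to the queue's EITR register at the top of the next
	 * pass through this function.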
2123 */ 2124 if (que->eitr_setting) { 2125 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix), 2126 que->eitr_setting); 2127 } 2128 2129 que->eitr_setting = 0; 2130 /* Idle, do nothing */ 2131 if (rxr->bytes == 0) { 2132 return; 2133 } 2134 2135 if ((rxr->bytes) && (rxr->packets)) { 2136 newitr = (rxr->bytes / rxr->packets); 2137 } 2138 2139 newitr += 24; /* account for hardware frame, crc */ 2140 /* set an upper boundary */ 2141 newitr = min(newitr, 3000); 2142 2143 /* Be nice to the mid range */ 2144 if ((newitr > 300) && (newitr < 1200)) { 2145 newitr = (newitr / 3); 2146 } else { 2147 newitr = (newitr / 2); 2148 } 2149 2150 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 2151 newitr |= newitr << 16; 2152 } else { 2153 newitr |= IXGBE_EITR_CNT_WDIS; 2154 } 2155 2156 /* save for next interrupt */ 2157 que->eitr_setting = newitr; 2158 2159 /* Reset state */ 2160 rxr->bytes = 0; 2161 rxr->packets = 0; 2162 2163 return; 2164 } 2165 2166 /********************************************************************* 2167 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2168 **********************************************************************/ 2169 static int 2170 ixgbe_msix_que(void *arg) 2171 { 2172 struct ix_rx_queue *que = arg; 2173 struct ixgbe_softc *sc = que->sc; 2174 struct ifnet *ifp = iflib_get_ifp(que->sc->ctx); 2175 2176 /* Protect against spurious interrupts */ 2177 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2178 return (FILTER_HANDLED); 2179 2180 ixgbe_disable_queue(sc, que->msix); 2181 ++que->irqs; 2182 2183 /* Check for AIM */ 2184 if (sc->enable_aim) { 2185 ixgbe_perform_aim(sc, que); 2186 } 2187 2188 return (FILTER_SCHEDULE_THREAD); 2189 } /* ixgbe_msix_que */ 2190 2191 /************************************************************************ 2192 * ixgbe_media_status - Media Ioctl callback 2193 * 2194 * Called whenever the user queries the status of 2195 * the interface using ifconfig. 
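 *
 *   Reports IFM_AVALID plus, when the link is up, IFM_ACTIVE and
 *   the IFM_* subtype matching the active PHY layer and speed.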
2196 ************************************************************************/ 2197 static void 2198 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2199 { 2200 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2201 struct ixgbe_hw *hw = &sc->hw; 2202 int layer; 2203 2204 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2205 2206 ifmr->ifm_status = IFM_AVALID; 2207 ifmr->ifm_active = IFM_ETHER; 2208 2209 if (!sc->link_active) 2210 return; 2211 2212 ifmr->ifm_status |= IFM_ACTIVE; 2213 layer = sc->phy_layer; 2214 2215 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2216 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2217 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2218 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2219 switch (sc->link_speed) { 2220 case IXGBE_LINK_SPEED_10GB_FULL: 2221 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2222 break; 2223 case IXGBE_LINK_SPEED_1GB_FULL: 2224 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2225 break; 2226 case IXGBE_LINK_SPEED_100_FULL: 2227 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2228 break; 2229 case IXGBE_LINK_SPEED_10_FULL: 2230 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2231 break; 2232 } 2233 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2234 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2235 switch (sc->link_speed) { 2236 case IXGBE_LINK_SPEED_10GB_FULL: 2237 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2238 break; 2239 } 2240 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2241 switch (sc->link_speed) { 2242 case IXGBE_LINK_SPEED_10GB_FULL: 2243 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2244 break; 2245 case IXGBE_LINK_SPEED_1GB_FULL: 2246 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2247 break; 2248 } 2249 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2250 switch (sc->link_speed) { 2251 case IXGBE_LINK_SPEED_10GB_FULL: 2252 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2253 break; 2254 case IXGBE_LINK_SPEED_1GB_FULL: 2255 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2256 break; 2257 } 2258 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2259 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2260 switch (sc->link_speed) { 2261 case IXGBE_LINK_SPEED_10GB_FULL: 2262 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2263 break; 2264 case IXGBE_LINK_SPEED_1GB_FULL: 2265 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2266 break; 2267 } 2268 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2269 switch (sc->link_speed) { 2270 case IXGBE_LINK_SPEED_10GB_FULL: 2271 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2272 break; 2273 } 2274 /* 2275 * XXX: These need to use the proper media types once 2276 * they're added. 
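 * Without IFM_ETH_XTYPE the backplane KR/KX4/KX layers are
 * reported using the closest SR/CX4/CX subtypes instead.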
	 */
#ifndef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_if_media_status */

/************************************************************************
 * ixgbe_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
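	 * (The ifmedia layer validates the requested media word against
	 * the list registered at attach time, so unsupported selections
	 * normally never reach this callback.)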
2373 */ 2374 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2375 case IFM_AUTO: 2376 case IFM_10G_T: 2377 speed |= IXGBE_LINK_SPEED_100_FULL; 2378 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2379 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2380 break; 2381 case IFM_10G_LRM: 2382 case IFM_10G_LR: 2383 #ifndef IFM_ETH_XTYPE 2384 case IFM_10G_SR: /* KR, too */ 2385 case IFM_10G_CX4: /* KX4 */ 2386 #else 2387 case IFM_10G_KR: 2388 case IFM_10G_KX4: 2389 #endif 2390 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2391 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2392 break; 2393 #ifndef IFM_ETH_XTYPE 2394 case IFM_1000_CX: /* KX */ 2395 #else 2396 case IFM_1000_KX: 2397 #endif 2398 case IFM_1000_LX: 2399 case IFM_1000_SX: 2400 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2401 break; 2402 case IFM_1000_T: 2403 speed |= IXGBE_LINK_SPEED_100_FULL; 2404 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2405 break; 2406 case IFM_10G_TWINAX: 2407 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2408 break; 2409 case IFM_100_TX: 2410 speed |= IXGBE_LINK_SPEED_100_FULL; 2411 break; 2412 case IFM_10_T: 2413 speed |= IXGBE_LINK_SPEED_10_FULL; 2414 break; 2415 default: 2416 goto invalid; 2417 } 2418 2419 hw->mac.autotry_restart = true; 2420 hw->mac.ops.setup_link(hw, speed, true); 2421 sc->advertise = 2422 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 2423 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 2424 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 2425 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0); 2426 2427 return (0); 2428 2429 invalid: 2430 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2431 2432 return (EINVAL); 2433 } /* ixgbe_if_media_change */ 2434 2435 /************************************************************************ 2436 * ixgbe_set_promisc 2437 ************************************************************************/ 2438 static int 2439 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2440 { 2441 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2442 struct ifnet *ifp = iflib_get_ifp(ctx); 2443 u32 rctl; 2444 int mcnt = 0; 2445 2446 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); 2447 rctl &= (~IXGBE_FCTRL_UPE); 2448 if (ifp->if_flags & IFF_ALLMULTI) 2449 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2450 else { 2451 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); 2452 } 2453 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2454 rctl &= (~IXGBE_FCTRL_MPE); 2455 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2456 2457 if (ifp->if_flags & IFF_PROMISC) { 2458 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2459 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2460 } else if (ifp->if_flags & IFF_ALLMULTI) { 2461 rctl |= IXGBE_FCTRL_MPE; 2462 rctl &= ~IXGBE_FCTRL_UPE; 2463 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2464 } 2465 return (0); 2466 } /* ixgbe_if_promisc_set */ 2467 2468 /************************************************************************ 2469 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2470 ************************************************************************/ 2471 static int 2472 ixgbe_msix_link(void *arg) 2473 { 2474 struct ixgbe_softc *sc = arg; 2475 struct ixgbe_hw *hw = &sc->hw; 2476 u32 eicr, eicr_mask; 2477 s32 retval; 2478 2479 ++sc->link_irq; 2480 2481 /* Pause other interrupts */ 2482 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2483 2484 /* First get the cause */ 2485 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2486 /* Be sure the queue bits are not cleared */ 2487 eicr &= ~IXGBE_EICR_RTX_QUEUE; 2488 /* Clear interrupt with write */ 2489 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2490 2491 /* Link status change */ 2492 if 
(eicr & IXGBE_EICR_LSC) { 2493 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2494 sc->task_requests |= IXGBE_REQUEST_TASK_LSC; 2495 } 2496 2497 if (sc->hw.mac.type != ixgbe_mac_82598EB) { 2498 if ((sc->feat_en & IXGBE_FEATURE_FDIR) && 2499 (eicr & IXGBE_EICR_FLOW_DIR)) { 2500 /* This is probably overkill :) */ 2501 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1)) 2502 return (FILTER_HANDLED); 2503 /* Disable the interrupt */ 2504 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); 2505 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR; 2506 } else 2507 if (eicr & IXGBE_EICR_ECC) { 2508 device_printf(iflib_get_dev(sc->ctx), 2509 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n"); 2510 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2511 } 2512 2513 /* Check for over temp condition */ 2514 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 2515 switch (sc->hw.mac.type) { 2516 case ixgbe_mac_X550EM_a: 2517 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 2518 break; 2519 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 2520 IXGBE_EICR_GPI_SDP0_X550EM_a); 2521 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2522 IXGBE_EICR_GPI_SDP0_X550EM_a); 2523 retval = hw->phy.ops.check_overtemp(hw); 2524 if (retval != IXGBE_ERR_OVERTEMP) 2525 break; 2526 device_printf(iflib_get_dev(sc->ctx), 2527 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2528 device_printf(iflib_get_dev(sc->ctx), 2529 "System shutdown required!\n"); 2530 break; 2531 default: 2532 if (!(eicr & IXGBE_EICR_TS)) 2533 break; 2534 retval = hw->phy.ops.check_overtemp(hw); 2535 if (retval != IXGBE_ERR_OVERTEMP) 2536 break; 2537 device_printf(iflib_get_dev(sc->ctx), 2538 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2539 device_printf(iflib_get_dev(sc->ctx), 2540 "System shutdown required!\n"); 2541 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 2542 break; 2543 } 2544 } 2545 2546 /* Check for VF message */ 2547 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) && 2548 (eicr & IXGBE_EICR_MAILBOX)) 2549 sc->task_requests |= IXGBE_REQUEST_TASK_MBX; 2550 } 2551 2552 if (ixgbe_is_sfp(hw)) { 2553 /* Pluggable optics-related interrupt */ 2554 if (hw->mac.type >= ixgbe_mac_X540) 2555 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 2556 else 2557 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 2558 2559 if (eicr & eicr_mask) { 2560 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2561 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; 2562 } 2563 2564 if ((hw->mac.type == ixgbe_mac_82599EB) && 2565 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 2566 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2567 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2568 sc->task_requests |= IXGBE_REQUEST_TASK_MSF; 2569 } 2570 } 2571 2572 /* Check for fan failure */ 2573 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) { 2574 ixgbe_check_fan_failure(sc, eicr, true); 2575 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2576 } 2577 2578 /* External PHY interrupt */ 2579 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 2580 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2581 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 2582 sc->task_requests |= IXGBE_REQUEST_TASK_PHY; 2583 } 2584 2585 return (sc->task_requests != 0) ? 
FILTER_SCHEDULE_THREAD : FILTER_HANDLED; 2586 } /* ixgbe_msix_link */ 2587 2588 /************************************************************************ 2589 * ixgbe_sysctl_interrupt_rate_handler 2590 ************************************************************************/ 2591 static int 2592 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2593 { 2594 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2595 int error; 2596 unsigned int reg, usec, rate; 2597 2598 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix)); 2599 usec = ((reg & 0x0FF8) >> 3); 2600 if (usec > 0) 2601 rate = 500000 / usec; 2602 else 2603 rate = 0; 2604 error = sysctl_handle_int(oidp, &rate, 0, req); 2605 if (error || !req->newptr) 2606 return error; 2607 reg &= ~0xfff; /* default, no limitation */ 2608 ixgbe_max_interrupt_rate = 0; 2609 if (rate > 0 && rate < 500000) { 2610 if (rate < 1000) 2611 rate = 1000; 2612 ixgbe_max_interrupt_rate = rate; 2613 reg |= ((4000000/rate) & 0xff8); 2614 } 2615 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg); 2616 2617 return (0); 2618 } /* ixgbe_sysctl_interrupt_rate_handler */ 2619 2620 /************************************************************************ 2621 * ixgbe_add_device_sysctls 2622 ************************************************************************/ 2623 static void 2624 ixgbe_add_device_sysctls(if_ctx_t ctx) 2625 { 2626 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2627 device_t dev = iflib_get_dev(ctx); 2628 struct ixgbe_hw *hw = &sc->hw; 2629 struct sysctl_oid_list *child; 2630 struct sysctl_ctx_list *ctx_list; 2631 2632 ctx_list = device_get_sysctl_ctx(dev); 2633 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2634 2635 /* Sysctls for all devices */ 2636 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2637 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2638 sc, 0, ixgbe_sysctl_flowcntl, "I", 2639 IXGBE_SYSCTL_DESC_SET_FC); 2640 2641 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2642 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2643 sc, 0, ixgbe_sysctl_advertise, "I", 2644 IXGBE_SYSCTL_DESC_ADV_SPEED); 2645 2646 sc->enable_aim = ixgbe_enable_aim; 2647 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW, 2648 &sc->enable_aim, 0, "Interrupt Moderation"); 2649 2650 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version", 2651 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2652 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions"); 2653 2654 #ifdef IXGBE_DEBUG 2655 /* testing sysctls (for all devices) */ 2656 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2657 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2658 sc, 0, ixgbe_sysctl_power_state, 2659 "I", "PCI Power State"); 2660 2661 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2662 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2663 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2664 #endif 2665 /* for X550 series devices */ 2666 if (hw->mac.type >= ixgbe_mac_X550) 2667 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2668 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2669 sc, 0, ixgbe_sysctl_dmac, 2670 "I", "DMA Coalesce"); 2671 2672 /* for WoL-capable devices */ 2673 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2674 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2675 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2676 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2677 2678 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 
2679 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2680 sc, 0, ixgbe_sysctl_wufc, 2681 "I", "Enable/Disable Wake Up Filters"); 2682 } 2683 2684 /* for X552/X557-AT devices */ 2685 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2686 struct sysctl_oid *phy_node; 2687 struct sysctl_oid_list *phy_list; 2688 2689 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2690 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls"); 2691 phy_list = SYSCTL_CHILDREN(phy_node); 2692 2693 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2694 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2695 sc, 0, ixgbe_sysctl_phy_temp, 2696 "I", "Current External PHY Temperature (Celsius)"); 2697 2698 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2699 "overtemp_occurred", 2700 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2701 ixgbe_sysctl_phy_overtemp_occurred, "I", 2702 "External PHY High Temperature Event Occurred"); 2703 } 2704 2705 if (sc->feat_cap & IXGBE_FEATURE_EEE) { 2706 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2707 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2708 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2709 } 2710 } /* ixgbe_add_device_sysctls */ 2711 2712 /************************************************************************ 2713 * ixgbe_allocate_pci_resources 2714 ************************************************************************/ 2715 static int 2716 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2717 { 2718 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2719 device_t dev = iflib_get_dev(ctx); 2720 int rid; 2721 2722 rid = PCIR_BAR(0); 2723 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2724 RF_ACTIVE); 2725 2726 if (!(sc->pci_mem)) { 2727 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2728 return (ENXIO); 2729 } 2730 2731 /* Save bus_space values for READ/WRITE_REG macros */ 2732 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); 2733 sc->osdep.mem_bus_space_handle = 2734 rman_get_bushandle(sc->pci_mem); 2735 /* Set hw values for shared code */ 2736 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; 2737 2738 return (0); 2739 } /* ixgbe_allocate_pci_resources */ 2740 2741 /************************************************************************ 2742 * ixgbe_detach - Device removal routine 2743 * 2744 * Called when the driver is being removed. 2745 * Stops the adapter and deallocates all the resources 2746 * that were allocated for driver operation. 
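 *   (Invoked via iflib's device detach path on module unload or
 *   hot-plug removal.)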
2747 * 2748 * return 0 on success, positive on failure 2749 ************************************************************************/ 2750 static int 2751 ixgbe_if_detach(if_ctx_t ctx) 2752 { 2753 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2754 device_t dev = iflib_get_dev(ctx); 2755 u32 ctrl_ext; 2756 2757 INIT_DEBUGOUT("ixgbe_detach: begin"); 2758 2759 if (ixgbe_pci_iov_detach(dev) != 0) { 2760 device_printf(dev, "SR-IOV in use; detach first.\n"); 2761 return (EBUSY); 2762 } 2763 2764 ixgbe_setup_low_power_mode(ctx); 2765 2766 /* let hardware know driver is unloading */ 2767 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); 2768 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2769 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); 2770 2771 ixgbe_free_pci_resources(ctx); 2772 free(sc->mta, M_IXGBE); 2773 2774 return (0); 2775 } /* ixgbe_if_detach */ 2776 2777 /************************************************************************ 2778 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2779 * 2780 * Prepare the adapter/port for LPLU and/or WoL 2781 ************************************************************************/ 2782 static int 2783 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2784 { 2785 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2786 struct ixgbe_hw *hw = &sc->hw; 2787 device_t dev = iflib_get_dev(ctx); 2788 s32 error = 0; 2789 2790 if (!hw->wol_enabled) 2791 ixgbe_set_phy_power(hw, false); 2792 2793 /* Limit power management flow to X550EM baseT */ 2794 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2795 hw->phy.ops.enter_lplu) { 2796 /* Turn off support for APM wakeup. (Using ACPI instead) */ 2797 IXGBE_WRITE_REG(hw, IXGBE_GRC, 2798 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 2799 2800 /* 2801 * Clear Wake Up Status register to prevent any previous wakeup 2802 * events from waking us up immediately after we suspend. 
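		 * (Stale WUS bits would otherwise count as wake events
		 * and bounce the port straight back out of the
		 * low-power state.)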
2803 */ 2804 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2805 2806 /* 2807 * Program the Wakeup Filter Control register with user filter 2808 * settings 2809 */ 2810 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc); 2811 2812 /* Enable wakeups and power management in Wakeup Control */ 2813 IXGBE_WRITE_REG(hw, IXGBE_WUC, 2814 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 2815 2816 /* X550EM baseT adapters need a special LPLU flow */ 2817 hw->phy.reset_disable = true; 2818 ixgbe_if_stop(ctx); 2819 error = hw->phy.ops.enter_lplu(hw); 2820 if (error) 2821 device_printf(dev, "Error entering LPLU: %d\n", error); 2822 hw->phy.reset_disable = false; 2823 } else { 2824 /* Just stop for other adapters */ 2825 ixgbe_if_stop(ctx); 2826 } 2827 2828 return error; 2829 } /* ixgbe_setup_low_power_mode */ 2830 2831 /************************************************************************ 2832 * ixgbe_shutdown - Shutdown entry point 2833 ************************************************************************/ 2834 static int 2835 ixgbe_if_shutdown(if_ctx_t ctx) 2836 { 2837 int error = 0; 2838 2839 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 2840 2841 error = ixgbe_setup_low_power_mode(ctx); 2842 2843 return (error); 2844 } /* ixgbe_if_shutdown */ 2845 2846 /************************************************************************ 2847 * ixgbe_suspend 2848 * 2849 * From D0 to D3 2850 ************************************************************************/ 2851 static int 2852 ixgbe_if_suspend(if_ctx_t ctx) 2853 { 2854 int error = 0; 2855 2856 INIT_DEBUGOUT("ixgbe_suspend: begin"); 2857 2858 error = ixgbe_setup_low_power_mode(ctx); 2859 2860 return (error); 2861 } /* ixgbe_if_suspend */ 2862 2863 /************************************************************************ 2864 * ixgbe_resume 2865 * 2866 * From D3 to D0 2867 ************************************************************************/ 2868 static int 2869 ixgbe_if_resume(if_ctx_t ctx) 2870 { 2871 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2872 device_t dev = iflib_get_dev(ctx); 2873 struct ifnet *ifp = iflib_get_ifp(ctx); 2874 struct ixgbe_hw *hw = &sc->hw; 2875 u32 wus; 2876 2877 INIT_DEBUGOUT("ixgbe_resume: begin"); 2878 2879 /* Read & clear WUS register */ 2880 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2881 if (wus) 2882 device_printf(dev, "Woken up by (WUS): %#010x\n", 2883 IXGBE_READ_REG(hw, IXGBE_WUS)); 2884 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2885 /* And clear WUFC until next low-power transition */ 2886 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2887 2888 /* 2889 * Required after D3->D0 transition; 2890 * will re-advertise all previous advertised speeds 2891 */ 2892 if (ifp->if_flags & IFF_UP) 2893 ixgbe_if_init(ctx); 2894 2895 return (0); 2896 } /* ixgbe_if_resume */ 2897 2898 /************************************************************************ 2899 * ixgbe_if_mtu_set - Ioctl mtu entry point 2900 * 2901 * Return 0 on success, EINVAL on failure 2902 ************************************************************************/ 2903 static int 2904 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 2905 { 2906 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2907 int error = 0; 2908 2909 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); 2910 2911 if (mtu > IXGBE_MAX_MTU) { 2912 error = EINVAL; 2913 } else { 2914 sc->max_frame_size = mtu + IXGBE_MTU_HDR; 2915 } 2916 2917 return error; 2918 } /* ixgbe_if_mtu_set */ 2919 2920 /************************************************************************ 2921 * ixgbe_if_crcstrip_set 2922 
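 *
 *   Netmap support hook: enables or disables hardware CRC stripping
 *   to match the requested netmap mode (see the comment in the
 *   function body).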
 ************************************************************************/
static void
ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	/*
	 * CRC stripping is set in two places:
	 *   IXGBE_HLREG0 (modified on init_locked and hw reset)
	 *   IXGBE_RDRXCTL (set by the original driver in
	 *       ixgbe_setup_hw_rsc() called in init_locked.
	 *       We disable the setting when netmap is compiled in).
	 * We update the values here, but also in ixgbe.c because
	 * init_locked sometimes is called outside our control.
	 */
	uint32_t hl, rxc;

	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
#ifdef NETMAP
	if (netmap_verbose)
		D("%s read HLREG 0x%x rxc 0x%x",
		    onoff ? "enter" : "exit", hl, rxc);
#endif
	/* hw requirements ... */
	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
	rxc |= IXGBE_RDRXCTL_RSCACKC;
	if (onoff && !crcstrip) {
		/* Keep the CRC. Fast rx */
		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
	} else {
		/* Reset default mode */
		hl |= IXGBE_HLREG0_RXCRCSTRP;
		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
	}
#ifdef NETMAP
	if (netmap_verbose)
		D("%s write HLREG 0x%x rxc 0x%x",
		    onoff ? "enter" : "exit", hl, rxc);
#endif
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
} /* ixgbe_if_crcstrip_set */

/*********************************************************************
 * ixgbe_if_init - Init entry point
 *
 *   Used in two ways: it is used by the stack as an init entry
 *   point in the network interface structure, and by the driver
 *   as a hw/sw initialization routine to get to a consistent
 *   state.
 **********************************************************************/
void
ixgbe_if_init(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *rx_que;
	struct ix_tx_queue *tx_que;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	int i, j, err;

	INIT_DEBUGOUT("ixgbe_if_init: begin");

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(sc);

	/* Reprogram the RAR[0] in case user changed it.
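	 * RAR[0] holds the primary unicast filter; it is programmed
	 * again just below after any locally administered address has
	 * been copied in from the ifnet.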
*/ 2997 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV); 2998 2999 /* Get the latest mac address, User can use a LAA */ 3000 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3001 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1); 3002 hw->addr_ctrl.rar_used_count = 1; 3003 3004 ixgbe_init_hw(hw); 3005 3006 ixgbe_initialize_iov(sc); 3007 3008 ixgbe_initialize_transmit_units(ctx); 3009 3010 /* Setup Multicast table */ 3011 ixgbe_if_multi_set(ctx); 3012 3013 /* Determine the correct mbuf pool, based on frame size */ 3014 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 3015 3016 /* Configure RX settings */ 3017 ixgbe_initialize_receive_units(ctx); 3018 3019 /* 3020 * Initialize variable holding task enqueue requests 3021 * from MSI-X interrupts 3022 */ 3023 sc->task_requests = 0; 3024 3025 /* Enable SDP & MSI-X interrupts based on adapter */ 3026 ixgbe_config_gpie(sc); 3027 3028 /* Set MTU size */ 3029 if (ifp->if_mtu > ETHERMTU) { 3030 /* aka IXGBE_MAXFRS on 82599 and newer */ 3031 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3032 mhadd &= ~IXGBE_MHADD_MFS_MASK; 3033 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 3034 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 3035 } 3036 3037 /* Now enable all the queues */ 3038 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 3039 struct tx_ring *txr = &tx_que->txr; 3040 3041 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 3042 txdctl |= IXGBE_TXDCTL_ENABLE; 3043 /* Set WTHRESH to 8, burst writeback */ 3044 txdctl |= (8 << 16); 3045 /* 3046 * When the internal queue falls below PTHRESH (32), 3047 * start prefetching as long as there are at least 3048 * HTHRESH (1) buffers ready. The values are taken 3049 * from the Intel linux driver 3.8.21. 3050 * Prefetching enables tx line rate even with 1 queue. 
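		 * TXDCTL field layout: PTHRESH lives in bits 6:0,
		 * HTHRESH in bits 14:8 and WTHRESH in bits 22:16,
		 * hence the (8 << 16) above and the
		 * (32 << 0) | (1 << 8) below.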
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 32 (0x20)
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Set up MSI/MSI-X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(sc);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(sc, 0, 0, 0);
		ixgbe_set_ivar(sc, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(sc);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);

	/* Enable power to the phy.
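	 * The PHY may have been powered down by a previous pass through
	 * ixgbe_setup_low_power_mode(); undo that before configuring
	 * the link.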
	 */
	ixgbe_set_phy_power(hw, true);

	/* Config/Enable Link */
	ixgbe_config_link(ctx);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(sc);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(ctx);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(sc);

	/* And now turn on interrupts */
	ixgbe_if_enable_intr(ctx);

	/* Enable the use of the MBX by the VFs */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}
} /* ixgbe_if_init */

/************************************************************************
 * ixgbe_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *   (yes this is all very magic and confusing :)
 *    - entry is the register array entry
 *    - vector is the MSI-X vector for this queue
 *    - type is RX/TX/MISC
 ************************************************************************/
static void
ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (entry & 0x3)));
		ivar |= (vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */

/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
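		 * (sc->dmac is consumed later by ixgbe_config_dmac(),
		 * called from ixgbe_if_init().)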
3220 */ 3221 sc->dmac = 0; 3222 newitr = 0; 3223 } 3224 3225 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) { 3226 struct rx_ring *rxr = &rx_que->rxr; 3227 3228 /* First the RX queue entry */ 3229 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0); 3230 3231 /* Set an Initial EITR value */ 3232 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr); 3233 } 3234 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) { 3235 struct tx_ring *txr = &tx_que->txr; 3236 3237 /* ... and the TX */ 3238 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1); 3239 } 3240 /* For the Link interrupt */ 3241 ixgbe_set_ivar(sc, 1, sc->vector, -1); 3242 } /* ixgbe_configure_ivars */ 3243 3244 /************************************************************************ 3245 * ixgbe_config_gpie 3246 ************************************************************************/ 3247 static void 3248 ixgbe_config_gpie(struct ixgbe_softc *sc) 3249 { 3250 struct ixgbe_hw *hw = &sc->hw; 3251 u32 gpie; 3252 3253 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3254 3255 if (sc->intr_type == IFLIB_INTR_MSIX) { 3256 /* Enable Enhanced MSI-X mode */ 3257 gpie |= IXGBE_GPIE_MSIX_MODE 3258 | IXGBE_GPIE_EIAME 3259 | IXGBE_GPIE_PBA_SUPPORT 3260 | IXGBE_GPIE_OCD; 3261 } 3262 3263 /* Fan Failure Interrupt */ 3264 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) 3265 gpie |= IXGBE_SDP1_GPIEN; 3266 3267 /* Thermal Sensor Interrupt */ 3268 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 3269 gpie |= IXGBE_SDP0_GPIEN_X540; 3270 3271 /* Link detection */ 3272 switch (hw->mac.type) { 3273 case ixgbe_mac_82599EB: 3274 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 3275 break; 3276 case ixgbe_mac_X550EM_x: 3277 case ixgbe_mac_X550EM_a: 3278 gpie |= IXGBE_SDP0_GPIEN_X540; 3279 break; 3280 default: 3281 break; 3282 } 3283 3284 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3285 3286 } /* ixgbe_config_gpie */ 3287 3288 /************************************************************************ 3289 * ixgbe_config_delay_values 3290 * 3291 * Requires sc->max_frame_size to be set. 3292 ************************************************************************/ 3293 static void 3294 ixgbe_config_delay_values(struct ixgbe_softc *sc) 3295 { 3296 struct ixgbe_hw *hw = &sc->hw; 3297 u32 rxpb, frame, size, tmp; 3298 3299 frame = sc->max_frame_size; 3300 3301 /* Calculate High Water */ 3302 switch (hw->mac.type) { 3303 case ixgbe_mac_X540: 3304 case ixgbe_mac_X550: 3305 case ixgbe_mac_X550EM_x: 3306 case ixgbe_mac_X550EM_a: 3307 tmp = IXGBE_DV_X540(frame, frame); 3308 break; 3309 default: 3310 tmp = IXGBE_DV(frame, frame); 3311 break; 3312 } 3313 size = IXGBE_BT2KB(tmp); 3314 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 3315 hw->fc.high_water[0] = rxpb - size; 3316 3317 /* Now calculate Low Water */ 3318 switch (hw->mac.type) { 3319 case ixgbe_mac_X540: 3320 case ixgbe_mac_X550: 3321 case ixgbe_mac_X550EM_x: 3322 case ixgbe_mac_X550EM_a: 3323 tmp = IXGBE_LOW_DV_X540(frame); 3324 break; 3325 default: 3326 tmp = IXGBE_LOW_DV(frame); 3327 break; 3328 } 3329 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 3330 3331 hw->fc.pause_time = IXGBE_FC_PAUSE; 3332 hw->fc.send_xon = true; 3333 } /* ixgbe_config_delay_values */ 3334 3335 /************************************************************************ 3336 * ixgbe_set_multi - Multicast Update 3337 * 3338 * Called whenever multicast address list is updated. 
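/*
 * Editor's note -- worked example (illustrative only): in ixgbe_set_ivar()
 * above, each 32-bit IVAR register on 82599-class MACs holds four 8-bit
 * vector slots; queue entry N lives in IVAR(N >> 1), with RX in the low
 * byte of each 16-bit half and TX in the high byte. For RX queue 3 mapped
 * to MSI-X vector 5:
 *
 *   index = (16 * (3 & 1)) + (8 * 0) = 16   -> bits 23:16 of IVAR(1)
 *   value = 5 | IXGBE_IVAR_ALLOC_VAL (0x80) = 0x85
 *
 * Likewise the initial EITR in ixgbe_configure_ivars(): assuming a
 * hypothetical ixgbe_max_interrupt_rate of 8000 interrupts/sec,
 *
 *   newitr = (4000000 / 8000) & 0x0FF8 = 500 & 0x0FF8 = 0x1F0
 *
 * the low three bits being masked off because the EITR interval field
 * starts at bit 3 of the register.
 */
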
/************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 * Called whenever multicast address list is updated.
 ************************************************************************/
static u_int
ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_mc_addr *mta = sc->mta;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);
	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	mta[idx].vmdq = sc->pool;

	return (1);
} /* ixgbe_mc_filter_apply */

static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	u8 *update_ptr;
	u32 fctrl;
	u_int mcnt;

	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");

	mta = sc->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(ifp, ixgbe_mc_filter_apply, sc);

	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);

	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, true);
	}

} /* ixgbe_if_multi_set */

/************************************************************************
 * ixgbe_mc_array_itr
 *
 * An iterator function needed by the multicast shared code.
 * It feeds the shared code routine the addresses in the
 * array of ixgbe_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8 *)(mta + 1);

	return (mta->addr);
} /* ixgbe_mc_array_itr */

/************************************************************************
 * ixgbe_if_timer - Timer routine
 *
 * Checks for link status, updates statistics,
 * and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	/* Check for pluggable optics */
	if (sc->sfp_probe)
		if (!ixgbe_sfp_probe(ctx))
			return; /* Nothing to do */

	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);

} /* ixgbe_if_timer */

/************************************************************************
 * ixgbe_sfp_probe
 *
 * Determine if a port had optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	bool result = false;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		sc->sfp_probe = false;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module detected!\n");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = true;
	}
out:

	return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 err, cage_full = 0;

	if (sc->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */

/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, true);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(sc->media);
	ixgbe_add_media_types(sc->ctx);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */

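/*
 * Editor's note (illustrative sketch, not driver logic): the two tasklets
 * above never run from the interrupt handler itself. The ISR only latches
 * a request bit and defers; ixgbe_if_update_admin_status() below drains
 * the bits from a sleepable context where the PHY/I2C can be touched:
 */
#if 0
	/* ISR side: latch and defer (cheap, interrupt context) */
	sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
	iflib_admin_intr_deferred(ctx);

	/* Admin-task side: drain the latched requests */
	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	sc->task_requests = 0;
#endif
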
/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error;

	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(sc->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(sc->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 * Disables all traffic on the adapter by issuing a
 * global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	hw->adapter_stopped = false;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	sc->link_up = false;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_if_stop */

/************************************************************************
 * ixgbe_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);

	if (sc->link_up) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s\n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&sc->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(sc);
			/* should actually be negotiated value */
			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));

			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	}

	/* Handle task requests from msix_link() */
	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	sc->task_requests = 0;

	ixgbe_update_stats_counters(sc);
} /* ixgbe_if_update_admin_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

	if (dcfg->watchdog_timer ^ sc->dmac ||
	    dcfg->link_speed ^ sc->link_speed) {
		dcfg->watchdog_timer = sc->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = sc->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */

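/*
 * Editor's note (equivalent form, illustrative): the "a ^ b" tests in
 * ixgbe_config_dmac() above are plain integer inequality checks, i.e. the
 * hardware is only reprogrammed when a cached setting actually changed:
 */
#if 0
	if (dcfg->watchdog_timer != sc->dmac ||
	    dcfg->link_speed != sc->link_speed) {
		/* ...push the new watchdog/link-speed settings... */
	}
#endif
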
/************************************************************************
 * ixgbe_if_enable_intr
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (sc->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_if_enable_intr */

/************************************************************************
 * ixgbe_if_disable_intr
 ************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (sc->intr_type == IFLIB_INTR_MSIX)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&sc->hw);

} /* ixgbe_if_disable_intr */

/************************************************************************
 * ixgbe_link_intr_enable
 ************************************************************************/
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
	struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
} /* ixgbe_link_intr_enable */

/************************************************************************
 * ixgbe_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixgbe_enable_queue(sc, que->msix);

	return (0);
} /* ixgbe_if_rx_queue_intr_enable */

/************************************************************************
 * ixgbe_enable_queue
 ************************************************************************/
static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
} /* ixgbe_enable_queue */

/************************************************************************
 * ixgbe_disable_queue
 ************************************************************************/
static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
} /* ixgbe_disable_queue */

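/*
 * Editor's note -- worked example (illustrative only): in
 * ixgbe_enable_queue()/ixgbe_disable_queue() above, the 64-bit "queue"
 * bit covers the more-than-32 MSI-X vectors newer MACs support. For
 * vector 35:
 *
 *   queue = 1ULL << 35
 *   (queue & 0xFFFFFFFF) == 0     -> EIMS_EX(0)/EIMC_EX(0) write skipped
 *   (queue >> 32)        == 0x8   -> bit 3 of EIMS_EX(1)/EIMC_EX(1)
 *
 * On 82598EB only the low IXGBE_EIMS_RTX_QUEUE bits exist, hence the
 * single-register path.
 */
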
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ix_rx_queue *que = sc->rx_queues;
	struct ixgbe_hw *hw = &sc->hw;
	if_ctx_t ctx = sc->ctx;
	u32 eicr, eicr_mask;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 * SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, fc;

	sc = (struct ixgbe_softc *)arg1;
	fc = sc->hw.fc.current_mode;

	error = sysctl_handle_int(oidp, &fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (fc == sc->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(sc, fc);
} /* ixgbe_sysctl_flowcntl */

/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 * Flow control values:
 *   0 - off
 *   1 - rx pause
 *   2 - tx pause
 *   3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		sc->hw.fc.requested_mode = fc;
		if (sc->num_rx_queues > 1)
			ixgbe_disable_rx_drop(sc);
		break;
	case ixgbe_fc_none:
		sc->hw.fc.requested_mode = ixgbe_fc_none;
		if (sc->num_rx_queues > 1)
			ixgbe_enable_rx_drop(sc);
		break;
	default:
		return (EINVAL);
	}

	/* Don't autoneg if forcing a value */
	sc->hw.fc.disable_fc_autoneg = true;
	ixgbe_fc_enable(&sc->hw);

	return (0);
} /* ixgbe_set_flowcntl */

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 * Enable the hardware to drop packets when the buffer is
 * full. This is useful with multiqueue, so that no single
 * queue being full stalls the entire RX engine. We only
 * enable this when Multiqueue is enabled AND Flow Control
 * is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 * SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, advertise;

	sc = (struct ixgbe_softc *)arg1;
	advertise = sc->advertise;

	error = sysctl_handle_int(oidp, &advertise, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixgbe_set_advertise(sc, advertise);
} /* ixgbe_sysctl_advertise */

/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 * Flags:
 *   0x1 - advertise 100 Mb
 *   0x2 - advertise 1G
 *   0x4 - advertise 10G
 *   0x8 - advertise 10 Mb (yes, Mb)
 ************************************************************************/
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32 err = IXGBE_NOT_IMPLEMENTED;
	bool negotiate = false;

	/* Checks to validate new value */
	if (sc->advertise == advertise) /* no change */
		return (0);

	hw = &sc->hw;

	/* No speed changes
for backplane media */ 4118 if (hw->phy.media_type == ixgbe_media_type_backplane) 4119 return (ENODEV); 4120 4121 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 4122 (hw->phy.multispeed_fiber))) { 4123 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n"); 4124 return (EINVAL); 4125 } 4126 4127 if (advertise < 0x1 || advertise > 0xF) { 4128 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n"); 4129 return (EINVAL); 4130 } 4131 4132 if (hw->mac.ops.get_link_capabilities) { 4133 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 4134 &negotiate); 4135 if (err != IXGBE_SUCCESS) { 4136 device_printf(dev, "Unable to determine supported advertise speeds\n"); 4137 return (ENODEV); 4138 } 4139 } 4140 4141 /* Set new value and report new advertised mode */ 4142 if (advertise & 0x1) { 4143 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) { 4144 device_printf(dev, "Interface does not support 100Mb advertised speed\n"); 4145 return (EINVAL); 4146 } 4147 speed |= IXGBE_LINK_SPEED_100_FULL; 4148 } 4149 if (advertise & 0x2) { 4150 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { 4151 device_printf(dev, "Interface does not support 1Gb advertised speed\n"); 4152 return (EINVAL); 4153 } 4154 speed |= IXGBE_LINK_SPEED_1GB_FULL; 4155 } 4156 if (advertise & 0x4) { 4157 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { 4158 device_printf(dev, "Interface does not support 10Gb advertised speed\n"); 4159 return (EINVAL); 4160 } 4161 speed |= IXGBE_LINK_SPEED_10GB_FULL; 4162 } 4163 if (advertise & 0x8) { 4164 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { 4165 device_printf(dev, "Interface does not support 10Mb advertised speed\n"); 4166 return (EINVAL); 4167 } 4168 speed |= IXGBE_LINK_SPEED_10_FULL; 4169 } 4170 4171 hw->mac.autotry_restart = true; 4172 hw->mac.ops.setup_link(hw, speed, true); 4173 sc->advertise = advertise; 4174 4175 return (0); 4176 } /* ixgbe_set_advertise */ 4177 4178 /************************************************************************ 4179 * ixgbe_get_advertise - Get current advertised speed settings 4180 * 4181 * Formatted for sysctl usage. 4182 * Flags: 4183 * 0x1 - advertise 100 Mb 4184 * 0x2 - advertise 1G 4185 * 0x4 - advertise 10G 4186 * 0x8 - advertise 10 Mb (yes, Mb) 4187 ************************************************************************/ 4188 static int 4189 ixgbe_get_advertise(struct ixgbe_softc *sc) 4190 { 4191 struct ixgbe_hw *hw = &sc->hw; 4192 int speed; 4193 ixgbe_link_speed link_caps = 0; 4194 s32 err; 4195 bool negotiate = false; 4196 4197 /* 4198 * Advertised speed means nothing unless it's copper or 4199 * multi-speed fiber 4200 */ 4201 if (!(hw->phy.media_type == ixgbe_media_type_copper) && 4202 !(hw->phy.multispeed_fiber)) 4203 return (0); 4204 4205 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 4206 if (err != IXGBE_SUCCESS) 4207 return (0); 4208 4209 speed = 4210 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 4211 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 4212 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 4213 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 
8 : 0); 4214 4215 return speed; 4216 } /* ixgbe_get_advertise */ 4217 4218 /************************************************************************ 4219 * ixgbe_sysctl_dmac - Manage DMA Coalescing 4220 * 4221 * Control values: 4222 * 0/1 - off / on (use default value of 1000) 4223 * 4224 * Legal timer values are: 4225 * 50,100,250,500,1000,2000,5000,10000 4226 * 4227 * Turning off interrupt moderation will also turn this off. 4228 ************************************************************************/ 4229 static int 4230 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4231 { 4232 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4233 struct ifnet *ifp = iflib_get_ifp(sc->ctx); 4234 int error; 4235 u16 newval; 4236 4237 newval = sc->dmac; 4238 error = sysctl_handle_16(oidp, &newval, 0, req); 4239 if ((error) || (req->newptr == NULL)) 4240 return (error); 4241 4242 switch (newval) { 4243 case 0: 4244 /* Disabled */ 4245 sc->dmac = 0; 4246 break; 4247 case 1: 4248 /* Enable and use default */ 4249 sc->dmac = 1000; 4250 break; 4251 case 50: 4252 case 100: 4253 case 250: 4254 case 500: 4255 case 1000: 4256 case 2000: 4257 case 5000: 4258 case 10000: 4259 /* Legal values - allow */ 4260 sc->dmac = newval; 4261 break; 4262 default: 4263 /* Do nothing, illegal value */ 4264 return (EINVAL); 4265 } 4266 4267 /* Re-initialize hardware if it's already running */ 4268 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4269 ifp->if_init(ifp); 4270 4271 return (0); 4272 } /* ixgbe_sysctl_dmac */ 4273 4274 #ifdef IXGBE_DEBUG 4275 /************************************************************************ 4276 * ixgbe_sysctl_power_state 4277 * 4278 * Sysctl to test power states 4279 * Values: 4280 * 0 - set device to D0 4281 * 3 - set device to D3 4282 * (none) - get current device power state 4283 ************************************************************************/ 4284 static int 4285 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS) 4286 { 4287 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4288 device_t dev = sc->dev; 4289 int curr_ps, new_ps, error = 0; 4290 4291 curr_ps = new_ps = pci_get_powerstate(dev); 4292 4293 error = sysctl_handle_int(oidp, &new_ps, 0, req); 4294 if ((error) || (req->newptr == NULL)) 4295 return (error); 4296 4297 if (new_ps == curr_ps) 4298 return (0); 4299 4300 if (new_ps == 3 && curr_ps == 0) 4301 error = DEVICE_SUSPEND(dev); 4302 else if (new_ps == 0 && curr_ps == 3) 4303 error = DEVICE_RESUME(dev); 4304 else 4305 return (EINVAL); 4306 4307 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 4308 4309 return (error); 4310 } /* ixgbe_sysctl_power_state */ 4311 #endif 4312 4313 /************************************************************************ 4314 * ixgbe_sysctl_wol_enable 4315 * 4316 * Sysctl to enable/disable the WoL capability, 4317 * if supported by the adapter. 
 *
 * Values:
 *   0 - disabled
 *   1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	int new_wol_enabled;
	int error = 0;

	new_wol_enabled = hw->wol_enabled;
	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	new_wol_enabled = !!(new_wol_enabled);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	if (new_wol_enabled > 0 && !sc->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */

/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 * Sysctl to enable/disable the types of packets that
 * wake the adapter upon receipt.
 * Flags:
 *   0x1  - Link Status Change
 *   0x2  - Magic Packet
 *   0x4  - Direct Exact
 *   0x8  - Directed Multicast
 *   0x10 - Broadcast
 *   0x20 - ARP/IPv4 Request Packet
 *   0x40 - Direct IPv4 Packet
 *   0x80 - Direct IPv6 Packet
 *
 * Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	int error = 0;
	u32 new_wufc;

	new_wufc = sc->wufc;

	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == sc->wufc)
		return (0);

	if (new_wufc & 0xffffff00)
		return (EINVAL);

	new_wufc &= 0xff;
	new_wufc |= (0xffffff00 & sc->wufc);
	sc->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */

/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 * For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	/* Shift temp for output */
	reg = reg >> 8;

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 * Reports (directly from the PHY) whether the current PHY
 * temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Get occurrence bit */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */

/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 * Sysctl to set EEE power saving feature
 * Values:
 *   0      - disable EEE
 *   1      - enable EEE
 *   (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	struct ifnet *ifp = iflib_get_ifp(sc->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = ixgbe_setup_eee(&sc->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n",
retval); 4548 return (EINVAL); 4549 } 4550 4551 /* Restart auto-neg */ 4552 ifp->if_init(ifp); 4553 4554 device_printf(dev, "New EEE state: %d\n", new_eee); 4555 4556 /* Cache new value */ 4557 if (new_eee) 4558 sc->feat_en |= IXGBE_FEATURE_EEE; 4559 else 4560 sc->feat_en &= ~IXGBE_FEATURE_EEE; 4561 4562 return (error); 4563 } /* ixgbe_sysctl_eee_state */ 4564 4565 /************************************************************************ 4566 * ixgbe_init_device_features 4567 ************************************************************************/ 4568 static void 4569 ixgbe_init_device_features(struct ixgbe_softc *sc) 4570 { 4571 sc->feat_cap = IXGBE_FEATURE_NETMAP 4572 | IXGBE_FEATURE_RSS 4573 | IXGBE_FEATURE_MSI 4574 | IXGBE_FEATURE_MSIX 4575 | IXGBE_FEATURE_LEGACY_IRQ; 4576 4577 /* Set capabilities first... */ 4578 switch (sc->hw.mac.type) { 4579 case ixgbe_mac_82598EB: 4580 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT) 4581 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL; 4582 break; 4583 case ixgbe_mac_X540: 4584 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4585 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4586 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && 4587 (sc->hw.bus.func == 0)) 4588 sc->feat_cap |= IXGBE_FEATURE_BYPASS; 4589 break; 4590 case ixgbe_mac_X550: 4591 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4592 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4593 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4594 break; 4595 case ixgbe_mac_X550EM_x: 4596 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4597 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4598 break; 4599 case ixgbe_mac_X550EM_a: 4600 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4601 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4602 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4603 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || 4604 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { 4605 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4606 sc->feat_cap |= IXGBE_FEATURE_EEE; 4607 } 4608 break; 4609 case ixgbe_mac_82599EB: 4610 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 4611 sc->feat_cap |= IXGBE_FEATURE_FDIR; 4612 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && 4613 (sc->hw.bus.func == 0)) 4614 sc->feat_cap |= IXGBE_FEATURE_BYPASS; 4615 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) 4616 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4617 break; 4618 default: 4619 break; 4620 } 4621 4622 /* Enabled by default... */ 4623 /* Fan failure detection */ 4624 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL) 4625 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL; 4626 /* Netmap */ 4627 if (sc->feat_cap & IXGBE_FEATURE_NETMAP) 4628 sc->feat_en |= IXGBE_FEATURE_NETMAP; 4629 /* EEE */ 4630 if (sc->feat_cap & IXGBE_FEATURE_EEE) 4631 sc->feat_en |= IXGBE_FEATURE_EEE; 4632 /* Thermal Sensor */ 4633 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) 4634 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; 4635 4636 /* Enabled via global sysctl... */ 4637 /* Flow Director */ 4638 if (ixgbe_enable_fdir) { 4639 if (sc->feat_cap & IXGBE_FEATURE_FDIR) 4640 sc->feat_en |= IXGBE_FEATURE_FDIR; 4641 else 4642 device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled."); 4643 } 4644 /* 4645 * Message Signal Interrupts - Extended (MSI-X) 4646 * Normal MSI is only enabled if MSI-X calls fail. 4647 */ 4648 if (!ixgbe_enable_msix) 4649 sc->feat_cap &= ~IXGBE_FEATURE_MSIX; 4650 /* Receive-Side Scaling (RSS) */ 4651 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) 4652 sc->feat_en |= IXGBE_FEATURE_RSS; 4653 4654 /* Disable features with unmet dependencies... 
*/ 4655 /* No MSI-X */ 4656 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) { 4657 sc->feat_cap &= ~IXGBE_FEATURE_RSS; 4658 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV; 4659 sc->feat_en &= ~IXGBE_FEATURE_RSS; 4660 sc->feat_en &= ~IXGBE_FEATURE_SRIOV; 4661 } 4662 } /* ixgbe_init_device_features */ 4663 4664 /************************************************************************ 4665 * ixgbe_check_fan_failure 4666 ************************************************************************/ 4667 static void 4668 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt) 4669 { 4670 u32 mask; 4671 4672 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) : 4673 IXGBE_ESDP_SDP1; 4674 4675 if (reg & mask) 4676 device_printf(sc->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 4677 } /* ixgbe_check_fan_failure */ 4678 4679 /************************************************************************ 4680 * ixgbe_sbuf_fw_version 4681 ************************************************************************/ 4682 static void 4683 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf) 4684 { 4685 struct ixgbe_nvm_version nvm_ver = {0}; 4686 uint16_t phyfw = 0; 4687 int status; 4688 const char *space = ""; 4689 4690 ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */ 4691 ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */ 4692 ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */ 4693 status = ixgbe_get_phy_firmware_version(hw, &phyfw); 4694 4695 if (nvm_ver.oem_valid) { 4696 sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major, 4697 nvm_ver.oem_minor, nvm_ver.oem_release); 4698 space = " "; 4699 } 4700 4701 if (nvm_ver.or_valid) { 4702 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", 4703 space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch); 4704 space = " "; 4705 } 4706 4707 if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) | 4708 NVM_VER_INVALID)) { 4709 sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id); 4710 space = " "; 4711 } 4712 4713 if (phyfw != 0 && status == IXGBE_SUCCESS) 4714 sbuf_printf(buf, "%sPHY FW V%d", space, phyfw); 4715 } /* ixgbe_sbuf_fw_version */ 4716 4717 /************************************************************************ 4718 * ixgbe_print_fw_version 4719 ************************************************************************/ 4720 static void 4721 ixgbe_print_fw_version(if_ctx_t ctx) 4722 { 4723 struct ixgbe_softc *sc = iflib_get_softc(ctx); 4724 struct ixgbe_hw *hw = &sc->hw; 4725 device_t dev = sc->dev; 4726 struct sbuf *buf; 4727 int error = 0; 4728 4729 buf = sbuf_new_auto(); 4730 if (!buf) { 4731 device_printf(dev, "Could not allocate sbuf for output.\n"); 4732 return; 4733 } 4734 4735 ixgbe_sbuf_fw_version(hw, buf); 4736 4737 error = sbuf_finish(buf); 4738 if (error) 4739 device_printf(dev, "Error finishing sbuf: %d\n", error); 4740 else if (sbuf_len(buf)) 4741 device_printf(dev, "%s\n", sbuf_data(buf)); 4742 4743 sbuf_delete(buf); 4744 } /* ixgbe_print_fw_version */ 4745 4746 /************************************************************************ 4747 * ixgbe_sysctl_print_fw_version 4748 ************************************************************************/ 4749 static int 4750 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS) 4751 { 4752 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1; 4753 struct ixgbe_hw *hw = &sc->hw; 4754 device_t dev = sc->dev; 4755 struct sbuf *buf; 4756 int error = 0; 4757 4758 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4759 if 
(!buf) { 4760 device_printf(dev, "Could not allocate sbuf for output.\n"); 4761 return (ENOMEM); 4762 } 4763 4764 ixgbe_sbuf_fw_version(hw, buf); 4765 4766 error = sbuf_finish(buf); 4767 if (error) 4768 device_printf(dev, "Error finishing sbuf: %d\n", error); 4769 4770 sbuf_delete(buf); 4771 4772 return (0); 4773 } /* ixgbe_sysctl_print_fw_version */ 4774
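
/*
 * Editor's note (usage sketch, illustrative only): the sysctl handlers in
 * this file compose simple bit flags. For example, advertising 1G + 10G on
 * a copper port is flag 0x2 | 0x4 = 0x6, and a wake-up filter of 0x3 arms
 * both link-status-change and magic-packet wake. Assuming the conventional
 * per-device node names registered elsewhere in the driver (an assumption,
 * not verified against this section), from the shell:
 *
 *   # sysctl dev.ix.0.advertise_speed=0x6   # advertise 1G + 10G
 *   # sysctl dev.ix.0.fc=3                  # full flow control
 *   # sysctl dev.ix.0.wufc=0x3              # wake on LSC + magic packet
 */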