/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
	/* required last entry */
	PVID_END
};

static void *ixgbe_register(device_t);
static int  ixgbe_if_attach_pre(if_ctx_t);
static int  ixgbe_if_attach_post(if_ctx_t);
static int  ixgbe_if_detach(if_ctx_t);
static int  ixgbe_if_shutdown(if_ctx_t);
static int  ixgbe_if_suspend(if_ctx_t);
static int  ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixgbe_if_media_change(if_ctx_t);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int  ixgbe_if_promisc_set(if_ctx_t, int);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int  ixgbe_allocate_pci_resources(if_ctx_t);
static int  ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int  ixgbe_msix_link(void *);
static int  ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int  ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
static int  ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 *  FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
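/*
 * A note on the default (assuming IXGBE_LOW_LATENCY is 128, its value in
 * ixgbe.h): this works out to 4000000 / 128 = 31250 interrupts per second
 * per vector, and the rate is converted back into a per-vector EITR
 * interval when the interrupt vectors are configured.
 */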

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
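/*
 * The advertise value is a bitmask decoded by ixgbe_set_advertise();
 * assuming the encoding used there: 0x1 = 100 Mb, 0x2 = 1 Gb, 0x4 = 10 Gb,
 * 0x8 = 10 Mb, 0x10 = 2.5 Gb, 0x20 = 5 Gb.  Leaving it at 0 keeps the
 * default of advertising everything the PHY supports.
 */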

/*
 * Smart speed setting, default to on.
 * This only works as a compile option
 * right now, as it's set during attach;
 * set this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Setting this to on allows the use
 * of unsupported SFP+ modules; note that
 * in doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");
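/*
 * With AIM enabled the driver re-derives each vector's EITR setting from
 * the bytes/packet ratio observed since the last update, so light traffic
 * gets low latency and bulk traffic gets fewer interrupts; with it
 * disabled the static hw.ix.max_interrupt_rate above applies.
 */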

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(sc->num_tx_queues > 0);
	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
		    i);

		txr->sc = que->sc = sc;

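		/*
		 * tx_rsq is the "report status" ring: it records, in
		 * submission order, the indices of descriptors that had
		 * the RS bit set, so the completion path can walk it to
		 * see how far the hardware has progressed.  QIDX_INVALID
		 * marks unused slots.
		 */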
		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];

		txr->bytes = 0;
		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (sc->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;

	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixgbe_if_queues_free(ctx);

	return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int i;

	MPASS(sc->num_rx_queues > 0);
	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		/* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
		    i);

		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_RDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = sc->tx_queues;
	struct ix_rx_queue *rx_que = sc->rx_queues;
	int i;

	if (tx_que != NULL) {
		for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
				break;

			free(txr->tx_rsq, M_IXGBE);
			txr->tx_rsq = NULL;
		}

		free(sc->tx_queues, M_IXGBE);
		sc->tx_queues = NULL;
	}
	if (rx_que != NULL) {
		free(sc->rx_queues, M_IXGBE);
		sc->rx_queues = NULL;
	}
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

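	/*
	 * Each 32-bit RETA/ERETA register packs four consecutive 8-bit
	 * table entries, least-significant byte first.  The loop below
	 * shifts the accumulator right and inserts each new queue id in
	 * the top byte, flushing a register every fourth entry; e.g.
	 * entries 0..3 mapping to queues q0..q3 produce
	 * RETA(0) = (q3 << 24) | (q2 << 16) | (q1 << 8) | q0.
	 */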
	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ixgbe_hw *hw = &sc->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

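	/*
	 * SRRCTL.BSIZEPKT is programmed in 1 KB units
	 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so round the receive buffer
	 * size up to the next 1 KB boundary first; e.g. a 2048-byte mbuf
	 * cluster yields bufsz = 2, a 4096-byte cluster bufsz = 4.
	 */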
	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->hw.back = sc;
	sc->ctx = ctx;
	sc->dev = dev;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Determine hardware revision */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		return (ENXIO);
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_pci;
	}

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	ixgbe_init_device_features(sc);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(sc);

	/* Verify adapter fan is still functional (if applicable) */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(sc, esdp, false);
	}

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	hw->phy.reset_if_overtemp = true;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		sc->sfp_probe = true;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_pci;
	} else if (error) {
		device_printf(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_pci;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_pci;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_pci;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
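	/*
	 * The RSS indirection table has 128 entries (RETA) on older MACs
	 * and 512 entries (RETA plus ERETA) on the X550 family; the sizes
	 * chosen here must match what ixgbe_initialize_rss_mapping()
	 * programs.
	 */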
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		scctx->isc_rss_table_size = 512;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
		break;
	default:
		scctx->isc_rss_table_size = 128;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
	}

	/* Allow legacy interrupts */
	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);

	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
	} else {
		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	}

	scctx->isc_msix_bar = pci_msix_table_bar(dev);

	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

	return (0);

err_pci:
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_pci_resources(ctx);

	return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	if (sc->intr_type == IFLIB_INTR_LEGACY &&
	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev, "Device does not support legacy interrupts\n");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(sc, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	ixgbe_initialize_iov(sc);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(sc);
	ixgbe_add_hw_stats(sc);

	/* Check PCIe slot type/speed/width */
	ixgbe_get_slot_info(sc);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass sc.
	 */
	ixgbe_bypass_init(sc);

	/* Display NVM and Option ROM versions */
	ixgbe_print_fw_version(ctx);

	/* Set an initial dmac value */
	sc->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	sc->advertise = ixgbe_get_default_advertise(sc);

	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u16 dev_caps = 0;

	/* Find out WoL support for port */
	sc->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	     hw->bus.func == 0))
		sc->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));

	sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

	ixgbe_add_media_types(ctx);

	/* Autoselect media by default */
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	case IFCOUNTER_OMCASTS:
		return (sc->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (sc->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (0);
	case IFCOUNTER_IERRORS:
		return (sc->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int i;

	if (hw->phy.ops.read_i2c_byte == NULL)
		return (ENXIO);
	for (i = 0; i < req->len; i++)
		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
		    req->dev_addr, &req->data[i]);
	return (0);
} /* ixgbe_if_i2c_req */

/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		return (false);
	default:
		return (true);
	}
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u64 layer;

	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (hw->mac.type == ixgbe_mac_X550) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return (true);
		return (false);
	case ixgbe_mac_82599EB:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (true);
		default:
			return (false);
		}
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
			return (true);
		return (false);
	default:
		return (false);
	}
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		iflib_admin_intr_deferred(ctx);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &sc->link_speed,
			    &sc->link_up, false);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;

		if (hw->mac.type == ixgbe_mac_X550 &&
		    hw->phy.autoneg_advertised == 0) {
			/*
			 * 2.5G and 5G autonegotiation speeds on X550
			 * are disabled by default due to reported
			 * interoperability issues with some switches.
			 *
			 * The second condition checks if any operations
			 * involving setting autonegotiation speeds have
			 * been performed prior to this ixgbe_config_link()
			 * call.
			 *
			 * If hw->phy.autoneg_advertised does not
			 * equal 0, this means that the user might have
			 * set autonegotiation speeds via the sysctl
			 * before bringing the interface up.  In this
			 * case, we should not disable 2.5G and 5G
			 * since those speeds might be selected by the
			 * user.
			 *
			 * Otherwise (i.e. if hw->phy.autoneg_advertised
			 * is set to 0), it is the first time we set
			 * autonegotiation preferences and the default
			 * set of speeds should exclude 2.5G and 5G.
			 */
			autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
			    IXGBE_LINK_SPEED_5GB_FULL);
		}

		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    sc->link_up);
	}
} /* ixgbe_config_link */

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_hw_stats *stats = &sc->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u32 lxoffrxc;
	u64 total_missed_rx = 0;

	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	stats->gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		stats->lxoffrxc += lxoffrxc;
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		stats->lxoffrxc += lxoffrxc;
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (lxoffrxc)
		sc->shared->isc_pause_frames = 1;

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

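	/*
	 * PAUSE frames are minimum-size (64-byte) frames that the MAC
	 * counts as transmitted traffic, so back them out of the good/
	 * 64-byte packet counters and the good octet counter below to
	 * leave only real data frames.
	 */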
	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;

	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(sc, stats->gprc);
	IXGBE_SET_OPACKETS(sc, stats->gptc);
	IXGBE_SET_IBYTES(sc, stats->gorc);
	IXGBE_SET_OBYTES(sc, stats->gotc);
	IXGBE_SET_IMCASTS(sc, stats->mprc);
	IXGBE_SET_OMCASTS(sc, stats->mptc);
	IXGBE_SET_COLLISIONS(sc, 0);
	IXGBE_SET_IQDROPS(sc, total_missed_rx);

	/*
	 * Aggregate following types of errors as RX errors:
	 * - CRC error count,
	 * - illegal byte error count,
	 * - checksum error count,
	 * - missed packets count,
	 * - length error count,
	 * - undersized packets count,
	 * - fragmented packets count,
	 * - oversized packets count,
	 * - jabber count.
	 */
	IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc + stats->xec +
	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
	    stats->rjc);
} /* ixgbe_update_stats_counters */

/************************************************************************
 * ixgbe_add_hw_stats
 *
 *   Add sysctl variables, one per statistic, to the system.
 ************************************************************************/
1597 ************************************************************************/ 1598 static void 1599 ixgbe_add_hw_stats(struct ixgbe_softc *sc) 1600 { 1601 device_t dev = iflib_get_dev(sc->ctx); 1602 struct ix_rx_queue *rx_que; 1603 struct ix_tx_queue *tx_que; 1604 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1605 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1606 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1607 struct ixgbe_hw_stats *stats = &sc->stats.pf; 1608 struct sysctl_oid *stat_node, *queue_node; 1609 struct sysctl_oid_list *stat_list, *queue_list; 1610 int i; 1611 1612 #define QUEUE_NAME_LEN 32 1613 char namebuf[QUEUE_NAME_LEN]; 1614 1615 /* Driver Statistics */ 1616 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1617 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets"); 1618 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1619 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); 1620 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1621 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); 1622 1623 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 1624 struct tx_ring *txr = &tx_que->txr; 1625 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1626 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1627 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1628 queue_list = SYSCTL_CHILDREN(queue_node); 1629 1630 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1631 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1632 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1633 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1634 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, 1635 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1636 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1637 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1638 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1639 CTLFLAG_RD, &txr->total_packets, 1640 "Queue Packets Transmitted"); 1641 } 1642 1643 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 1644 struct rx_ring *rxr = &rx_que->rxr; 1645 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1646 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1647 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1648 queue_list = SYSCTL_CHILDREN(queue_node); 1649 1650 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1651 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 1652 &sc->rx_queues[i], 0, 1653 ixgbe_sysctl_interrupt_rate_handler, "IU", 1654 "Interrupt Rate"); 1655 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1656 CTLFLAG_RD, &(sc->rx_queues[i].irqs), 1657 "irqs on this queue"); 1658 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1659 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1660 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1661 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1662 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, 1663 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1664 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1665 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1666 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1667 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1668 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1669 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1670 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1671 CTLFLAG_RD, 
	    &rxr->rx_discarded, "Discarded RX packets");
	}

	/* MAC stats get their own sub node */
	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
	    CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");

	/* Flow Control stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
	    CTLFLAG_RD,
	    &stats->ruc, "Receive Undersized");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
	    CTLFLAG_RD, &stats->xec, "Checksum Errors");

	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 *   Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if (!txr)
		return (0);

	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;

	return (0);
} /* ixgbe_sysctl_tdh_handler */

/************************************************************************
 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
 *
 *   Retrieves the TDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1806 { 1807 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1808 int error; 1809 unsigned int val; 1810 1811 if (!txr) 1812 return (0); 1813 1814 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me)); 1815 error = sysctl_handle_int(oidp, &val, 0, req); 1816 if (error || !req->newptr) 1817 return error; 1818 1819 return (0); 1820 } /* ixgbe_sysctl_tdt_handler */ 1821 1822 /************************************************************************ 1823 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1824 * 1825 * Retrieves the RDH value from the hardware 1826 ************************************************************************/ 1827 static int 1828 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1829 { 1830 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1831 int error; 1832 unsigned int val; 1833 1834 if (!rxr) 1835 return (0); 1836 1837 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me)); 1838 error = sysctl_handle_int(oidp, &val, 0, req); 1839 if (error || !req->newptr) 1840 return error; 1841 1842 return (0); 1843 } /* ixgbe_sysctl_rdh_handler */ 1844 1845 /************************************************************************ 1846 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1847 * 1848 * Retrieves the RDT value from the hardware 1849 ************************************************************************/ 1850 static int 1851 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1852 { 1853 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1854 int error; 1855 unsigned int val; 1856 1857 if (!rxr) 1858 return (0); 1859 1860 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me)); 1861 error = sysctl_handle_int(oidp, &val, 0, req); 1862 if (error || !req->newptr) 1863 return error; 1864 1865 return (0); 1866 } /* ixgbe_sysctl_rdt_handler */ 1867 1868 /************************************************************************ 1869 * ixgbe_if_vlan_register 1870 * 1871 * Run via vlan config EVENT, it enables us to use the 1872 * HW Filter table since we can get the vlan id. This 1873 * just creates the entry in the soft version of the 1874 * VFTA, init will repopulate the real table. 1875 ************************************************************************/ 1876 static void 1877 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1878 { 1879 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1880 u16 index, bit; 1881 1882 index = (vtag >> 5) & 0x7F; 1883 bit = vtag & 0x1F; 1884 sc->shadow_vfta[index] |= (1 << bit); 1885 ++sc->num_vlans; 1886 ixgbe_setup_vlan_hw_support(ctx); 1887 } /* ixgbe_if_vlan_register */ 1888 1889 /************************************************************************ 1890 * ixgbe_if_vlan_unregister 1891 * 1892 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 
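 *
 *   The shadow VFTA is a bitmap of 128 32-bit words: bits 11:5 of
 *   the VLAN tag select the word and bits 4:0 the bit within it.
 *   As a worked example, vtag 100 clears word 3, bit 4.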
 ************************************************************************/
static void
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16 index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] &= ~(1 << bit);
	--sc->num_vlans;
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_unregister */

/************************************************************************
 * ixgbe_setup_vlan_hw_support
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	int i;
	u32 ctrl;

	/*
	 * We get here through init_locked, meaning
	 * a soft reset; this has already cleared
	 * the VFTA and other state, so if there
	 * have been no VLANs registered do nothing.
	 */
	if (sc->num_vlans == 0)
		return;

	/* Setup the queues for vlans */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		for (i = 0; i < sc->num_rx_queues; i++) {
			rxr = &sc->rx_queues[i].rxr;
			/* On 82599 the VLAN enable is per/queue in RXDCTL */
			if (hw->mac.type != ixgbe_mac_82598EB) {
				ctrl = IXGBE_READ_REG(hw,
				    IXGBE_RXDCTL(rxr->me));
				ctrl |= IXGBE_RXDCTL_VME;
				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
				    ctrl);
			}
			rxr->vtag_strip = true;
		}
	}

	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (sc->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    sc->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */

/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct ixgbe_softc *sc)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int bus_info_valid = true;
	u32 offset;
	u16 link;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
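	 * (The X550EM MACs matched just below sit on an internal bus,
	 * so we simply return for them.)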
1996 */ 1997 switch (hw->mac.type) { 1998 case ixgbe_mac_X550EM_x: 1999 case ixgbe_mac_X550EM_a: 2000 return; 2001 default: 2002 goto display; 2003 } 2004 2005 get_parent_info: 2006 /* 2007 * For the Quad port adapter we need to parse back 2008 * up the PCI tree to find the speed of the expansion 2009 * slot into which this adapter is plugged. A bit more work. 2010 */ 2011 dev = device_get_parent(device_get_parent(dev)); 2012 #ifdef IXGBE_DEBUG 2013 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 2014 pci_get_slot(dev), pci_get_function(dev)); 2015 #endif 2016 dev = device_get_parent(device_get_parent(dev)); 2017 #ifdef IXGBE_DEBUG 2018 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 2019 pci_get_slot(dev), pci_get_function(dev)); 2020 #endif 2021 /* Now get the PCI Express Capabilities offset */ 2022 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 2023 /* 2024 * Hmm...can't get PCI-Express capabilities. 2025 * Falling back to default method. 2026 */ 2027 bus_info_valid = false; 2028 ixgbe_get_bus_info(hw); 2029 goto display; 2030 } 2031 /* ...and read the Link Status Register */ 2032 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2033 ixgbe_set_pci_config_data_generic(hw, link); 2034 2035 display: 2036 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 2037 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 2038 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 2039 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 2040 "Unknown"), 2041 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 2042 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 2043 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 2044 "Unknown")); 2045 2046 if (bus_info_valid) { 2047 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 2048 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 2049 (hw->bus.speed == ixgbe_bus_speed_2500))) { 2050 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2051 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 2052 } 2053 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 2054 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 2055 (hw->bus.speed < ixgbe_bus_speed_8000))) { 2056 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 2057 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 2058 } 2059 } else 2060 device_printf(dev, "Unable to determine slot speed/width. 
The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */

/************************************************************************
 * ixgbe_if_msix_intr_assign
 *
 *   Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que;
	int error, rid, vector = 0;
	char buf[16];

	/* The admin queue is vector 0 */
	rid = vector + 1;
	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
		    buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue interrupt %d, err: %d\n",
			    i, error);
			sc->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
	}
	for (int i = 0; i < sc->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &sc->tx_queues[i];
		tx_que->msix = i % sc->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &sc->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
	    IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler\n");
		return (error);
	}

	sc->vector = vector;

	return (0);
fail:
	iflib_irq_free(ctx, &sc->irq);
	rx_que = sc->rx_queues;
	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixgbe_if_msix_intr_assign */

static inline void
ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
{
	uint32_t newitr = 0;
	struct rx_ring *rxr = &que->rxr;

	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
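	 *    For example (a sketch): an average of 1500 bytes/packet
	 *    gives newitr = 1500 + 24 = 1524, which stays under the
	 *    3000 cap and, being above the 300-1200 mid range, is
	 *    halved to 762.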
2137 */ 2138 if (que->eitr_setting) { 2139 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix), 2140 que->eitr_setting); 2141 } 2142 2143 que->eitr_setting = 0; 2144 /* Idle, do nothing */ 2145 if (rxr->bytes == 0) { 2146 return; 2147 } 2148 2149 if ((rxr->bytes) && (rxr->packets)) { 2150 newitr = (rxr->bytes / rxr->packets); 2151 } 2152 2153 newitr += 24; /* account for hardware frame, crc */ 2154 /* set an upper boundary */ 2155 newitr = min(newitr, 3000); 2156 2157 /* Be nice to the mid range */ 2158 if ((newitr > 300) && (newitr < 1200)) { 2159 newitr = (newitr / 3); 2160 } else { 2161 newitr = (newitr / 2); 2162 } 2163 2164 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 2165 newitr |= newitr << 16; 2166 } else { 2167 newitr |= IXGBE_EITR_CNT_WDIS; 2168 } 2169 2170 /* save for next interrupt */ 2171 que->eitr_setting = newitr; 2172 2173 /* Reset state */ 2174 rxr->bytes = 0; 2175 rxr->packets = 0; 2176 2177 return; 2178 } 2179 2180 /********************************************************************* 2181 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2182 **********************************************************************/ 2183 static int 2184 ixgbe_msix_que(void *arg) 2185 { 2186 struct ix_rx_queue *que = arg; 2187 struct ixgbe_softc *sc = que->sc; 2188 struct ifnet *ifp = iflib_get_ifp(que->sc->ctx); 2189 2190 /* Protect against spurious interrupts */ 2191 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2192 return (FILTER_HANDLED); 2193 2194 ixgbe_disable_queue(sc, que->msix); 2195 ++que->irqs; 2196 2197 /* Check for AIM */ 2198 if (sc->enable_aim) { 2199 ixgbe_perform_aim(sc, que); 2200 } 2201 2202 return (FILTER_SCHEDULE_THREAD); 2203 } /* ixgbe_msix_que */ 2204 2205 /************************************************************************ 2206 * ixgbe_media_status - Media Ioctl callback 2207 * 2208 * Called whenever the user queries the status of 2209 * the interface using ifconfig. 
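 *   e.g. it fills in the "media:" line of `ifconfig ix0` output
 *   (illustrative; the exact rendering belongs to ifconfig(8)).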
2210 ************************************************************************/ 2211 static void 2212 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2213 { 2214 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2215 struct ixgbe_hw *hw = &sc->hw; 2216 int layer; 2217 2218 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2219 2220 ifmr->ifm_status = IFM_AVALID; 2221 ifmr->ifm_active = IFM_ETHER; 2222 2223 if (!sc->link_active) 2224 return; 2225 2226 ifmr->ifm_status |= IFM_ACTIVE; 2227 layer = sc->phy_layer; 2228 2229 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2230 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2231 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2232 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2233 switch (sc->link_speed) { 2234 case IXGBE_LINK_SPEED_10GB_FULL: 2235 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2236 break; 2237 case IXGBE_LINK_SPEED_1GB_FULL: 2238 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2239 break; 2240 case IXGBE_LINK_SPEED_100_FULL: 2241 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2242 break; 2243 case IXGBE_LINK_SPEED_10_FULL: 2244 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2245 break; 2246 } 2247 if (hw->mac.type == ixgbe_mac_X550) 2248 switch (sc->link_speed) { 2249 case IXGBE_LINK_SPEED_5GB_FULL: 2250 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 2251 break; 2252 case IXGBE_LINK_SPEED_2_5GB_FULL: 2253 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 2254 break; 2255 } 2256 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2257 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2258 switch (sc->link_speed) { 2259 case IXGBE_LINK_SPEED_10GB_FULL: 2260 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2261 break; 2262 } 2263 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2264 switch (sc->link_speed) { 2265 case IXGBE_LINK_SPEED_10GB_FULL: 2266 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2267 break; 2268 case IXGBE_LINK_SPEED_1GB_FULL: 2269 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2270 break; 2271 } 2272 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2273 switch (sc->link_speed) { 2274 case IXGBE_LINK_SPEED_10GB_FULL: 2275 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2276 break; 2277 case IXGBE_LINK_SPEED_1GB_FULL: 2278 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2279 break; 2280 } 2281 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2282 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2283 switch (sc->link_speed) { 2284 case IXGBE_LINK_SPEED_10GB_FULL: 2285 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2286 break; 2287 case IXGBE_LINK_SPEED_1GB_FULL: 2288 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2289 break; 2290 } 2291 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2292 switch (sc->link_speed) { 2293 case IXGBE_LINK_SPEED_10GB_FULL: 2294 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2295 break; 2296 } 2297 /* 2298 * XXX: These need to use the proper media types once 2299 * they're added. 
	 */
#ifndef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_if_media_status */

/************************************************************************
 * ixgbe_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
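	 * For example, an illustrative request that lands here:
	 *     ifconfig ix0 media 10Gbase-T mediaopt full-duplex
	 * Media words the driver never registered are rejected by
	 * ifmedia before this callback runs.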
2396 */ 2397 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2398 case IFM_AUTO: 2399 case IFM_10G_T: 2400 speed |= IXGBE_LINK_SPEED_100_FULL; 2401 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2402 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2403 break; 2404 case IFM_10G_LRM: 2405 case IFM_10G_LR: 2406 #ifndef IFM_ETH_XTYPE 2407 case IFM_10G_SR: /* KR, too */ 2408 case IFM_10G_CX4: /* KX4 */ 2409 #else 2410 case IFM_10G_KR: 2411 case IFM_10G_KX4: 2412 #endif 2413 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2414 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2415 break; 2416 #ifndef IFM_ETH_XTYPE 2417 case IFM_1000_CX: /* KX */ 2418 #else 2419 case IFM_1000_KX: 2420 #endif 2421 case IFM_1000_LX: 2422 case IFM_1000_SX: 2423 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2424 break; 2425 case IFM_1000_T: 2426 speed |= IXGBE_LINK_SPEED_100_FULL; 2427 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2428 break; 2429 case IFM_10G_TWINAX: 2430 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2431 break; 2432 case IFM_5000_T: 2433 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2434 break; 2435 case IFM_2500_T: 2436 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2437 break; 2438 case IFM_100_TX: 2439 speed |= IXGBE_LINK_SPEED_100_FULL; 2440 break; 2441 case IFM_10_T: 2442 speed |= IXGBE_LINK_SPEED_10_FULL; 2443 break; 2444 default: 2445 goto invalid; 2446 } 2447 2448 hw->mac.autotry_restart = true; 2449 hw->mac.ops.setup_link(hw, speed, true); 2450 sc->advertise = 2451 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) | 2452 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) | 2453 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | 2454 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) | 2455 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) | 2456 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0); 2457 2458 return (0); 2459 2460 invalid: 2461 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2462 2463 return (EINVAL); 2464 } /* ixgbe_if_media_change */ 2465 2466 /************************************************************************ 2467 * ixgbe_set_promisc 2468 ************************************************************************/ 2469 static int 2470 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2471 { 2472 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2473 struct ifnet *ifp = iflib_get_ifp(ctx); 2474 u32 rctl; 2475 int mcnt = 0; 2476 2477 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); 2478 rctl &= (~IXGBE_FCTRL_UPE); 2479 if (ifp->if_flags & IFF_ALLMULTI) 2480 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2481 else { 2482 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); 2483 } 2484 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2485 rctl &= (~IXGBE_FCTRL_MPE); 2486 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2487 2488 if (ifp->if_flags & IFF_PROMISC) { 2489 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2490 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2491 } else if (ifp->if_flags & IFF_ALLMULTI) { 2492 rctl |= IXGBE_FCTRL_MPE; 2493 rctl &= ~IXGBE_FCTRL_UPE; 2494 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); 2495 } 2496 return (0); 2497 } /* ixgbe_if_promisc_set */ 2498 2499 /************************************************************************ 2500 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2501 ************************************************************************/ 2502 static int 2503 ixgbe_msix_link(void *arg) 2504 { 2505 struct ixgbe_softc *sc = arg; 2506 struct ixgbe_hw *hw = &sc->hw; 2507 u32 eicr, eicr_mask; 2508 s32 retval; 2509 2510 ++sc->link_irq; 2511 2512 /* Pause other interrupts */ 2513 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2514 2515 /* First 
	 * get the cause */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
				return (FILTER_HANDLED);
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
		} else if (eicr & IXGBE_EICR_ECC) {
			device_printf(iflib_get_dev(sc->ctx),
			    "Received ECC Err, initiating reset\n");
			hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
			ixgbe_reset_hw(hw);
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (sc->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(sc->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(sc->ctx),
				    "System shutdown required!\n");
				break;
			default:
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(sc->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(sc->ctx),
				    "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* Check for fan failure */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(sc, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EICR,
		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
	}

	return (sc->task_requests != 0) ?
FILTER_SCHEDULE_THREAD : FILTER_HANDLED; 2619 } /* ixgbe_msix_link */ 2620 2621 /************************************************************************ 2622 * ixgbe_sysctl_interrupt_rate_handler 2623 ************************************************************************/ 2624 static int 2625 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2626 { 2627 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2628 int error; 2629 unsigned int reg, usec, rate; 2630 2631 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix)); 2632 usec = ((reg & 0x0FF8) >> 3); 2633 if (usec > 0) 2634 rate = 500000 / usec; 2635 else 2636 rate = 0; 2637 error = sysctl_handle_int(oidp, &rate, 0, req); 2638 if (error || !req->newptr) 2639 return error; 2640 reg &= ~0xfff; /* default, no limitation */ 2641 ixgbe_max_interrupt_rate = 0; 2642 if (rate > 0 && rate < 500000) { 2643 if (rate < 1000) 2644 rate = 1000; 2645 ixgbe_max_interrupt_rate = rate; 2646 reg |= ((4000000/rate) & 0xff8); 2647 } 2648 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg); 2649 2650 return (0); 2651 } /* ixgbe_sysctl_interrupt_rate_handler */ 2652 2653 /************************************************************************ 2654 * ixgbe_add_device_sysctls 2655 ************************************************************************/ 2656 static void 2657 ixgbe_add_device_sysctls(if_ctx_t ctx) 2658 { 2659 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2660 device_t dev = iflib_get_dev(ctx); 2661 struct ixgbe_hw *hw = &sc->hw; 2662 struct sysctl_oid_list *child; 2663 struct sysctl_ctx_list *ctx_list; 2664 2665 ctx_list = device_get_sysctl_ctx(dev); 2666 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2667 2668 /* Sysctls for all devices */ 2669 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2670 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2671 sc, 0, ixgbe_sysctl_flowcntl, "I", 2672 IXGBE_SYSCTL_DESC_SET_FC); 2673 2674 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2675 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2676 sc, 0, ixgbe_sysctl_advertise, "I", 2677 IXGBE_SYSCTL_DESC_ADV_SPEED); 2678 2679 sc->enable_aim = ixgbe_enable_aim; 2680 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW, 2681 &sc->enable_aim, 0, "Interrupt Moderation"); 2682 2683 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version", 2684 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2685 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions"); 2686 2687 #ifdef IXGBE_DEBUG 2688 /* testing sysctls (for all devices) */ 2689 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2690 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2691 sc, 0, ixgbe_sysctl_power_state, 2692 "I", "PCI Power State"); 2693 2694 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2695 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2696 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2697 #endif 2698 /* for X550 series devices */ 2699 if (hw->mac.type >= ixgbe_mac_X550) 2700 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2701 CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2702 sc, 0, ixgbe_sysctl_dmac, 2703 "I", "DMA Coalesce"); 2704 2705 /* for WoL-capable devices */ 2706 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2707 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2708 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2709 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2710 2711 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 
2712 CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2713 sc, 0, ixgbe_sysctl_wufc, 2714 "I", "Enable/Disable Wake Up Filters"); 2715 } 2716 2717 /* for X552/X557-AT devices */ 2718 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2719 struct sysctl_oid *phy_node; 2720 struct sysctl_oid_list *phy_list; 2721 2722 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2723 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls"); 2724 phy_list = SYSCTL_CHILDREN(phy_node); 2725 2726 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2727 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2728 sc, 0, ixgbe_sysctl_phy_temp, 2729 "I", "Current External PHY Temperature (Celsius)"); 2730 2731 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2732 "overtemp_occurred", 2733 CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, 2734 ixgbe_sysctl_phy_overtemp_occurred, "I", 2735 "External PHY High Temperature Event Occurred"); 2736 } 2737 2738 if (sc->feat_cap & IXGBE_FEATURE_EEE) { 2739 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2740 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, 2741 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2742 } 2743 } /* ixgbe_add_device_sysctls */ 2744 2745 /************************************************************************ 2746 * ixgbe_allocate_pci_resources 2747 ************************************************************************/ 2748 static int 2749 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2750 { 2751 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2752 device_t dev = iflib_get_dev(ctx); 2753 int rid; 2754 2755 rid = PCIR_BAR(0); 2756 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2757 RF_ACTIVE); 2758 2759 if (!(sc->pci_mem)) { 2760 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2761 return (ENXIO); 2762 } 2763 2764 /* Save bus_space values for READ/WRITE_REG macros */ 2765 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); 2766 sc->osdep.mem_bus_space_handle = 2767 rman_get_bushandle(sc->pci_mem); 2768 /* Set hw values for shared code */ 2769 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; 2770 2771 return (0); 2772 } /* ixgbe_allocate_pci_resources */ 2773 2774 /************************************************************************ 2775 * ixgbe_detach - Device removal routine 2776 * 2777 * Called when the driver is being removed. 2778 * Stops the adapter and deallocates all the resources 2779 * that were allocated for driver operation. 
2780 * 2781 * return 0 on success, positive on failure 2782 ************************************************************************/ 2783 static int 2784 ixgbe_if_detach(if_ctx_t ctx) 2785 { 2786 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2787 device_t dev = iflib_get_dev(ctx); 2788 u32 ctrl_ext; 2789 2790 INIT_DEBUGOUT("ixgbe_detach: begin"); 2791 2792 if (ixgbe_pci_iov_detach(dev) != 0) { 2793 device_printf(dev, "SR-IOV in use; detach first.\n"); 2794 return (EBUSY); 2795 } 2796 2797 ixgbe_setup_low_power_mode(ctx); 2798 2799 /* let hardware know driver is unloading */ 2800 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); 2801 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2802 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); 2803 2804 ixgbe_free_pci_resources(ctx); 2805 free(sc->mta, M_IXGBE); 2806 2807 return (0); 2808 } /* ixgbe_if_detach */ 2809 2810 /************************************************************************ 2811 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2812 * 2813 * Prepare the adapter/port for LPLU and/or WoL 2814 ************************************************************************/ 2815 static int 2816 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2817 { 2818 struct ixgbe_softc *sc = iflib_get_softc(ctx); 2819 struct ixgbe_hw *hw = &sc->hw; 2820 device_t dev = iflib_get_dev(ctx); 2821 s32 error = 0; 2822 2823 if (!hw->wol_enabled) 2824 ixgbe_set_phy_power(hw, false); 2825 2826 /* Limit power management flow to X550EM baseT */ 2827 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2828 hw->phy.ops.enter_lplu) { 2829 /* Turn off support for APM wakeup. (Using ACPI instead) */ 2830 IXGBE_WRITE_REG(hw, IXGBE_GRC, 2831 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 2832 2833 /* 2834 * Clear Wake Up Status register to prevent any previous wakeup 2835 * events from waking us up immediately after we suspend. 
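		 * (WUS bits are write-1-to-clear, hence the all-ones
		 * write below.)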
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = true;
		ixgbe_if_stop(ctx);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_if_stop(ctx);
	}

	return (error);
} /* ixgbe_setup_low_power_mode */

/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 ************************************************************************/
static int
ixgbe_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	error = ixgbe_setup_low_power_mode(ctx);

	return (error);
} /* ixgbe_if_shutdown */

/************************************************************************
 * ixgbe_suspend
 *
 *   From D0 to D3
 ************************************************************************/
static int
ixgbe_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	error = ixgbe_setup_low_power_mode(ctx);

	return (error);
} /* ixgbe_if_suspend */

/************************************************************************
 * ixgbe_resume
 *
 *   From D3 to D0
 ************************************************************************/
static int
ixgbe_if_resume(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 wus;

	INIT_DEBUGOUT("ixgbe_resume: begin");

	/* Read & clear WUS register */
	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
	if (wus)
		device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
	/* And clear WUFC until next low-power transition */
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

	/*
	 * Required after D3->D0 transition;
	 * will re-advertise all previous advertised speeds
	 */
	if (ifp->if_flags & IFF_UP)
		ixgbe_if_init(ctx);

	return (0);
} /* ixgbe_if_resume */

/************************************************************************
 * ixgbe_if_mtu_set - Ioctl mtu entry point
 *
 *   Return 0 on success, EINVAL on failure
 ************************************************************************/
static int
ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	int error = 0;

	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");

	if (mtu > IXGBE_MAX_MTU) {
		error = EINVAL;
	} else {
		sc->max_frame_size = mtu + IXGBE_MTU_HDR;
	}

	return (error);
} /* ixgbe_if_mtu_set */

/************************************************************************
 * ixgbe_if_crcstrip_set
 ************************************************************************/
static void
ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	/*
	 * CRC stripping is set in two places:
	 *   IXGBE_HLREG0 (modified on init_locked and hw reset)
	 *   IXGBE_RDRXCTL (set by the original driver in
	 *     ixgbe_setup_hw_rsc(), called from init_locked;
	 *     we disable that setting when netmap is compiled in).
	 * We update the values here, but also in ixgbe.c because
	 * init_locked sometimes is called outside our control.
	 */
	uint32_t hl, rxc;

	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
#ifdef NETMAP
	if (netmap_verbose)
		D("%s read HLREG 0x%x rxc 0x%x",
		    onoff ? "enter" : "exit", hl, rxc);
#endif
	/* hw requirements ... */
	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
	rxc |= IXGBE_RDRXCTL_RSCACKC;
	if (onoff && !crcstrip) {
		/* keep the crc. Fast rx */
		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
	} else {
		/* reset default mode */
		hl |= IXGBE_HLREG0_RXCRCSTRP;
		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
	}
#ifdef NETMAP
	if (netmap_verbose)
		D("%s write HLREG 0x%x rxc 0x%x",
		    onoff ? "enter" : "exit", hl, rxc);
#endif
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
} /* ixgbe_if_crcstrip_set */

/*********************************************************************
 * ixgbe_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init
 *   entry point in network interface structure. It is also
 *   used by the driver as a hw/sw initialization routine to
 *   get to a consistent state.
 **********************************************************************/
void
ixgbe_if_init(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *rx_que;
	struct ix_tx_queue *tx_que;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	int i, j, err;

	INIT_DEBUGOUT("ixgbe_if_init: begin");

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(sc);

	/* Reprogram the RAR[0] in case user changed it.
*/ 3030 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV); 3031 3032 /* Get the latest mac address, User can use a LAA */ 3033 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3034 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1); 3035 hw->addr_ctrl.rar_used_count = 1; 3036 3037 ixgbe_init_hw(hw); 3038 3039 ixgbe_initialize_iov(sc); 3040 3041 ixgbe_initialize_transmit_units(ctx); 3042 3043 /* Setup Multicast table */ 3044 ixgbe_if_multi_set(ctx); 3045 3046 /* Determine the correct mbuf pool, based on frame size */ 3047 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 3048 3049 /* Configure RX settings */ 3050 ixgbe_initialize_receive_units(ctx); 3051 3052 /* 3053 * Initialize variable holding task enqueue requests 3054 * from MSI-X interrupts 3055 */ 3056 sc->task_requests = 0; 3057 3058 /* Enable SDP & MSI-X interrupts based on adapter */ 3059 ixgbe_config_gpie(sc); 3060 3061 /* Set MTU size */ 3062 if (ifp->if_mtu > ETHERMTU) { 3063 /* aka IXGBE_MAXFRS on 82599 and newer */ 3064 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3065 mhadd &= ~IXGBE_MHADD_MFS_MASK; 3066 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 3067 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 3068 } 3069 3070 /* Now enable all the queues */ 3071 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { 3072 struct tx_ring *txr = &tx_que->txr; 3073 3074 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 3075 txdctl |= IXGBE_TXDCTL_ENABLE; 3076 /* Set WTHRESH to 8, burst writeback */ 3077 txdctl |= (8 << 16); 3078 /* 3079 * When the internal queue falls below PTHRESH (32), 3080 * start prefetching as long as there are at least 3081 * HTHRESH (1) buffers ready. The values are taken 3082 * from the Intel linux driver 3.8.21. 3083 * Prefetching enables tx line rate even with 1 queue. 
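	 * TXDCTL field layout used here: PTHRESH in bits 6:0, HTHRESH
	 * in bits 14:8, WTHRESH in bits 22:16, which is why the enable
	 * write above ORed in (8 << 16) and the line below ORs in
	 * (32 << 0) | (1 << 8).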
3084 */ 3085 txdctl |= (32 << 0) | (1 << 8); 3086 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 3087 } 3088 3089 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { 3090 struct rx_ring *rxr = &rx_que->rxr; 3091 3092 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 3093 if (hw->mac.type == ixgbe_mac_82598EB) { 3094 /* 3095 * PTHRESH = 21 3096 * HTHRESH = 4 3097 * WTHRESH = 8 3098 */ 3099 rxdctl &= ~0x3FFFFF; 3100 rxdctl |= 0x080420; 3101 } 3102 rxdctl |= IXGBE_RXDCTL_ENABLE; 3103 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 3104 for (j = 0; j < 10; j++) { 3105 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 3106 IXGBE_RXDCTL_ENABLE) 3107 break; 3108 else 3109 msec_delay(1); 3110 } 3111 wmb(); 3112 } 3113 3114 /* Enable Receive engine */ 3115 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3116 if (hw->mac.type == ixgbe_mac_82598EB) 3117 rxctrl |= IXGBE_RXCTRL_DMBYPS; 3118 rxctrl |= IXGBE_RXCTRL_RXEN; 3119 ixgbe_enable_rx_dma(hw, rxctrl); 3120 3121 /* Set up MSI/MSI-X routing */ 3122 if (ixgbe_enable_msix) { 3123 ixgbe_configure_ivars(sc); 3124 /* Set up auto-mask */ 3125 if (hw->mac.type == ixgbe_mac_82598EB) 3126 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3127 else { 3128 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3129 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3130 } 3131 } else { /* Simple settings for Legacy/MSI */ 3132 ixgbe_set_ivar(sc, 0, 0, 0); 3133 ixgbe_set_ivar(sc, 0, 0, 1); 3134 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3135 } 3136 3137 ixgbe_init_fdir(sc); 3138 3139 /* 3140 * Check on any SFP devices that 3141 * need to be kick-started 3142 */ 3143 if (hw->phy.type == ixgbe_phy_none) { 3144 err = hw->phy.ops.identify(hw); 3145 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3146 device_printf(dev, 3147 "Unsupported SFP+ module type was detected.\n"); 3148 return; 3149 } 3150 } 3151 3152 /* Set moderation on the Link interrupt */ 3153 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR); 3154 3155 /* Enable power to the phy. 
	 */
	ixgbe_set_phy_power(hw, true);

	/* Config/Enable Link */
	ixgbe_config_link(ctx);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(sc);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(ctx);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(sc);

	/* And now turn on interrupts */
	ixgbe_if_enable_intr(ctx);

	/* Enable the use of the MBX by the VF's */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}
} /* ixgbe_if_init */

/************************************************************************
 * ixgbe_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *   (yes this is all very magic and confusing :)
 *    - entry is the register array entry
 *    - vector is the MSI-X vector for this queue
 *    - type is RX/TX/MISC
 ************************************************************************/
static void
ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (entry & 0x3)));
		ivar |= (vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */

/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
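		 * As a worked example, the usual default cap of 31250
		 * interrupts/s makes the branch above program
		 * 4000000 / 31250 = 128 (0x80), a value the 0x0FF8
		 * alignment mask leaves unchanged (illustrative
		 * arithmetic; the cap is a loader tunable).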
/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
    struct ix_rx_queue *rx_que = sc->rx_queues;
    struct ix_tx_queue *tx_que = sc->tx_queues;
    u32 newitr;

    if (ixgbe_max_interrupt_rate > 0)
        newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
    else {
        /*
         * Disable DMA coalescing if interrupt moderation is
         * disabled.
         */
        sc->dmac = 0;
        newitr = 0;
    }

    for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
        struct rx_ring *rxr = &rx_que->rxr;

        /* First the RX queue entry */
        ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);

        /* Set an Initial EITR value */
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
    }
    for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;

        /* ... and the TX */
        ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
    }
    /* For the Link interrupt */
    ixgbe_set_ivar(sc, 1, sc->vector, -1);
} /* ixgbe_configure_ivars */

/************************************************************************
 * ixgbe_config_gpie
 ************************************************************************/
static void
ixgbe_config_gpie(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 gpie;

    gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

    if (sc->intr_type == IFLIB_INTR_MSIX) {
        /* Enable Enhanced MSI-X mode */
        gpie |= IXGBE_GPIE_MSIX_MODE
            | IXGBE_GPIE_EIAME
            | IXGBE_GPIE_PBA_SUPPORT
            | IXGBE_GPIE_OCD;
    }

    /* Fan Failure Interrupt */
    if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
        gpie |= IXGBE_SDP1_GPIEN;

    /* Thermal Sensor Interrupt */
    if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
        gpie |= IXGBE_SDP0_GPIEN_X540;

    /* Link detection */
    switch (hw->mac.type) {
    case ixgbe_mac_82599EB:
        gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
        break;
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        gpie |= IXGBE_SDP0_GPIEN_X540;
        break;
    default:
        break;
    }

    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
} /* ixgbe_config_gpie */

/************************************************************************
 * ixgbe_config_delay_values
 *
 * Requires sc->max_frame_size to be set.
 ************************************************************************/
static void
ixgbe_config_delay_values(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 rxpb, frame, size, tmp;

    frame = sc->max_frame_size;

    /* Calculate High Water */
    switch (hw->mac.type) {
    case ixgbe_mac_X540:
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        tmp = IXGBE_DV_X540(frame, frame);
        break;
    default:
        tmp = IXGBE_DV(frame, frame);
        break;
    }
    size = IXGBE_BT2KB(tmp);
    rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
    hw->fc.high_water[0] = rxpb - size;

    /* Now calculate Low Water */
    switch (hw->mac.type) {
    case ixgbe_mac_X540:
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        tmp = IXGBE_LOW_DV_X540(frame);
        break;
    default:
        tmp = IXGBE_LOW_DV(frame);
        break;
    }
    hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

    hw->fc.pause_time = IXGBE_FC_PAUSE;
    hw->fc.send_xon = true;
} /* ixgbe_config_delay_values */

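/*
 * A rough sketch of the water-mark math above (illustrative, not a
 * datasheet-accurate derivation): the IXGBE_DV()/IXGBE_DV_X540() macros
 * estimate the worst-case number of bytes that can still arrive after an
 * XOFF pause frame has been sent.  Converting that to KB and subtracting
 * it from the RX packet buffer size gives the XOFF (high water)
 * threshold:
 *     high_water[0] = (RXPBSIZE(0) >> 10) - IXGBE_BT2KB(delay)
 * while low_water is the analogous XON threshold at which the link
 * partner is told it may transmit again.
 */
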
/************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static u_int
ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
    struct ixgbe_softc *sc = arg;
    struct ixgbe_mc_addr *mta = sc->mta;

    if (idx == MAX_NUM_MULTICAST_ADDRESSES)
        return (0);
    bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
    mta[idx].vmdq = sc->pool;

    return (1);
} /* ixgbe_mc_filter_apply */

static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_mc_addr *mta;
    struct ifnet *ifp = iflib_get_ifp(ctx);
    u8 *update_ptr;
    u32 fctrl;
    u_int mcnt;

    IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");

    mta = sc->mta;
    bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

    mcnt = if_foreach_llmaddr(ifp, ixgbe_mc_filter_apply, sc);

    if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
        update_ptr = (u8 *)mta;
        ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
            ixgbe_mc_array_itr, true);
    }

    fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);

    if (ifp->if_flags & IFF_PROMISC)
        fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
    else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
        ifp->if_flags & IFF_ALLMULTI) {
        fctrl |= IXGBE_FCTRL_MPE;
        fctrl &= ~IXGBE_FCTRL_UPE;
    } else
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

    IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
} /* ixgbe_if_multi_set */

/************************************************************************
 * ixgbe_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixgbe_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    struct ixgbe_mc_addr *mta;

    mta = (struct ixgbe_mc_addr *)*update_ptr;
    *vmdq = mta->vmdq;

    *update_ptr = (u8 *)(mta + 1);

    return (mta->addr);
} /* ixgbe_mc_array_itr */

/************************************************************************
 * ixgbe_if_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);

    if (qid != 0)
        return;

    /* Check for pluggable optics */
    if (sc->sfp_probe)
        if (!ixgbe_sfp_probe(ctx))
            return; /* Nothing to do */

    ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);

    /* Fire off the adminq task */
    iflib_admin_intr_deferred(ctx);
} /* ixgbe_if_timer */

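/*
 * A note on the timer above: iflib runs ixgbe_if_timer() for every queue
 * on each tick, so the qid != 0 check keeps the link check and statistics
 * update to once per interval.  The actual work is then pushed to the
 * admin task via iflib_admin_intr_deferred() rather than being done in
 * callout context.
 */
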
/************************************************************************
 * ixgbe_sfp_probe
 *
 *   Determine if a port had optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = iflib_get_dev(ctx);
    bool result = false;

    if ((hw->phy.type == ixgbe_phy_nl) &&
        (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
        s32 ret = hw->phy.ops.identify_sfp(hw);
        if (ret)
            goto out;
        ret = hw->phy.ops.reset(hw);
        sc->sfp_probe = false;
        if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
            device_printf(dev,
                "Unsupported SFP+ module detected!\n");
            device_printf(dev,
                "Reload driver with supported module.\n");
            goto out;
        } else
            device_printf(dev, "SFP+ module detected!\n");
        /* We now have supported optics */
        result = true;
    }
out:
    return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
    if_ctx_t ctx = context;
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = iflib_get_dev(ctx);
    u32 err, cage_full = 0;

    if (sc->hw.need_crosstalk_fix) {
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
            cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
                IXGBE_ESDP_SDP2;
            break;
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
            cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
                IXGBE_ESDP_SDP0;
            break;
        default:
            break;
        }

        if (!cage_full)
            goto handle_mod_out;
    }

    err = hw->phy.ops.identify_sfp(hw);
    if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
        device_printf(dev,
            "Unsupported SFP+ module type was detected.\n");
        goto handle_mod_out;
    }

    if (hw->mac.type == ixgbe_mac_82598EB)
        err = hw->phy.ops.reset(hw);
    else
        err = hw->mac.ops.setup_sfp(hw);

    if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
        device_printf(dev,
            "Setup failure - unsupported SFP+ module type.\n");
        goto handle_mod_out;
    }
    sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
    return;

handle_mod_out:
    sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */

/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
    if_ctx_t ctx = context;
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    u32 autoneg;
    bool negotiate;

    /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
    sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

    autoneg = hw->phy.autoneg_advertised;
    if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
        hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
    if (hw->mac.ops.setup_link)
        hw->mac.ops.setup_link(hw, autoneg, true);

    /* Adjust media types shown in ifconfig */
    ifmedia_removeall(sc->media);
    ixgbe_add_media_types(sc->ctx);
    ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */

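/*
 * How the two tasklets above cooperate: ixgbe_handle_mod() identifies a
 * newly inserted module and, on success, leaves IXGBE_REQUEST_TASK_MSF
 * set so that ixgbe_handle_msf() re-runs link setup for the new optics.
 * Both request flags are consumed from sc->task_requests in
 * ixgbe_if_update_admin_status() below.
 */
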
/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
    if_ctx_t ctx = context;
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int error;

    error = hw->phy.ops.handle_lasi(hw);
    if (error == IXGBE_ERR_OVERTEMP)
        device_printf(sc->dev,
            "CRITICAL: EXTERNAL PHY OVER TEMP!! "
            "PHY will downshift to lower power state!\n");
    else if (error)
        device_printf(sc->dev,
            "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;

    INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

    ixgbe_reset_hw(hw);
    hw->adapter_stopped = false;
    ixgbe_stop_adapter(hw);
    if (hw->mac.type == ixgbe_mac_82599EB)
        ixgbe_stop_mac_link_on_d3_82599(hw);
    /* Turn off the laser - noop with no optics */
    ixgbe_disable_tx_laser(hw);

    /* Update the stack */
    sc->link_up = false;
    ixgbe_if_update_admin_status(ctx);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
} /* ixgbe_if_stop */

/************************************************************************
 * ixgbe_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);

    if (sc->link_up) {
        if (sc->link_active == false) {
            /* 128 is IXGBE_LINK_SPEED_10GB_FULL */
            if (bootverbose)
                device_printf(dev, "Link is up %d Gbps %s\n",
                    ((sc->link_speed == 128) ? 10 : 1),
                    "Full Duplex");
            sc->link_active = true;
            /* Update any Flow Control changes */
            ixgbe_fc_enable(&sc->hw);
            /* Update DMA coalescing config */
            ixgbe_config_dmac(sc);
            /* should actually be negotiated value */
            iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));

            if (sc->feat_en & IXGBE_FEATURE_SRIOV)
                ixgbe_ping_all_vfs(sc);
        }
    } else { /* Link down */
        if (sc->link_active == true) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
            sc->link_active = false;
            if (sc->feat_en & IXGBE_FEATURE_SRIOV)
                ixgbe_ping_all_vfs(sc);
        }
    }

    /* Handle task requests from msix_link() */
    if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
        ixgbe_handle_mod(ctx);
    if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
        ixgbe_handle_msf(ctx);
    if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
        ixgbe_handle_mbx(ctx);
    if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
        ixgbe_reinit_fdir(ctx);
    if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
        ixgbe_handle_phy(ctx);
    sc->task_requests = 0;

    ixgbe_update_stats_counters(sc);
} /* ixgbe_if_update_admin_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

    if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
        return;

    /* The '^' below is a cheap "values differ" test */
    if (dcfg->watchdog_timer ^ sc->dmac ||
        dcfg->link_speed ^ sc->link_speed) {
        dcfg->watchdog_timer = sc->dmac;
        dcfg->fcoe_en = false;
        dcfg->link_speed = sc->link_speed;
        dcfg->num_tcs = 1;

        INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
            dcfg->watchdog_timer, dcfg->link_speed);

        hw->mac.ops.dmac_config(hw);
    }
} /* ixgbe_config_dmac */

/************************************************************************
 * ixgbe_if_enable_intr
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct ix_rx_queue *que = sc->rx_queues;
    u32 mask, fwsm;

    mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

    switch (sc->hw.mac.type) {
    case ixgbe_mac_82599EB:
        mask |= IXGBE_EIMS_ECC;
        /* Temperature sensor on some adapters */
        mask |= IXGBE_EIMS_GPI_SDP0;
        /* SFP+ (RX_LOS_N & MOD_ABS_N) */
        mask |= IXGBE_EIMS_GPI_SDP1;
        mask |= IXGBE_EIMS_GPI_SDP2;
        break;
    case ixgbe_mac_X540:
        /* Detect if Thermal Sensor is enabled */
        fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
        if (fwsm & IXGBE_FWSM_TS_ENABLED)
            mask |= IXGBE_EIMS_TS;
        mask |= IXGBE_EIMS_ECC;
        break;
    case ixgbe_mac_X550:
        /* MAC thermal sensor is automatically enabled */
        mask |= IXGBE_EIMS_TS;
        mask |= IXGBE_EIMS_ECC;
        break;
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        /* Some devices use SDP0 for important information */
        if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
            hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
            hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
            hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
            mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
        if (hw->phy.type == ixgbe_phy_x550em_ext_t)
            mask |= IXGBE_EICR_GPI_SDP0_X540;
        mask |= IXGBE_EIMS_ECC;
        break;
    default:
        break;
    }

    /* Enable Fan Failure detection */
    if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
        mask |= IXGBE_EIMS_GPI_SDP1;
    /* Enable SR-IOV */
    if (sc->feat_en & IXGBE_FEATURE_SRIOV)
        mask |= IXGBE_EIMS_MAILBOX;
    /* Enable Flow Director */
    if (sc->feat_en & IXGBE_FEATURE_FDIR)
        mask |= IXGBE_EIMS_FLOW_DIR;

    IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

    /* With MSI-X we use auto clear */
    if (sc->intr_type == IFLIB_INTR_MSIX) {
        mask = IXGBE_EIMS_ENABLE_MASK;
        /* Don't autoclear Link */
        mask &= ~IXGBE_EIMS_OTHER;
        mask &= ~IXGBE_EIMS_LSC;
        if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
            mask &= ~IXGBE_EIMS_MAILBOX;
        IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
    }

    /*
     * Now enable all queues, this is done separately to
     * allow for handling the extended (beyond 32) MSI-X
     * vectors that can be used by 82599
     */
    for (int i = 0; i < sc->num_rx_queues; i++, que++)
        ixgbe_enable_queue(sc, que->msix);

    IXGBE_WRITE_FLUSH(hw);
} /* ixgbe_if_enable_intr */

/************************************************************************
 * ixgbe_if_disable_intr
 ************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);

    if (sc->intr_type == IFLIB_INTR_MSIX)
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
    } else {
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
    }
    IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixgbe_if_disable_intr */

/************************************************************************
 * ixgbe_link_intr_enable
 ************************************************************************/
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
    struct ixgbe_hw *hw =
        &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;

    /* Re-enable other interrupts */
    IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
} /* ixgbe_link_intr_enable */

/************************************************************************
 * ixgbe_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que = &sc->rx_queues[rxqid];

    ixgbe_enable_queue(sc, que->msix);

    return (0);
} /* ixgbe_if_rx_queue_intr_enable */

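/*
 * Per-queue vectors are unmasked one bit at a time by
 * ixgbe_enable_queue() below.  On 82599 and later the up-to-64 queue
 * vectors span two registers: EIMS_EX(0) covers vectors 0-31 and
 * EIMS_EX(1) covers 32-63, so e.g. vector 40 sets bit 8 of EIMS_EX(1).
 * The 82598 has only the single EIMS register, hence the
 * IXGBE_EIMS_RTX_QUEUE mask in that path.
 */
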
/************************************************************************
 * ixgbe_enable_queue
 ************************************************************************/
static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
    struct ixgbe_hw *hw = &sc->hw;
    u64 queue = 1ULL << vector;
    u32 mask;

    if (hw->mac.type == ixgbe_mac_82598EB) {
        mask = (IXGBE_EIMS_RTX_QUEUE & queue);
        IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
    } else {
        mask = (queue & 0xFFFFFFFF);
        if (mask)
            IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
        mask = (queue >> 32);
        if (mask)
            IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
    }
} /* ixgbe_enable_queue */

/************************************************************************
 * ixgbe_disable_queue
 ************************************************************************/
static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
    struct ixgbe_hw *hw = &sc->hw;
    u64 queue = 1ULL << vector;
    u32 mask;

    if (hw->mac.type == ixgbe_mac_82598EB) {
        mask = (IXGBE_EIMS_RTX_QUEUE & queue);
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
    } else {
        mask = (queue & 0xFFFFFFFF);
        if (mask)
            IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
        mask = (queue >> 32);
        if (mask)
            IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
    }
} /* ixgbe_disable_queue */

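/*
 * With legacy INTx or plain MSI there is a single vector for all causes,
 * so the handler below must demultiplex by reading EICR itself: admin
 * causes (link, SFP, external PHY) become task_requests or a deferred
 * admin interrupt, and returning FILTER_SCHEDULE_THREAD hands the RX/TX
 * work to the iflib interrupt thread.
 */
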
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
    struct ixgbe_softc *sc = arg;
    struct ix_rx_queue *que = sc->rx_queues;
    struct ixgbe_hw *hw = &sc->hw;
    if_ctx_t ctx = sc->ctx;
    u32 eicr, eicr_mask;

    eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

    ++que->irqs;
    if (eicr == 0) {
        ixgbe_if_enable_intr(ctx);
        return (FILTER_HANDLED);
    }

    /* Check for fan failure */
    if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
        (eicr & IXGBE_EICR_GPI_SDP1)) {
        device_printf(sc->dev,
            "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
        IXGBE_WRITE_REG(hw, IXGBE_EIMS,
            IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
    }

    /* Link status change */
    if (eicr & IXGBE_EICR_LSC) {
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
        iflib_admin_intr_deferred(ctx);
    }

    if (ixgbe_is_sfp(hw)) {
        /* Pluggable optics-related interrupt */
        if (hw->mac.type >= ixgbe_mac_X540)
            eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
        else
            eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

        if (eicr & eicr_mask) {
            IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
            sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
        }

        if ((hw->mac.type == ixgbe_mac_82599EB) &&
            (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
            IXGBE_WRITE_REG(hw, IXGBE_EICR,
                IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
            sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
        }
    }

    /* External PHY interrupt */
    if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
        (eicr & IXGBE_EICR_GPI_SDP0_X540))
        sc->task_requests |= IXGBE_REQUEST_TASK_PHY;

    return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que = sc->rx_queues;
    device_t dev = iflib_get_dev(ctx);

    /* Release all MSI-X queue resources */
    if (sc->intr_type == IFLIB_INTR_MSIX)
        iflib_irq_free(ctx, &sc->irq);

    if (que != NULL) {
        for (int i = 0; i < sc->num_rx_queues; i++, que++) {
            iflib_irq_free(ctx, &que->que_irq);
        }
    }

    if (sc->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 *   SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc;
    int error, fc;

    sc = (struct ixgbe_softc *)arg1;
    fc = sc->hw.fc.current_mode;

    error = sysctl_handle_int(oidp, &fc, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);

    /* Don't bother if it's not changed */
    if (fc == sc->hw.fc.current_mode)
        return (0);

    return ixgbe_set_flowcntl(sc, fc);
} /* ixgbe_sysctl_flowcntl */

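/*
 * Typical usage, assuming the OID is attached as dev.ix.<unit>.fc the
 * way stock ix(4) does:
 *     # sysctl dev.ix.0.fc=3
 * requests full RX/TX pause; the value is validated against the
 * ixgbe_fc_* modes in ixgbe_set_flowcntl() below.
 */
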
/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 *   Flow control values:
 *     0 - off
 *     1 - rx pause
 *     2 - tx pause
 *     3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
{
    switch (fc) {
    case ixgbe_fc_rx_pause:
    case ixgbe_fc_tx_pause:
    case ixgbe_fc_full:
        sc->hw.fc.requested_mode = fc;
        if (sc->num_rx_queues > 1)
            ixgbe_disable_rx_drop(sc);
        break;
    case ixgbe_fc_none:
        sc->hw.fc.requested_mode = ixgbe_fc_none;
        if (sc->num_rx_queues > 1)
            ixgbe_enable_rx_drop(sc);
        break;
    default:
        return (EINVAL);
    }

    /* Don't autoneg if forcing a value */
    sc->hw.fc.disable_fc_autoneg = true;
    ixgbe_fc_enable(&sc->hw);

    return (0);
} /* ixgbe_set_flowcntl */

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 *   Enable the hardware to drop packets when the buffer is
 *   full. This is useful with multiqueue, so that no single
 *   queue being full stalls the entire RX engine. We only
 *   enable this when Multiqueue is enabled AND Flow Control
 *   is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    struct rx_ring *rxr;
    u32 srrctl;

    for (int i = 0; i < sc->num_rx_queues; i++) {
        rxr = &sc->rx_queues[i].rxr;
        srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
        srrctl |= IXGBE_SRRCTL_DROP_EN;
        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
    }

    /* enable drop for each vf */
    for (int i = 0; i < sc->num_vfs; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_QDE,
            (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
            IXGBE_QDE_ENABLE));
    }
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    struct rx_ring *rxr;
    u32 srrctl;

    for (int i = 0; i < sc->num_rx_queues; i++) {
        rxr = &sc->rx_queues[i].rxr;
        srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
        srrctl &= ~IXGBE_SRRCTL_DROP_EN;
        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
    }

    /* disable drop for each vf */
    for (int i = 0; i < sc->num_vfs; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_QDE,
            (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
    }
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 *   SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc;
    int error, advertise;

    sc = (struct ixgbe_softc *)arg1;
    advertise = sc->advertise;

    error = sysctl_handle_int(oidp, &advertise, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);

    return ixgbe_set_advertise(sc, advertise);
} /* ixgbe_sysctl_advertise */

/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
    device_t dev = iflib_get_dev(sc->ctx);
    struct ixgbe_hw *hw;
    ixgbe_link_speed speed = 0;
    ixgbe_link_speed link_caps = 0;
    s32 err = IXGBE_NOT_IMPLEMENTED;
    bool negotiate = false;

    /* Checks to validate new value */
    if (sc->advertise == advertise) /* no change */
        return (0);

    hw = &sc->hw;

    /* No speed changes for backplane media */
    if (hw->phy.media_type == ixgbe_media_type_backplane)
        return (ENODEV);

    if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
        (hw->phy.multispeed_fiber))) {
        device_printf(dev,
            "Advertised speed can only be set on copper or "
            "multispeed fiber media types.\n");
        return (EINVAL);
    }

    if (advertise < 0x1 || advertise > 0x3F) {
        device_printf(dev,
            "Invalid advertised speed; valid modes are 0x1 through "
            "0x3F\n");
        return (EINVAL);
    }

    if (hw->mac.ops.get_link_capabilities) {
        err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
            &negotiate);
        if (err != IXGBE_SUCCESS) {
            device_printf(dev,
                "Unable to determine supported advertise speeds\n");
            return (ENODEV);
        }
    }

    /* Set new value and report new advertised mode */
    if (advertise & 0x1) {
        if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
            device_printf(dev,
                "Interface does not support 100Mb advertised speed\n");
            return (EINVAL);
        }
        speed |= IXGBE_LINK_SPEED_100_FULL;
    }
    if (advertise & 0x2) {
        if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
            device_printf(dev,
                "Interface does not support 1Gb advertised speed\n");
            return (EINVAL);
        }
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
    }
    if (advertise & 0x4) {
        if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
            device_printf(dev,
                "Interface does not support 10Gb advertised speed\n");
            return (EINVAL);
        }
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
    }
    if (advertise & 0x8) {
        if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
            device_printf(dev,
                "Interface does not support 10Mb advertised speed\n");
            return (EINVAL);
        }
        speed |= IXGBE_LINK_SPEED_10_FULL;
    }
    if (advertise & 0x10) {
        if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
            device_printf(dev,
                "Interface does not support 2.5G advertised speed\n");
            return (EINVAL);
        }
        speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
    }
    if (advertise & 0x20) {
        if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
            device_printf(dev,
                "Interface does not support 5G advertised speed\n");
            return (EINVAL);
        }
        speed |= IXGBE_LINK_SPEED_5GB_FULL;
    }

    hw->mac.autotry_restart = true;
    hw->mac.ops.setup_link(hw, speed, true);
    sc->advertise = advertise;

    return (0);
} /* ixgbe_set_advertise */

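/*
 * Example, assuming the OID is attached as dev.ix.<unit>.advertise_speed
 * (the name stock ix(4) uses): to advertise only 1G and 10G,
 *     # sysctl dev.ix.0.advertise_speed=0x6
 * i.e. the 0x2 (1G) and 0x4 (10G) flags from the table above OR'd
 * together.
 */
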
/************************************************************************
 * ixgbe_get_default_advertise - Get default advertised speed settings
 *
 *   Formatted for sysctl usage.
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_get_default_advertise(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    int speed;
    ixgbe_link_speed link_caps = 0;
    s32 err;
    bool negotiate = false;

    /*
     * Advertised speed means nothing unless it's copper or
     * multi-speed fiber
     */
    if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
        !(hw->phy.multispeed_fiber))
        return (0);

    err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
    if (err != IXGBE_SUCCESS)
        return (0);

    if (hw->mac.type == ixgbe_mac_X550) {
        /*
         * 2.5G and 5G autonegotiation speeds on X550
         * are disabled by default due to reported
         * interoperability issues with some switches.
         */
        link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
            IXGBE_LINK_SPEED_5GB_FULL);
    }

    speed =
        ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
        ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
        ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
        ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
        ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
        ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);

    return (speed);
} /* ixgbe_get_default_advertise */

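/*
 * Worked example for ixgbe_get_default_advertise() above: a copper X550
 * port whose PHY reports 100M/1G/2.5G/5G/10G capability returns
 * 0x4 | 0x2 | 0x1 = 0x7, because the 2.5G and 5G bits are stripped from
 * link_caps on X550 before the flag word is assembled.
 */
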
/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 * Control values:
 *   0/1 - off / on (use default value of 1000)
 *
 * Legal timer values are:
 *   50,100,250,500,1000,2000,5000,10000
 *
 * Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
    struct ifnet *ifp = iflib_get_ifp(sc->ctx);
    int error;
    u16 newval;

    newval = sc->dmac;
    error = sysctl_handle_16(oidp, &newval, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);

    switch (newval) {
    case 0:
        /* Disabled */
        sc->dmac = 0;
        break;
    case 1:
        /* Enable and use default */
        sc->dmac = 1000;
        break;
    case 50:
    case 100:
    case 250:
    case 500:
    case 1000:
    case 2000:
    case 5000:
    case 10000:
        /* Legal values - allow */
        sc->dmac = newval;
        break;
    default:
        /* Do nothing, illegal value */
        return (EINVAL);
    }

    /* Re-initialize hardware if it's already running */
    if (ifp->if_drv_flags & IFF_DRV_RUNNING)
        ifp->if_init(ifp);

    return (0);
} /* ixgbe_sysctl_dmac */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 *   Sysctl to test power states
 *   Values:
 *     0      - set device to D0
 *     3      - set device to D3
 *     (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
    device_t dev = sc->dev;
    int curr_ps, new_ps, error = 0;

    curr_ps = new_ps = pci_get_powerstate(dev);

    error = sysctl_handle_int(oidp, &new_ps, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);

    if (new_ps == curr_ps)
        return (0);

    if (new_ps == 3 && curr_ps == 0)
        error = DEVICE_SUSPEND(dev);
    else if (new_ps == 0 && curr_ps == 3)
        error = DEVICE_RESUME(dev);
    else
        return (EINVAL);

    device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

    return (error);
} /* ixgbe_sysctl_power_state */
#endif

/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 *   Sysctl to enable/disable the WoL capability,
 *   if supported by the adapter.
 *
 *   Values:
 *     0 - disabled
 *     1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
    struct ixgbe_hw *hw = &sc->hw;
    int new_wol_enabled;
    int error = 0;

    new_wol_enabled = hw->wol_enabled;
    error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);
    new_wol_enabled = !!(new_wol_enabled);
    if (new_wol_enabled == hw->wol_enabled)
        return (0);

    if (new_wol_enabled > 0 && !sc->wol_support)
        return (ENODEV);
    else
        hw->wol_enabled = new_wol_enabled;

    return (0);
} /* ixgbe_sysctl_wol_enable */

/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 *   Sysctl to enable/disable the types of packets that the
 *   adapter will wake up upon receipt.
 *   Flags:
 *     0x1  - Link Status Change
 *     0x2  - Magic Packet
 *     0x4  - Direct Exact
 *     0x8  - Directed Multicast
 *     0x10 - Broadcast
 *     0x20 - ARP/IPv4 Request Packet
 *     0x40 - Direct IPv4 Packet
 *     0x80 - Direct IPv6 Packet
 *
 *   Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
    int error = 0;
    u32 new_wufc;

    new_wufc = sc->wufc;

    error = sysctl_handle_32(oidp, &new_wufc, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);
    if (new_wufc == sc->wufc)
        return (0);

    if (new_wufc & 0xffffff00)
        return (EINVAL);

    new_wufc &= 0xff;
    /* Preserve any flags above the user-settable low byte */
    new_wufc |= (0xffffff00 & sc->wufc);
    sc->wufc = new_wufc;

    return (0);
} /* ixgbe_sysctl_wufc */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    struct sbuf *buf;
    int error = 0, reta_size;
    u32 reg;

    buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
    if (!buf) {
        device_printf(dev, "Could not allocate sbuf for output.\n");
        return (ENOMEM);
    }

    // TODO: use sbufs to make a string to print out
    /* Set multiplier for RETA setup and table size based on MAC */
    switch (sc->hw.mac.type) {
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        reta_size = 128;
        break;
    default:
        reta_size = 32;
        break;
    }

    /* Print out the redirection table */
    sbuf_cat(buf, "\n");
    for (int i = 0; i < reta_size; i++) {
        if (i < 32) {
            reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
            sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
        } else {
            reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
            sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
        }
    }

    // TODO: print more config

    error = sbuf_finish(buf);
    if (error)
        device_printf(dev, "Error finishing sbuf: %d\n", error);

    sbuf_delete(buf);

    return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */

/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 *   For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
    struct ixgbe_hw *hw = &sc->hw;
    u16 reg;

    if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
        device_printf(iflib_get_dev(sc->ctx),
            "Device has no supported external thermal sensor.\n");
        return (ENODEV);
    }

    if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
        IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
        device_printf(iflib_get_dev(sc->ctx),
            "Error reading from PHY's current temperature register\n");
        return (EAGAIN);
    }

    /* Shift temp for output */
    reg = reg >> 8;

    return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 *   Reports (directly from the PHY) whether the current PHY
 *   temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
    struct ixgbe_hw *hw = &sc->hw;
    u16 reg;

    if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
        device_printf(iflib_get_dev(sc->ctx),
            "Device has no supported external thermal sensor.\n");
        return (ENODEV);
    }

    if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
        IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
        device_printf(iflib_get_dev(sc->ctx),
            "Error reading from PHY's temperature status register\n");
        return (EAGAIN);
    }

    /* Get occurrence bit */
    reg = !!(reg & 0x4000);

    return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */

/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 *   Sysctl to set EEE power saving feature
 *   Values:
 *     0      - disable EEE
 *     1      - enable EEE
 *     (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
    device_t dev = sc->dev;
    struct ifnet *ifp = iflib_get_ifp(sc->ctx);
    int curr_eee, new_eee, error = 0;
    s32 retval;

    curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

    error = sysctl_handle_int(oidp, &new_eee, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);

    /* Nothing to do */
    if (new_eee == curr_eee)
        return (0);

    /* Not supported */
    if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
        return (EINVAL);

    /* Bounds checking */
    if ((new_eee < 0) || (new_eee > 1))
        return (EINVAL);

    retval = ixgbe_setup_eee(&sc->hw, new_eee);
    if (retval) {
        device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
        return (EINVAL);
    }

    /* Restart auto-neg */
    ifp->if_init(ifp);

    device_printf(dev, "New EEE state: %d\n", new_eee);

    /* Cache new value */
    if (new_eee)
        sc->feat_en |= IXGBE_FEATURE_EEE;
    else
        sc->feat_en &= ~IXGBE_FEATURE_EEE;

    return (error);
} /* ixgbe_sysctl_eee_state */

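/*
 * The feature bookkeeping below is two-staged: feat_cap collects
 * everything the silicon can do, feat_en what this instance actually
 * enables.  Per-MAC capabilities are set first, then defaults and
 * sysctl-driven options are folded into feat_en, and finally anything
 * whose dependency is missing (e.g. RSS or SR-IOV without MSI-X) is
 * stripped from both words.
 */
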
/************************************************************************
 * ixgbe_init_device_features
 ************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
    sc->feat_cap = IXGBE_FEATURE_NETMAP
        | IXGBE_FEATURE_RSS
        | IXGBE_FEATURE_MSI
        | IXGBE_FEATURE_MSIX
        | IXGBE_FEATURE_LEGACY_IRQ;

    /* Set capabilities first... */
    switch (sc->hw.mac.type) {
    case ixgbe_mac_82598EB:
        if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
            sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
        break;
    case ixgbe_mac_X540:
        sc->feat_cap |= IXGBE_FEATURE_SRIOV;
        sc->feat_cap |= IXGBE_FEATURE_FDIR;
        if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
            (sc->hw.bus.func == 0))
            sc->feat_cap |= IXGBE_FEATURE_BYPASS;
        break;
    case ixgbe_mac_X550:
        sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
        sc->feat_cap |= IXGBE_FEATURE_SRIOV;
        sc->feat_cap |= IXGBE_FEATURE_FDIR;
        break;
    case ixgbe_mac_X550EM_x:
        sc->feat_cap |= IXGBE_FEATURE_SRIOV;
        sc->feat_cap |= IXGBE_FEATURE_FDIR;
        break;
    case ixgbe_mac_X550EM_a:
        sc->feat_cap |= IXGBE_FEATURE_SRIOV;
        sc->feat_cap |= IXGBE_FEATURE_FDIR;
        sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
        if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
            (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
            sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
            sc->feat_cap |= IXGBE_FEATURE_EEE;
        }
        break;
    case ixgbe_mac_82599EB:
        sc->feat_cap |= IXGBE_FEATURE_SRIOV;
        sc->feat_cap |= IXGBE_FEATURE_FDIR;
        if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
            (sc->hw.bus.func == 0))
            sc->feat_cap |= IXGBE_FEATURE_BYPASS;
        if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
            sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
        break;
    default:
        break;
    }

    /* Enabled by default... */
    /* Fan failure detection */
    if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
        sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
    /* Netmap */
    if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
        sc->feat_en |= IXGBE_FEATURE_NETMAP;
    /* EEE */
    if (sc->feat_cap & IXGBE_FEATURE_EEE)
        sc->feat_en |= IXGBE_FEATURE_EEE;
    /* Thermal Sensor */
    if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
        sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

    /* Enabled via global sysctl... */
    /* Flow Director */
    if (ixgbe_enable_fdir) {
        if (sc->feat_cap & IXGBE_FEATURE_FDIR)
            sc->feat_en |= IXGBE_FEATURE_FDIR;
        else
            device_printf(sc->dev,
                "Device does not support Flow Director."
                " Leaving disabled.\n");
    }
    /*
     * Message Signaled Interrupts - Extended (MSI-X)
     * Normal MSI is only enabled if MSI-X calls fail.
     */
    if (!ixgbe_enable_msix)
        sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
    /* Receive-Side Scaling (RSS) */
    if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
        sc->feat_en |= IXGBE_FEATURE_RSS;

    /* Disable features with unmet dependencies... */
    /* No MSI-X */
    if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
        sc->feat_cap &= ~IXGBE_FEATURE_RSS;
        sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
        sc->feat_en &= ~IXGBE_FEATURE_RSS;
        sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
    }
} /* ixgbe_init_device_features */

/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
    u32 mask;

    mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
        IXGBE_ESDP_SDP1;

    if (reg & mask)
        device_printf(sc->dev,
            "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
    struct ixgbe_nvm_version nvm_ver = {0};
    uint16_t phyfw = 0;
    int status;
    const char *space = "";

    ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
    ixgbe_get_orom_version(hw, &nvm_ver);     /* Option ROM */
    ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
    status = ixgbe_get_phy_firmware_version(hw, &phyfw);

    if (nvm_ver.oem_valid) {
        sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
            nvm_ver.oem_minor, nvm_ver.oem_release);
        space = " ";
    }

    if (nvm_ver.or_valid) {
        sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
            space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
        space = " ";
    }

    if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
        NVM_VER_INVALID)) {
        sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
        space = " ";
    }

    if (phyfw != 0 && status == IXGBE_SUCCESS)
        sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
} /* ixgbe_sbuf_fw_version */

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    struct sbuf *buf;
    int error = 0;

    buf = sbuf_new_auto();
    if (!buf) {
        device_printf(dev, "Could not allocate sbuf for output.\n");
        return;
    }

    ixgbe_sbuf_fw_version(hw, buf);

    error = sbuf_finish(buf);
    if (error)
        device_printf(dev, "Error finishing sbuf: %d\n", error);
    else if (sbuf_len(buf))
        device_printf(dev, "%s\n", sbuf_data(buf));

    sbuf_delete(buf);
} /* ixgbe_print_fw_version */

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
    struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    struct sbuf *buf;
    int error = 0;

    buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
    if (!buf) {
        device_printf(dev, "Could not allocate sbuf for output.\n");
        return (ENOMEM);
    }

    ixgbe_sbuf_fw_version(hw, buf);

    error = sbuf_finish(buf);
    if (error)
        device_printf(dev, "Error finishing sbuf: %d\n", error);

    sbuf_delete(buf);

    return (0);
} /* ixgbe_sysctl_print_fw_version */