/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
	/* required last entry */
	PVID_END
};

static void *ixgbe_register(device_t);
static int  ixgbe_if_attach_pre(if_ctx_t);
static int  ixgbe_if_attach_post(if_ctx_t);
static int  ixgbe_if_detach(if_ctx_t);
static int  ixgbe_if_shutdown(if_ctx_t);
static int  ixgbe_if_suspend(if_ctx_t);
static int  ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixgbe_if_media_change(if_ctx_t);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int  ixgbe_if_promisc_set(if_ctx_t, int);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int  ixgbe_allocate_pci_resources(if_ctx_t);
static int  ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8  *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int  ixgbe_msix_link(void *);
static int  ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int  ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
static int  ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct ixgbe_softc),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};
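/*
 * Note (explanatory, not from the original sources): both
 * ifdi_tx_queue_intr_enable and ifdi_rx_queue_intr_enable above point at
 * ixgbe_if_rx_queue_intr_enable.  TX queues share the RX queue MSI-X
 * vectors in this driver, so re-enabling the vector by RX queue index
 * covers both directions.
 */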
/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.  This only works as a
 * compile-time option right now since it is applied during
 * attach; set this to 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");
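/*
 * Usage sketch: the CTLFLAG_RDTUN sysctls above are boot-time tunables and
 * can be set from /boot/loader.conf; the values below are illustrative only:
 *
 *   hw.ix.max_interrupt_rate="31250"
 *   hw.ix.flow_control="0"
 *   hw.ix.enable_msix="0"
 *   hw.ix.unsupported_sfp="1"
 */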
/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation,
 * which means that the interrupt rate is varied over time
 * based on the traffic for that interrupt vector.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(sc->num_tx_queues > 0);
	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		txr->sc = que->sc = sc;

		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
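		/*
		 * Explanatory note: tx_rsq ("report status queue") holds, in
		 * ring order, the indices of descriptors queued with the RS
		 * bit set; the completion path replays it to find the
		 * descriptors whose DD bit can be checked.  QIDX_INVALID
		 * marks unused slots.
		 */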
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];

		txr->bytes = 0;
		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (sc->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;

	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixgbe_if_queues_free(ctx);

	return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int i;

	MPASS(sc->num_rx_queues > 0);
	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		/* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_RDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = sc->tx_queues;
	struct ix_rx_queue *rx_que = sc->rx_queues;
	int i;

	if (tx_que != NULL) {
		for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
				break;

			free(txr->tx_rsq, M_IXGBE);
			txr->tx_rsq = NULL;
		}

		free(sc->tx_queues, M_IXGBE);
		sc->tx_queues = NULL;
	}
	if (rx_que != NULL) {
		free(sc->rx_queues, M_IXGBE);
		sc->rx_queues = NULL;
	}
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}
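	/*
	 * Worked example (illustrative): with 4 RX queues and the RSS kernel
	 * option absent, queue_id below cycles 0,1,2,3,...  Four 8-bit
	 * entries are packed per 32-bit register, lowest entry in the low
	 * byte, so each RETA register is written as 0x03020100 once
	 * (i & 3) == 3.
	 */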
	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
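/*
 * Worked example (illustrative): IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, so SRRCTL
 * buffer sizes are programmed in 1 KB units and BSIZEPKT_ROUNDUP rounds up;
 * e.g. a 2048-byte mbuf cluster yields bufsz = (2048 + 1023) >> 10 = 2 (2 KB).
 */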
static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ixgbe_hw *hw = &sc->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->hw.back = sc;
	sc->ctx = ctx;
	sc->dev = dev;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Determine hardware revision */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		return (ENXIO);
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_pci;
	}

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	ixgbe_init_device_features(sc);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(sc);

	/* Verify adapter fan is still functional (if applicable) */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(sc, esdp, false);
	}

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	hw->phy.reset_if_overtemp = true;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		sc->sfp_probe = true;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_pci;
	} else if (error) {
		device_printf(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_pci;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_pci;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev,
		    "This device is a pre-production adapter/LOM. Please be"
		    " aware there may be issues associated with your hardware.\n"
		    "If you are experiencing problems please contact your Intel"
		    " or hardware representative who provided you with this"
		    " hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_pci;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Most of the iflib initialization... */
	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		scctx->isc_rss_table_size = 512;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
		break;
	default:
		scctx->isc_rss_table_size = 128;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
	}

	/* Allow legacy interrupts */
	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN),
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);

	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
	} else {
		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	}

	scctx->isc_msix_bar = pci_msix_table_bar(dev);

	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

	return (0);

err_pci:
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_pci_resources(ctx);

	return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	if (sc->intr_type == IFLIB_INTR_LEGACY &&
	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev, "Device does not support legacy interrupts");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(sc, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	ixgbe_initialize_iov(sc);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(sc);
	ixgbe_add_hw_stats(sc);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(sc);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass sc.
	 */
	ixgbe_bypass_init(sc);

	/* Display NVM and Option ROM versions */
	ixgbe_print_fw_version(ctx);

	/* Set an initial dmac value */
	sc->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	sc->advertise = ixgbe_get_default_advertise(sc);

	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u16 dev_caps = 0;

	/* Find out WoL support for port */
	sc->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	     hw->bus.func == 0))
		sc->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));

	sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

	ixgbe_add_media_types(ctx);

	/* Autoselect media by default */
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	case IFCOUNTER_OMCASTS:
		return (sc->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (sc->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (0);
	case IFCOUNTER_IERRORS:
		return (sc->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int i;

	if (hw->phy.ops.read_i2c_byte == NULL)
		return (ENXIO);
	for (i = 0; i < req->len; i++)
		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
		    req->dev_addr, &req->data[i]);
	return (0);
} /* ixgbe_if_i2c_req */

/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		return (false);
	default:
		return (true);
	}
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u64 layer;

	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (hw->mac.type == ixgbe_mac_X550) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return (true);
		return (false);
	case ixgbe_mac_82599EB:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (true);
		default:
			return (false);
		}
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
			return (true);
		return (false);
	default:
		return (false);
	}
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		iflib_admin_intr_deferred(ctx);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &sc->link_speed,
			    &sc->link_up, false);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;

		if (hw->mac.type == ixgbe_mac_X550 &&
		    hw->phy.autoneg_advertised == 0) {
			/*
			 * 2.5G and 5G autonegotiation speeds on X550
			 * are disabled by default due to reported
			 * interoperability issues with some switches.
			 *
			 * The second condition checks if any operations
			 * involving setting autonegotiation speeds have
			 * been performed prior to this ixgbe_config_link()
			 * call.
			 *
			 * If hw->phy.autoneg_advertised does not
			 * equal 0, this means that the user might have
			 * set autonegotiation speeds via the sysctl
			 * before bringing the interface up. In this
			 * case, we should not disable 2.5G and 5G
			 * since those speeds might be selected by the
			 * user.
			 *
			 * Otherwise (i.e. if hw->phy.autoneg_advertised
			 * is set to 0), it is the first time we set
			 * autonegotiation preferences and the default
			 * set of speeds should exclude 2.5G and 5G.
			 */
			autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
			    IXGBE_LINK_SPEED_5GB_FULL);
		}

		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    sc->link_up);
	}
} /* ixgbe_config_link */

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_hw_stats *stats = &sc->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u32 lxoffrxc;
	u64 total_missed_rx = 0;

	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	stats->gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		stats->lxoffrxc += lxoffrxc;
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		stats->lxoffrxc += lxoffrxc;
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (lxoffrxc)
		sc->shared->isc_pause_frames = 1;
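	/*
	 * Explanatory note: pause frames are minimum-length frames sent to a
	 * reserved multicast address, so the MAC counts them in gptc, mptc
	 * and ptc64, and at ETHER_MIN_LEN octets each in gotc.  The
	 * adjustments further below subtract lxon + lxoff so the "good" TX
	 * counters reflect data frames only.
	 */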
	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;

	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(sc, stats->gprc);
	IXGBE_SET_OPACKETS(sc, stats->gptc);
	IXGBE_SET_IBYTES(sc, stats->gorc);
	IXGBE_SET_OBYTES(sc, stats->gotc);
	IXGBE_SET_IMCASTS(sc, stats->mprc);
	IXGBE_SET_OMCASTS(sc, stats->mptc);
	IXGBE_SET_COLLISIONS(sc, 0);
	IXGBE_SET_IQDROPS(sc, total_missed_rx);

	/*
	 * Aggregate following types of errors as RX errors:
	 * - CRC error count,
	 * - illegal byte error count,
	 * - checksum error count,
	 * - missed packets count,
	 * - length error count,
	 * - undersized packets count,
	 * - fragmented packets count,
	 * - oversized packets count,
	 * - jabber count.
	 */
	IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc + stats->xec +
	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
	    stats->rjc);
} /* ixgbe_update_stats_counters */

/************************************************************************
 * ixgbe_add_hw_stats
 *
 *   Add sysctl variables, one per statistic, to the system.
 ************************************************************************/
 ************************************************************************/
static void
ixgbe_add_hw_stats(struct ixgbe_softc *sc)
{
    device_t dev = iflib_get_dev(sc->ctx);
    struct ix_rx_queue *rx_que;
    struct ix_tx_queue *tx_que;
    struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
    struct sysctl_oid *tree = device_get_sysctl_tree(dev);
    struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
    struct ixgbe_hw_stats *stats = &sc->stats.pf;
    struct sysctl_oid *stat_node, *queue_node;
    struct sysctl_oid_list *stat_list, *queue_list;
    int i;

#define QUEUE_NAME_LEN 32
    char namebuf[QUEUE_NAME_LEN];

    /* Driver Statistics */
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
        CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
        CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
        CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");

    for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
        i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;

        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
            ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
            ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
            CTLFLAG_RD, &txr->tso_tx, "TSO");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
            CTLFLAG_RD, &txr->total_packets,
            "Queue Packets Transmitted");
    }

    for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
        i++, rx_que++) {
        struct rx_ring *rxr = &rx_que->rxr;

        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
            CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
            &sc->rx_queues[i], 0,
            ixgbe_sysctl_interrupt_rate_handler, "IU",
            "Interrupt Rate");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
            CTLFLAG_RD, &(sc->rx_queues[i].irqs),
            "irqs on this queue");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
            ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
            ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
            CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
            CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
            CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
            CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
    }

    /* MAC stats get their own sub node */

    stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
    stat_list = SYSCTL_CHILDREN(stat_node);

    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
        CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
        CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
        CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
        CTLFLAG_RD, &stats->errbc, "Byte Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
        CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
        CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
        CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
        CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
        CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");

    /* Flow Control stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
        CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
        CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
        CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
        CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

    /* Packet Reception Stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
        CTLFLAG_RD, &stats->tor, "Total Octets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
        CTLFLAG_RD, &stats->gorc, "Good Octets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
        CTLFLAG_RD, &stats->tpr, "Total Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
        CTLFLAG_RD, &stats->gprc, "Good Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
        CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
        CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
        CTLFLAG_RD, &stats->prc64, "64 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
        CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
        CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
        CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
        CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
        CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
        CTLFLAG_RD, &stats->ruc, "Receive Undersized");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
        CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
        CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
        CTLFLAG_RD, &stats->rjc, "Received Jabber");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
        CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
        CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
        CTLFLAG_RD, &stats->xec, "Checksum Errors");

    /* Packet Transmission Stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
        CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
        CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
        CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
        CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
        CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
        CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
        CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
        CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
        CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
        CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
        CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
        CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 * Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
    struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!txr)
        return (0);

    val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_tdh_handler */

/************************************************************************
 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
 *
 * Retrieves the TDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
{
    struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!txr)
        return (0);

    val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_tdt_handler */

/************************************************************************
 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
 *
 * Retrieves the RDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
{
    struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!rxr)
        return (0);

    val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_rdh_handler */

/************************************************************************
 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
 *
 * Retrieves the RDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
{
    struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!rxr)
        return (0);

    val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_rdt_handler */

/************************************************************************
 * ixgbe_if_vlan_register
 *
 * Run via vlan config EVENT, it enables us to use the
 * HW Filter table since we can get the vlan id. This
 * just creates the entry in the soft version of the
 * VFTA, init will repopulate the real table.
 ************************************************************************/
static void
ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    sc->shadow_vfta[index] |= (1 << bit);
    ++sc->num_vlans;
    ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_register */

/************************************************************************
 * ixgbe_if_vlan_unregister
 *
 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
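 * As with registration, the hardware VFTA is then refreshed from
 * the shadow copy by ixgbe_setup_vlan_hw_support().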
 ************************************************************************/
static void
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    sc->shadow_vfta[index] &= ~(1 << bit);
    --sc->num_vlans;
    /* Re-init to load the changes */
    ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_unregister */

/************************************************************************
 * ixgbe_setup_vlan_hw_support
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct rx_ring *rxr;
    int i;
    u32 ctrl;

    /*
     * We get here through init_locked, meaning a soft reset.
     * The reset has already cleared the VFTA and other state,
     * so if no VLANs have been registered there is nothing to do.
     */
    if (sc->num_vlans == 0)
        return;

    /* Setup the queues for vlans */
    if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
        for (i = 0; i < sc->num_rx_queues; i++) {
            rxr = &sc->rx_queues[i].rxr;
            /* On 82599 the VLAN enable is per/queue in RXDCTL */
            if (hw->mac.type != ixgbe_mac_82598EB) {
                ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
                ctrl |= IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
            }
            rxr->vtag_strip = true;
        }
    }

    if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
        return;
    /*
     * A soft reset zeroes out the VFTA, so
     * we need to repopulate it now.
     */
    for (i = 0; i < IXGBE_VFTA_SIZE; i++)
        if (sc->shadow_vfta[i] != 0)
            IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
                sc->shadow_vfta[i]);

    ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
    /* Enable the Filter Table if enabled */
    if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
        ctrl &= ~IXGBE_VLNCTRL_CFIEN;
        ctrl |= IXGBE_VLNCTRL_VFE;
    }
    if (hw->mac.type == ixgbe_mac_82598EB)
        ctrl |= IXGBE_VLNCTRL_VME;
    IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */

/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct ixgbe_softc *sc)
{
    device_t dev = iflib_get_dev(sc->ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int bus_info_valid = true;
    u32 offset;
    u16 link;

    /* Some devices are behind an internal bridge */
    switch (hw->device_id) {
    case IXGBE_DEV_ID_82599_SFP_SF_QP:
    case IXGBE_DEV_ID_82599_QSFP_SF_QP:
        goto get_parent_info;
    default:
        break;
    }

    ixgbe_get_bus_info(hw);

    /*
     * Some devices don't use PCI-E, but there is no need
     * to display "Unknown" for bus speed and width.
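     * (The X550EM parts handled below fall into this class;
     * they simply return without printing anything.)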
     */
    switch (hw->mac.type) {
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        return;
    default:
        goto display;
    }

get_parent_info:
    /*
     * For the Quad port adapter we need to parse back
     * up the PCI tree to find the speed of the expansion
     * slot into which this adapter is plugged. A bit more work.
     */
    dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
    device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
        pci_get_slot(dev), pci_get_function(dev));
#endif
    dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
    device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
        pci_get_slot(dev), pci_get_function(dev));
#endif
    /* Now get the PCI Express Capabilities offset */
    if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
        /*
         * Hmm...can't get PCI-Express capabilities.
         * Falling back to default method.
         */
        bus_info_valid = false;
        ixgbe_get_bus_info(hw);
        goto display;
    }
    /* ...and read the Link Status Register */
    link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
    ixgbe_set_pci_config_data_generic(hw, link);

display:
    device_printf(dev, "PCI Express Bus: Speed %s %s\n",
        ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
        (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
        (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
        "Unknown"),
        ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
        (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
        (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
        "Unknown"));

    if (bus_info_valid) {
        if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
            ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
            (hw->bus.speed == ixgbe_bus_speed_2500))) {
            device_printf(dev,
                "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
            device_printf(dev,
                "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
        }
        if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
            ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
            (hw->bus.speed < ixgbe_bus_speed_8000))) {
            device_printf(dev,
                "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
            device_printf(dev,
                "For optimal performance a x8 PCIE Gen3 slot is required.\n");
        }
    } else
        device_printf(dev,
            "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

    return;
} /* ixgbe_get_slot_info */

/************************************************************************
 * ixgbe_if_msix_intr_assign
 *
 * Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *rx_que = sc->rx_queues;
    struct ix_tx_queue *tx_que;
    int error, rid, vector = 0;
    char buf[16];

    /* Admin queue is vector 0 */
    rid = vector + 1;
    for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
        rid = vector + 1;

        snprintf(buf, sizeof(buf), "rxq%d", i);
        error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
            IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);

        if (error) {
            device_printf(iflib_get_dev(ctx),
                "Failed to allocate que int %d err: %d", i, error);
            sc->num_rx_queues = i + 1;
            goto fail;
        }

        rx_que->msix = vector;
    }
    for (int i = 0; i < sc->num_tx_queues; i++) {
        snprintf(buf, sizeof(buf), "txq%d", i);
        tx_que = &sc->tx_queues[i];
        tx_que->msix = i % sc->num_rx_queues;
        iflib_softirq_alloc_generic(ctx,
            &sc->rx_queues[tx_que->msix].que_irq,
            IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
    }
    rid = vector + 1;
    error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
        IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
    if (error) {
        device_printf(iflib_get_dev(ctx),
            "Failed to register admin handler");
        return (error);
    }

    sc->vector = vector;

    return (0);
fail:
    iflib_irq_free(ctx, &sc->irq);
    rx_que = sc->rx_queues;
    for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
        iflib_irq_free(ctx, &rx_que->que_irq);

    return (error);
} /* ixgbe_if_msix_intr_assign */

static inline void
ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
{
    uint32_t newitr = 0;
    struct rx_ring *rxr = &que->rxr;

    /*
     * Do Adaptive Interrupt Moderation:
     *   - Write out last calculated setting
     *   - Calculate based on average size over
     *     the last interval.
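     *   The average bytes-per-packet figure is mapped to an EITR
     *   interval below, so larger frames yield a lower interrupt
     *   rate.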
     */
    if (que->eitr_setting) {
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
            que->eitr_setting);
    }

    que->eitr_setting = 0;
    /* Idle, do nothing */
    if (rxr->bytes == 0) {
        return;
    }

    if ((rxr->bytes) && (rxr->packets)) {
        newitr = (rxr->bytes / rxr->packets);
    }

    newitr += 24; /* account for hardware frame, crc */
    /* set an upper boundary */
    newitr = min(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200)) {
        newitr = (newitr / 3);
    } else {
        newitr = (newitr / 2);
    }

    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        newitr |= newitr << 16;
    } else {
        newitr |= IXGBE_EITR_CNT_WDIS;
    }

    /* save for next interrupt */
    que->eitr_setting = newitr;

    /* Reset state */
    rxr->bytes = 0;
    rxr->packets = 0;

    return;
} /* ixgbe_perform_aim */

/*********************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 **********************************************************************/
static int
ixgbe_msix_que(void *arg)
{
    struct ix_rx_queue *que = arg;
    struct ixgbe_softc *sc = que->sc;
    struct ifnet *ifp = iflib_get_ifp(que->sc->ctx);

    /* Protect against spurious interrupts */
    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
        return (FILTER_HANDLED);

    ixgbe_disable_queue(sc, que->msix);
    ++que->irqs;

    /* Check for AIM */
    if (sc->enable_aim) {
        ixgbe_perform_aim(sc, que);
    }

    return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_msix_que */

/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 * Called whenever the user queries the status of
 * the interface using ifconfig.
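 *
 * Reports link state, the active media subtype for the current
 * PHY layer and link speed, and the flow-control pause settings.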
 ************************************************************************/
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int layer;

    INIT_DEBUGOUT("ixgbe_if_media_status: begin");

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_active)
        return;

    ifmr->ifm_status |= IFM_ACTIVE;
    layer = sc->phy_layer;

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
        layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
        layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_100_FULL:
            ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_10_FULL:
            ifmr->ifm_active |= IFM_10_T | IFM_FDX;
            break;
        }
    if (hw->mac.type == ixgbe_mac_X550)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_5GB_FULL:
            ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
        layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
            break;
        }
    /*
     * XXX: These need to use the proper media types once
     * they're added.
     */
#ifndef IFM_ETH_XTYPE
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
            break;
        }
    else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
        layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
            break;
        }
#else
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
            break;
        }
    else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
        layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
            break;
        }
#endif

    /* If nothing is recognized... */
    if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
        ifmr->ifm_active |= IFM_UNKNOWN;

    /* Display current flow control setting used on link */
    if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
        hw->fc.current_mode == ixgbe_fc_full)
        ifmr->ifm_active |= IFM_ETH_RXPAUSE;
    if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
        hw->fc.current_mode == ixgbe_fc_full)
        ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_if_media_status */

/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 * Called when the user changes speed/duplex using
 * media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ifmedia *ifm = iflib_get_media(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    ixgbe_link_speed speed = 0;

    INIT_DEBUGOUT("ixgbe_if_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    if (hw->phy.media_type == ixgbe_media_type_backplane)
        return (EPERM);

    /*
     * We don't actually need to check against the supported
     * media types of the adapter; ifmedia will take care of
     * that for us.
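     * The switch below just maps each subtype to the set of
     * link speeds the hardware should try to negotiate.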
     */
    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
    case IFM_10G_T:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
    case IFM_10G_LRM:
    case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
    case IFM_10G_SR: /* KR, too */
    case IFM_10G_CX4: /* KX4 */
#else
    case IFM_10G_KR:
    case IFM_10G_KX4:
#endif
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
#ifndef IFM_ETH_XTYPE
    case IFM_1000_CX: /* KX */
#else
    case IFM_1000_KX:
#endif
    case IFM_1000_LX:
    case IFM_1000_SX:
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        break;
    case IFM_1000_T:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        break;
    case IFM_10G_TWINAX:
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
    case IFM_5000_T:
        speed |= IXGBE_LINK_SPEED_5GB_FULL;
        break;
    case IFM_2500_T:
        speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
        break;
    case IFM_100_TX:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        break;
    case IFM_10_T:
        speed |= IXGBE_LINK_SPEED_10_FULL;
        break;
    default:
        goto invalid;
    }

    hw->mac.autotry_restart = true;
    hw->mac.ops.setup_link(hw, speed, true);
    sc->advertise =
        ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
        ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
        ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
        ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
        ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
        ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);

    return (0);

invalid:
    device_printf(iflib_get_dev(ctx), "Invalid media type!\n");

    return (EINVAL);
} /* ixgbe_if_media_change */

/************************************************************************
 * ixgbe_set_promisc
 ************************************************************************/
static int
ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ifnet *ifp = iflib_get_ifp(ctx);
    u32 rctl;
    int mcnt = 0;

    rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
    rctl &= (~IXGBE_FCTRL_UPE);
    if (ifp->if_flags & IFF_ALLMULTI)
        mcnt = MAX_NUM_MULTICAST_ADDRESSES;
    else {
        mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
    }
    if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
        rctl &= (~IXGBE_FCTRL_MPE);
    IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);

    if (ifp->if_flags & IFF_PROMISC) {
        rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
    } else if (ifp->if_flags & IFF_ALLMULTI) {
        rctl |= IXGBE_FCTRL_MPE;
        rctl &= ~IXGBE_FCTRL_UPE;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
    }
    return (0);
} /* ixgbe_if_promisc_set */

/************************************************************************
 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
    struct ixgbe_softc *sc = arg;
    struct ixgbe_hw *hw = &sc->hw;
    u32 eicr, eicr_mask;
    s32 retval;

    ++sc->link_irq;

    /* Pause other interrupts */
    IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

    /* First get the cause */
    eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
    /* Be sure the queue bits are not cleared */
    eicr &= ~IXGBE_EICR_RTX_QUEUE;
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

    /* Link status change */
    if (eicr & IXGBE_EICR_LSC) {
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
        sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
    }

    if (sc->hw.mac.type != ixgbe_mac_82598EB) {
        if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
            (eicr & IXGBE_EICR_FLOW_DIR)) {
            /* This is probably overkill :) */
            if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
                return (FILTER_HANDLED);
            /* Disable the interrupt */
            IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
            sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
        } else if (eicr & IXGBE_EICR_ECC) {
            device_printf(iflib_get_dev(sc->ctx),
                "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
            IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
        }

        /* Check for over temp condition */
        if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
            switch (sc->hw.mac.type) {
            case ixgbe_mac_X550EM_a:
                if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
                    break;
                IXGBE_WRITE_REG(hw, IXGBE_EIMC,
                    IXGBE_EICR_GPI_SDP0_X550EM_a);
                IXGBE_WRITE_REG(hw, IXGBE_EICR,
                    IXGBE_EICR_GPI_SDP0_X550EM_a);
                retval = hw->phy.ops.check_overtemp(hw);
                if (retval != IXGBE_ERR_OVERTEMP)
                    break;
                device_printf(iflib_get_dev(sc->ctx),
                    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
                device_printf(iflib_get_dev(sc->ctx),
                    "System shutdown required!\n");
                break;
            default:
                if (!(eicr & IXGBE_EICR_TS))
                    break;
                retval = hw->phy.ops.check_overtemp(hw);
                if (retval != IXGBE_ERR_OVERTEMP)
                    break;
                device_printf(iflib_get_dev(sc->ctx),
                    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
                device_printf(iflib_get_dev(sc->ctx),
                    "System shutdown required!\n");
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
                break;
            }
        }

        /* Check for VF message */
        if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
            (eicr & IXGBE_EICR_MAILBOX))
            sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
    }

    if (ixgbe_is_sfp(hw)) {
        /* Pluggable optics-related interrupt */
        if (hw->mac.type >= ixgbe_mac_X540)
            eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
        else
            eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

        if (eicr & eicr_mask) {
            IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
            sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
        }

        if ((hw->mac.type == ixgbe_mac_82599EB) &&
            (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
            IXGBE_WRITE_REG(hw, IXGBE_EICR,
                IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
            sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
        }
    }

    /* Check for fan failure */
    if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
        ixgbe_check_fan_failure(sc, eicr, true);
        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
    }

    /* External PHY interrupt */
    if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
        (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
        sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
    }

    return (sc->task_requests != 0) ?
        FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
} /* ixgbe_msix_link */

/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
    struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
    int error;
    unsigned int reg, usec, rate;

    reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
    usec = ((reg & 0x0FF8) >> 3);
    if (usec > 0)
        rate = 500000 / usec;
    else
        rate = 0;
    error = sysctl_handle_int(oidp, &rate, 0, req);
    if (error || !req->newptr)
        return (error);
    reg &= ~0xfff; /* default, no limitation */
    ixgbe_max_interrupt_rate = 0;
    if (rate > 0 && rate < 500000) {
        if (rate < 1000)
            rate = 1000;
        ixgbe_max_interrupt_rate = rate;
        reg |= ((4000000 / rate) & 0xff8);
    }
    IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);

    return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */

/************************************************************************
 * ixgbe_add_device_sysctls
 ************************************************************************/
static void
ixgbe_add_device_sysctls(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct sysctl_oid_list *child;
    struct sysctl_ctx_list *ctx_list;

    ctx_list = device_get_sysctl_ctx(dev);
    child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

    /* Sysctls for all devices */
    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, ixgbe_sysctl_flowcntl, "I",
        IXGBE_SYSCTL_DESC_SET_FC);

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, ixgbe_sysctl_advertise, "I",
        IXGBE_SYSCTL_DESC_ADV_SPEED);

    sc->enable_aim = ixgbe_enable_aim;
    SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
        &sc->enable_aim, 0, "Interrupt Moderation");

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
        CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
        ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");

#ifdef IXGBE_DEBUG
    /* testing sysctls (for all devices) */
    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, ixgbe_sysctl_power_state,
        "I", "PCI Power State");

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
        CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
        ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
    /* for X550 series devices */
    if (hw->mac.type >= ixgbe_mac_X550)
        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
            CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
            sc, 0, ixgbe_sysctl_dmac,
            "I", "DMA Coalesce");

    /* for WoL-capable devices */
    if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
            ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");

        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
            CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
            sc, 0, ixgbe_sysctl_wufc,
            "I", "Enable/Disable Wake Up Filters");
    }

    /* for X552/X557-AT devices */
    if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
        struct sysctl_oid *phy_node;
        struct sysctl_oid_list *phy_list;

        phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
        phy_list = SYSCTL_CHILDREN(phy_node);

        SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
            CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
            sc, 0, ixgbe_sysctl_phy_temp,
            "I", "Current External PHY Temperature (Celsius)");

        SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
            "overtemp_occurred",
            CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
            ixgbe_sysctl_phy_overtemp_occurred, "I",
            "External PHY High Temperature Event Occurred");
    }

    if (sc->feat_cap & IXGBE_FEATURE_EEE) {
        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
            ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
    }
} /* ixgbe_add_device_sysctls */

/************************************************************************
 * ixgbe_allocate_pci_resources
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    int rid;

    rid = PCIR_BAR(0);
    sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);

    if (!(sc->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    /* Save bus_space values for READ/WRITE_REG macros */
    sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
    sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
    /* Set hw values for shared code */
    sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

    return (0);
} /* ixgbe_allocate_pci_resources */

/************************************************************************
 * ixgbe_detach - Device removal routine
 *
 * Called when the driver is being removed.
 * Stops the adapter and deallocates all the resources
 * that were allocated for driver operation.
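 * Any active SR-IOV VFs must be detached first; otherwise this
 * returns EBUSY and the device stays attached.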
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_detach(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    u32 ctrl_ext;

    INIT_DEBUGOUT("ixgbe_detach: begin");

    if (ixgbe_pci_iov_detach(dev) != 0) {
        device_printf(dev, "SR-IOV in use; detach first.\n");
        return (EBUSY);
    }

    ixgbe_setup_low_power_mode(ctx);

    /* let hardware know driver is unloading */
    ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
    ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);

    ixgbe_free_pci_resources(ctx);
    free(sc->mta, M_IXGBE);

    return (0);
} /* ixgbe_if_detach */

/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 * Prepare the adapter/port for LPLU and/or WoL
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = iflib_get_dev(ctx);
    s32 error = 0;

    if (!hw->wol_enabled)
        ixgbe_set_phy_power(hw, false);

    /* Limit power management flow to X550EM baseT */
    if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
        hw->phy.ops.enter_lplu) {
        /* Turn off support for APM wakeup. (Using ACPI instead) */
        IXGBE_WRITE_REG(hw, IXGBE_GRC,
            IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

        /*
         * Clear Wake Up Status register to prevent any previous wakeup
         * events from waking us up immediately after we suspend.
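         * (WUS is write-one-to-clear, hence the all-ones write below.)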
         */
        IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

        /*
         * Program the Wakeup Filter Control register with user filter
         * settings
         */
        IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);

        /* Enable wakeups and power management in Wakeup Control */
        IXGBE_WRITE_REG(hw, IXGBE_WUC,
            IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

        /* X550EM baseT adapters need a special LPLU flow */
        hw->phy.reset_disable = true;
        ixgbe_if_stop(ctx);
        error = hw->phy.ops.enter_lplu(hw);
        if (error)
            device_printf(dev, "Error entering LPLU: %d\n", error);
        hw->phy.reset_disable = false;
    } else {
        /* Just stop for other adapters */
        ixgbe_if_stop(ctx);
    }

    return (error);
} /* ixgbe_setup_low_power_mode */

/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 ************************************************************************/
static int
ixgbe_if_shutdown(if_ctx_t ctx)
{
    int error = 0;

    INIT_DEBUGOUT("ixgbe_shutdown: begin");

    error = ixgbe_setup_low_power_mode(ctx);

    return (error);
} /* ixgbe_if_shutdown */

/************************************************************************
 * ixgbe_suspend
 *
 * From D0 to D3
 ************************************************************************/
static int
ixgbe_if_suspend(if_ctx_t ctx)
{
    int error = 0;

    INIT_DEBUGOUT("ixgbe_suspend: begin");

    error = ixgbe_setup_low_power_mode(ctx);

    return (error);
} /* ixgbe_if_suspend */

/************************************************************************
 * ixgbe_resume
 *
 * From D3 to D0
 ************************************************************************/
static int
ixgbe_if_resume(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    u32 wus;

    INIT_DEBUGOUT("ixgbe_resume: begin");

    /* Read & clear WUS register */
    wus = IXGBE_READ_REG(hw, IXGBE_WUS);
    if (wus)
        device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
    IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
    /* And clear WUFC until next low-power transition */
    IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

    /*
     * Required after D3->D0 transition;
     * will re-advertise all previous advertised speeds
     */
    if (ifp->if_flags & IFF_UP)
        ixgbe_if_init(ctx);

    return (0);
} /* ixgbe_if_resume */

/************************************************************************
 * ixgbe_if_mtu_set - Ioctl mtu entry point
 *
 * Return 0 on success, EINVAL on failure
 ************************************************************************/
static int
ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    int error = 0;

    IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");

    if (mtu > IXGBE_MAX_MTU) {
        error = EINVAL;
    } else {
        sc->max_frame_size = mtu + IXGBE_MTU_HDR;
    }

    return (error);
} /* ixgbe_if_mtu_set */

/************************************************************************
 * ixgbe_if_crcstrip_set
 ************************************************************************/
static void
ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    /* crc stripping is set in two places:
     * IXGBE_HLREG0 (modified on init_locked and hw reset)
     * IXGBE_RDRXCTL (set by the original driver in
     * ixgbe_setup_hw_rsc() called in init_locked.
     * We disable the setting when netmap is compiled in).
     * We update the values here, but also in ixgbe.c because
     * init_locked sometimes is called outside our control.
     */
    uint32_t hl, rxc;

    hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
#ifdef NETMAP
    if (netmap_verbose)
        D("%s read HLREG 0x%x rxc 0x%x",
            onoff ? "enter" : "exit", hl, rxc);
#endif
    /* hw requirements ... */
    rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
    rxc |= IXGBE_RDRXCTL_RSCACKC;
    if (onoff && !crcstrip) {
        /* keep the crc. Fast rx */
        hl &= ~IXGBE_HLREG0_RXCRCSTRP;
        rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
    } else {
        /* reset default mode */
        hl |= IXGBE_HLREG0_RXCRCSTRP;
        rxc |= IXGBE_RDRXCTL_CRCSTRIP;
    }
#ifdef NETMAP
    if (netmap_verbose)
        D("%s write HLREG 0x%x rxc 0x%x",
            onoff ? "enter" : "exit", hl, rxc);
#endif
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
    IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
} /* ixgbe_if_crcstrip_set */

/*********************************************************************
 * ixgbe_if_init - Init entry point
 *
 * Used in two ways: It is used by the stack as an init
 * entry point in network interface structure. It is also
 * used by the driver as a hw/sw initialization routine to
 * get to a consistent state.
 **********************************************************************/
void
ixgbe_if_init(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ifnet *ifp = iflib_get_ifp(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct ix_rx_queue *rx_que;
    struct ix_tx_queue *tx_que;
    u32 txdctl, mhadd;
    u32 rxdctl, rxctrl;
    u32 ctrl_ext;
    int i, j, err;

    INIT_DEBUGOUT("ixgbe_if_init: begin");

    /* Queue indices may change with IOV mode */
    ixgbe_align_all_queue_indices(sc);

    /* reprogram the RAR[0] in case user changed it.
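     * RAR[0] is the primary unicast address filter entry.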
     */
    ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);

    /* Get the latest mac address, User can use a LAA */
    bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
    ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
    hw->addr_ctrl.rar_used_count = 1;

    ixgbe_init_hw(hw);

    ixgbe_initialize_iov(sc);

    ixgbe_initialize_transmit_units(ctx);

    /* Setup Multicast table */
    ixgbe_if_multi_set(ctx);

    /* Determine the correct mbuf pool, based on frame size */
    sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

    /* Configure RX settings */
    ixgbe_initialize_receive_units(ctx);

    /*
     * Initialize variable holding task enqueue requests
     * from MSI-X interrupts
     */
    sc->task_requests = 0;

    /* Enable SDP & MSI-X interrupts based on adapter */
    ixgbe_config_gpie(sc);

    /* Set MTU size */
    if (ifp->if_mtu > ETHERMTU) {
        /* aka IXGBE_MAXFRS on 82599 and newer */
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    }

    /* Now enable all the queues */
    for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
        i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;

        txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        /* Set WTHRESH to 8, burst writeback */
        txdctl |= (8 << 16);
        /*
         * When the internal queue falls below PTHRESH (32),
         * start prefetching as long as there are at least
         * HTHRESH (1) buffers ready. The values are taken
         * from the Intel linux driver 3.8.21.
         * Prefetching enables tx line rate even with 1 queue.
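         * TXDCTL layout: PTHRESH lives in bits 6:0, HTHRESH in
         * bits 14:8 and WTHRESH in bits 22:16, which is what the
         * shifts below encode.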
         */
        txdctl |= (32 << 0) | (1 << 8);
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
    }

    for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
        i++, rx_que++) {
        struct rx_ring *rxr = &rx_que->rxr;

        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
        if (hw->mac.type == ixgbe_mac_82598EB) {
            /*
             * PTHRESH = 21
             * HTHRESH = 4
             * WTHRESH = 8
             */
            rxdctl &= ~0x3FFFFF;
            rxdctl |= 0x080420;
        }
        rxdctl |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
        for (j = 0; j < 10; j++) {
            if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
                IXGBE_RXDCTL_ENABLE)
                break;
            else
                msec_delay(1);
        }
        wmb();
    }

    /* Enable Receive engine */
    rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
    if (hw->mac.type == ixgbe_mac_82598EB)
        rxctrl |= IXGBE_RXCTRL_DMBYPS;
    rxctrl |= IXGBE_RXCTRL_RXEN;
    ixgbe_enable_rx_dma(hw, rxctrl);

    /* Set up MSI/MSI-X routing */
    if (ixgbe_enable_msix) {
        ixgbe_configure_ivars(sc);
        /* Set up auto-mask */
        if (hw->mac.type == ixgbe_mac_82598EB)
            IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
        else {
            IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
            IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
        }
    } else { /* Simple settings for Legacy/MSI */
        ixgbe_set_ivar(sc, 0, 0, 0);
        ixgbe_set_ivar(sc, 0, 0, 1);
        IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
    }

    ixgbe_init_fdir(sc);

    /*
     * Check on any SFP devices that
     * need to be kick-started
     */
    if (hw->phy.type == ixgbe_phy_none) {
        err = hw->phy.ops.identify(hw);
        if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
            device_printf(dev,
                "Unsupported SFP+ module type was detected.\n");
            return;
        }
    }

    /* Set moderation on the Link interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);

    /* Enable power to the phy.
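     * Power may have been removed earlier by the low-power/WoL
     * path (ixgbe_setup_low_power_mode).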
*/ 3155 ixgbe_set_phy_power(hw, true); 3156 3157 /* Config/Enable Link */ 3158 ixgbe_config_link(ctx); 3159 3160 /* Hardware Packet Buffer & Flow Control setup */ 3161 ixgbe_config_delay_values(sc); 3162 3163 /* Initialize the FC settings */ 3164 ixgbe_start_hw(hw); 3165 3166 /* Set up VLAN support and filter */ 3167 ixgbe_setup_vlan_hw_support(ctx); 3168 3169 /* Setup DMA Coalescing */ 3170 ixgbe_config_dmac(sc); 3171 3172 /* And now turn on interrupts */ 3173 ixgbe_if_enable_intr(ctx); 3174 3175 /* Enable the use of the MBX by the VF's */ 3176 if (sc->feat_en & IXGBE_FEATURE_SRIOV) { 3177 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3178 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 3179 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3180 } 3181 3182 } /* ixgbe_init_locked */ 3183 3184 /************************************************************************ 3185 * ixgbe_set_ivar 3186 * 3187 * Setup the correct IVAR register for a particular MSI-X interrupt 3188 * (yes this is all very magic and confusing :) 3189 * - entry is the register array entry 3190 * - vector is the MSI-X vector for this queue 3191 * - type is RX/TX/MISC 3192 ************************************************************************/ 3193 static void 3194 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type) 3195 { 3196 struct ixgbe_hw *hw = &sc->hw; 3197 u32 ivar, index; 3198 3199 vector |= IXGBE_IVAR_ALLOC_VAL; 3200 3201 switch (hw->mac.type) { 3202 case ixgbe_mac_82598EB: 3203 if (type == -1) 3204 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3205 else 3206 entry += (type * 64); 3207 index = (entry >> 2) & 0x1F; 3208 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3209 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3210 ivar |= (vector << (8 * (entry & 0x3))); 3211 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar); 3212 break; 3213 case ixgbe_mac_82599EB: 3214 case ixgbe_mac_X540: 3215 case ixgbe_mac_X550: 3216 case ixgbe_mac_X550EM_x: 3217 case ixgbe_mac_X550EM_a: 3218 if (type == -1) { /* MISC IVAR */ 3219 index = (entry & 1) * 8; 3220 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3221 ivar &= ~(0xFF << index); 3222 ivar |= (vector << index); 3223 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3224 } else { /* RX/TX IVARS */ 3225 index = (16 * (entry & 1)) + (8 * type); 3226 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3227 ivar &= ~(0xFF << index); 3228 ivar |= (vector << index); 3229 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3230 } 3231 default: 3232 break; 3233 } 3234 } /* ixgbe_set_ivar */ 3235 3236 /************************************************************************ 3237 * ixgbe_configure_ivars 3238 ************************************************************************/ 3239 static void 3240 ixgbe_configure_ivars(struct ixgbe_softc *sc) 3241 { 3242 struct ix_rx_queue *rx_que = sc->rx_queues; 3243 struct ix_tx_queue *tx_que = sc->tx_queues; 3244 u32 newitr; 3245 3246 if (ixgbe_max_interrupt_rate > 0) 3247 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3248 else { 3249 /* 3250 * Disable DMA coalescing if interrupt moderation is 3251 * disabled. 
/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		sc->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		/* First the RX queue entry */
		ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);

		/* Set an Initial EITR value */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
	}
	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		/* ... and the TX */
		ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
	}
	/* For the Link interrupt */
	ixgbe_set_ivar(sc, 1, sc->vector, -1);
} /* ixgbe_configure_ivars */

/************************************************************************
 * ixgbe_config_gpie
 ************************************************************************/
static void
ixgbe_config_gpie(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (sc->intr_type == IFLIB_INTR_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE |
		    IXGBE_GPIE_EIAME |
		    IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
} /* ixgbe_config_gpie */

/************************************************************************
 * ixgbe_config_delay_values
 *
 *   Requires sc->max_frame_size to be set.
 ************************************************************************/
static void
ixgbe_config_delay_values(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 rxpb, frame, size, tmp;

	frame = sc->max_frame_size;

	/* Calculate High Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = true;
} /* ixgbe_config_delay_values */
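/*
 * Note (illustrative): the high watermark above is the RX packet buffer
 * size in KB (RXPBSIZE is in bytes, hence the >> 10) minus the delay
 * value IXGBE_DV()/IXGBE_DV_X540() computes from the frame size, i.e.
 * the headroom needed to absorb traffic already in flight once an XOFF
 * pause frame goes out. IXGBE_BT2KB() converts that delay from bits to
 * KB. Only traffic class 0 is programmed here, matching the single-TC
 * configuration used elsewhere in this driver.
 */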
 ************************************************************************/
static u_int
ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_mc_addr *mta = sc->mta;

	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
		return (0);
	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	mta[idx].vmdq = sc->pool;

	return (1);
} /* ixgbe_mc_filter_apply */

static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	u8 *update_ptr;
	u32 fctrl;
	u_int mcnt;

	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");

	mta = sc->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
	    sc);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, true);
	}

	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);

	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
	    ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
} /* ixgbe_if_multi_set */

/************************************************************************
 * ixgbe_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array built by ixgbe_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8 *)(mta + 1);

	return (mta->addr);
} /* ixgbe_mc_array_itr */
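/*
 * Note: the shared code treats update_ptr as an opaque cursor, so the
 * iterator above advances it by whole struct ixgbe_mc_addr entries
 * (mta + 1) rather than by raw MAC-address bytes; each call hands back
 * one address and its VMDq pool until mcnt entries have been consumed.
 */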
/************************************************************************
 * ixgbe_if_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	/* Check for pluggable optics */
	if (sc->sfp_probe)
		if (!ixgbe_sfp_probe(ctx))
			return; /* Nothing to do */

	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixgbe_if_timer */

/************************************************************************
 * ixgbe_sfp_probe
 *
 *   Determine if a port had optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	bool result = false;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		sc->sfp_probe = false;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module detected!\n");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = true;
	}
out:
	return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 err, cage_full = 0;

	if (sc->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */

/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, true);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(sc->media);
	ixgbe_add_media_types(sc->ctx);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
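/*
 * Note on the task_requests protocol: ixgbe_handle_mod() queues the MSF
 * (multispeed fiber) step by setting IXGBE_REQUEST_TASK_MSF only after a
 * module has been identified and set up successfully, and clears it on
 * any failure so a bad module cannot trigger link renegotiation. The
 * flags are consumed in ixgbe_if_update_admin_status() below.
 */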
/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error;

	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(sc->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!!"
		    " PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(sc->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	hw->adapter_stopped = false;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	sc->link_up = false;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
} /* ixgbe_if_stop */
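/*
 * Note on the ordering above: ixgbe_stop_adapter() sets
 * hw->adapter_stopped back to true so that shared-code paths stop
 * touching the hardware; the flag is cleared beforehand so the stop
 * sequence runs in full even though ixgbe_reset_hw() has already
 * stopped the MAC once.
 */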
/************************************************************************
 * ixgbe_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);

	if (sc->link_up) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev,
				    "Link is up %d Gbps %s\n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&sc->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(sc);
			/* should actually be negotiated value */
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    IF_Gbps(10));

			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	}

	/* Handle task requests from msix_link() */
	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	sc->task_requests = 0;

	ixgbe_update_stats_counters(sc);
} /* ixgbe_if_update_admin_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

	if (dcfg->watchdog_timer ^ sc->dmac ||
	    dcfg->link_speed ^ sc->link_speed) {
		dcfg->watchdog_timer = sc->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = sc->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */
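/*
 * Note (illustrative): the XOR tests above are just inequality checks;
 * the DMA coalescing hardware is only reprogrammed when the requested
 * watchdog value (sc->dmac, set via sysctl) or the negotiated link
 * speed differs from what was last written.
 */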
/************************************************************************
 * ixgbe_if_enable_intr
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (sc->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixgbe_if_enable_intr */

/************************************************************************
 * ixgbe_if_disable_intr
 ************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	if (sc->intr_type == IFLIB_INTR_MSIX)
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixgbe_if_disable_intr */
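/*
 * Note: with auto-clear (EIAC) armed above, queue causes are cleared by
 * the hardware when their MSI-X vector fires, so the fast path never has
 * to write EICR. Link (LSC) and "other" causes are deliberately excluded
 * from auto-clear and are re-armed from ixgbe_link_intr_enable() below
 * once the admin task has serviced them.
 */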
/************************************************************************
 * ixgbe_link_intr_enable
 ************************************************************************/
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
	struct ixgbe_hw *hw =
	    &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
} /* ixgbe_link_intr_enable */

/************************************************************************
 * ixgbe_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixgbe_enable_queue(sc, que->msix);

	return (0);
} /* ixgbe_if_rx_queue_intr_enable */

/************************************************************************
 * ixgbe_enable_queue
 ************************************************************************/
static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
} /* ixgbe_enable_queue */

/************************************************************************
 * ixgbe_disable_queue
 ************************************************************************/
static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = 1ULL << vector;
	u32 mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
} /* ixgbe_disable_queue */
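/*
 * Worked example (illustrative): newer MACs expose up to 64 queue
 * vectors as a 64-bit mask split across two 32-bit registers. For
 * vector 35, queue = 1ULL << 35, so (queue & 0xFFFFFFFF) is zero and
 * only EIMS_EX(1)/EIMC_EX(1) is written, with bit 3 (35 - 32) set.
 * The 82598 path instead masks with IXGBE_EIMS_RTX_QUEUE, since its
 * queue causes all fit in the original 32-bit EIMS/EIMC registers.
 */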
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ix_rx_queue *que = sc->rx_queues;
	struct ixgbe_hw *hw = &sc->hw;
	if_ctx_t ctx = sc->ctx;
	u32 eicr, eicr_mask;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 *   SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, fc;

	sc = (struct ixgbe_softc *)arg1;
	fc = sc->hw.fc.current_mode;

	error = sysctl_handle_int(oidp, &fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (fc == sc->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(sc, fc);
} /* ixgbe_sysctl_flowcntl */

/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 *   Flow control values:
 *     0 - off
 *     1 - rx pause
 *     2 - tx pause
 *     3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		sc->hw.fc.requested_mode = fc;
		if (sc->num_rx_queues > 1)
			ixgbe_disable_rx_drop(sc);
		break;
	case ixgbe_fc_none:
		sc->hw.fc.requested_mode = ixgbe_fc_none;
		if (sc->num_rx_queues > 1)
			ixgbe_enable_rx_drop(sc);
		break;
	default:
		return (EINVAL);
	}

	/* Don't autoneg if forcing a value */
	sc->hw.fc.disable_fc_autoneg = true;
	ixgbe_fc_enable(&sc->hw);

	return (0);
} /* ixgbe_set_flowcntl */
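/*
 * Usage example (illustrative; assumes this handler is attached as the
 * "fc" node under the device's sysctl tree):
 *
 *   # sysctl dev.ix.0.fc=3      # full RX/TX pause
 *   # sysctl dev.ix.0.fc=0      # flow control off
 *
 * The integer values map directly onto enum ixgbe_fc_mode, as listed
 * in the header comment of ixgbe_set_flowcntl() above.
 */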
/************************************************************************
 * ixgbe_enable_rx_drop
 *
 *   Enable the hardware to drop packets when the buffer is
 *   full. This is useful with multiqueue, so that no single
 *   queue being full stalls the entire RX engine. We only
 *   enable this when Multiqueue is enabled AND Flow Control
 *   is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < sc->num_rx_queues; i++) {
		rxr = &sc->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < sc->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */
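/*
 * Note: QDE is an indexed register interface; each write above selects
 * an entry with IXGBE_QDE_IDX_SHIFT and commits it with IXGBE_QDE_WRITE,
 * so the loops program the drop-enable setting one VF entry at a time
 * rather than writing a single bitmap.
 */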
/************************************************************************
 * ixgbe_sysctl_advertise
 *
 *   SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, advertise;

	sc = (struct ixgbe_softc *)arg1;
	advertise = sc->advertise;

	error = sysctl_handle_int(oidp, &advertise, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixgbe_set_advertise(sc, advertise);
} /* ixgbe_sysctl_advertise */

/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32 err = IXGBE_NOT_IMPLEMENTED;
	bool negotiate = false;

	/* Checks to validate new value */
	if (sc->advertise == advertise) /* no change */
		return (0);

	hw = &sc->hw;

	/* No speed changes for backplane media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber))) {
		device_printf(dev,
		    "Advertised speed can only be set on copper or multispeed fiber media types.\n");
		return (EINVAL);
	}

	if (advertise < 0x1 || advertise > 0x3F) {
		device_printf(dev,
		    "Invalid advertised speed; valid modes are 0x1 through 0x3F\n");
		return (EINVAL);
	}

	if (hw->mac.ops.get_link_capabilities) {
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(dev,
			    "Unable to determine supported advertise speeds\n");
			return (ENODEV);
		}
	}

	/* Set new value and report new advertised mode */
	if (advertise & 0x1) {
		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
			device_printf(dev,
			    "Interface does not support 100Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_100_FULL;
	}
	if (advertise & 0x2) {
		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 1Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	}
	if (advertise & 0x4) {
		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	}
	if (advertise & 0x8) {
		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
			device_printf(dev,
			    "Interface does not support 10Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10_FULL;
	}
	if (advertise & 0x10) {
		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 2.5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
	}
	if (advertise & 0x20) {
		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
			device_printf(dev,
			    "Interface does not support 5G advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(hw, speed, true);
	sc->advertise = advertise;

	return (0);
} /* ixgbe_set_advertise */
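/*
 * Usage example (illustrative; assumes this handler is attached as the
 * "advertise_speed" node under the device's sysctl tree):
 *
 *   # sysctl dev.ix.0.advertise_speed=0x6   # advertise 1G + 10G
 *
 * Each bit is validated against get_link_capabilities() before it is
 * folded into the ixgbe_link_speed mask handed to setup_link().
 */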
/************************************************************************
 * ixgbe_get_default_advertise - Get default advertised speed settings
 *
 *   Formatted for sysctl usage.
 *   Flags:
 *     0x1  - advertise 100 Mb
 *     0x2  - advertise 1G
 *     0x4  - advertise 10G
 *     0x8  - advertise 10 Mb (yes, Mb)
 *     0x10 - advertise 2.5G (disabled by default)
 *     0x20 - advertise 5G (disabled by default)
 ************************************************************************/
static int
ixgbe_get_default_advertise(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int speed;
	ixgbe_link_speed link_caps = 0;
	s32 err;
	bool negotiate = false;

	/*
	 * Advertised speed means nothing unless it's copper or
	 * multi-speed fiber
	 */
	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
	    !(hw->phy.multispeed_fiber))
		return (0);

	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
	if (err != IXGBE_SUCCESS)
		return (0);

	if (hw->mac.type == ixgbe_mac_X550) {
		/*
		 * 2.5G and 5G autonegotiation speeds on X550
		 * are disabled by default due to reported
		 * interoperability issues with some switches.
		 */
		link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
		    IXGBE_LINK_SPEED_5GB_FULL);
	}

	speed =
	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);

	return speed;
} /* ixgbe_get_default_advertise */
/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 *   Control values:
 *     0/1 - off / on (use default value of 1000)
 *
 *   Legal timer values are:
 *     50, 100, 250, 500, 1000, 2000, 5000, 10000
 *
 *   Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ifnet *ifp = iflib_get_ifp(sc->ctx);
	int error;
	u16 newval;

	newval = sc->dmac;
	error = sysctl_handle_16(oidp, &newval, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		sc->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		sc->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		sc->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */
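/*
 * Usage example (illustrative; assumes this handler is attached as the
 * "dmac" node under the device's sysctl tree):
 *
 *   # sysctl dev.ix.0.dmac=1      # enable, default 1000 usec watchdog
 *   # sysctl dev.ix.0.dmac=250    # enable with a 250 usec watchdog
 *   # sysctl dev.ix.0.dmac=0      # disable
 *
 * The watchdog units are assumed to be microseconds here; any value
 * outside the legal set listed above returns EINVAL.
 */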
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 *   Sysctl to test power states
 *   Values:
 *     0      - set device to D0
 *     3      - set device to D3
 *     (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_handle_int(oidp, &new_ps, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
} /* ixgbe_sysctl_power_state */
#endif

/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 *   Sysctl to enable/disable the WoL capability,
 *   if supported by the adapter.
 *
 *   Values:
 *     0 - disabled
 *     1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	int new_wol_enabled;
	int error = 0;

	new_wol_enabled = hw->wol_enabled;
	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	new_wol_enabled = !!(new_wol_enabled);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	if (new_wol_enabled > 0 && !sc->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */

/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 *   Sysctl to enable/disable the types of packets that the
 *   adapter will wake up upon receipt.
 *
 *   Flags:
 *     0x1  - Link Status Change
 *     0x2  - Magic Packet
 *     0x4  - Direct Exact
 *     0x8  - Directed Multicast
 *     0x10 - Broadcast
 *     0x20 - ARP/IPv4 Request Packet
 *     0x40 - Direct IPv4 Packet
 *     0x80 - Direct IPv6 Packet
 *
 *   Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	int error = 0;
	u32 new_wufc;

	new_wufc = sc->wufc;

	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == sc->wufc)
		return (0);

	if (new_wufc & 0xffffff00)
		return (EINVAL);

	new_wufc &= 0xff;
	new_wufc |= (0xffffff & sc->wufc);
	sc->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
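/*
 * Usage example (illustrative; assumes this handler is attached as the
 * "wufc" node under the device's sysctl tree): to wake on link status
 * change and magic packet only,
 *
 *   # sysctl dev.ix.0.wufc=0x3    # 0x1 (LSC) | 0x2 (magic packet)
 *
 * Bits above 0xff are rejected, matching the flag table in the header
 * comment of ixgbe_sysctl_wufc() above.
 */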
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */

/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 *   For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	/* Shift temp for output */
	reg = reg >> 8;

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 *   Reports (directly from the PHY) whether the current PHY
 *   temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Get occurrence bit */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */
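/*
 * Note: the X552/X557 external PHY reports temperature in the high byte
 * of IXGBE_PHY_CURRENT_TEMP, hence the >> 8 shift in
 * ixgbe_sysctl_phy_temp() above (the units are assumed to be degrees
 * Celsius), and latches the overtemp event in bit 14 (0x4000) of
 * IXGBE_PHY_OVERTEMP_STATUS.
 */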
/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 *   Sysctl to set EEE power saving feature
 *   Values:
 *     0      - disable EEE
 *     1      - enable EEE
 *     (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	struct ifnet *ifp = iflib_get_ifp(sc->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = ixgbe_setup_eee(&sc->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	else
		sc->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */

/************************************************************************
 * ixgbe_init_device_features
 ************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
	sc->feat_cap = IXGBE_FEATURE_NETMAP |
	    IXGBE_FEATURE_RSS |
	    IXGBE_FEATURE_MSI |
	    IXGBE_FEATURE_MSIX |
	    IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (sc->feat_cap & IXGBE_FEATURE_EEE)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev,
			    "Device does not support Flow Director."
			    " Leaving disabled.\n");
	}
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		sc->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		sc->feat_en &= ~IXGBE_FEATURE_RSS;
		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
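/*
 * Note: feat_cap describes what the silicon could do, feat_en what this
 * attach actually enabled; the pass above first widens feat_cap per MAC
 * type, then copies defaults and tunable-gated features into feat_en,
 * and finally strips anything whose dependencies (e.g. MSI-X for RSS
 * and SR-IOV) are missing.
 */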
/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	uint16_t phyfw = 0;
	int status;
	const char *space = "";

	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
	status = ixgbe_get_phy_firmware_version(hw, &phyfw);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", space,
		    nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
		space = " ";
	}

	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
		space = " ";
	}

	if (phyfw != 0 && status == IXGBE_SUCCESS)
		sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
} /* ixgbe_sbuf_fw_version */

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */