1 /****************************************************************************** 2 3 Copyright (c) 2001-2017, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/
/*$FreeBSD$*/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";


/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   (the human-readable description string is carried directly in
 *   each PVID entry)
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* required last entry */
	PVID_END
};

/* iflib device-interface (ifdi) method implementations */
static void *ixgbe_register(device_t dev);
static int  ixgbe_if_attach_pre(if_ctx_t ctx);
static int  ixgbe_if_attach_post(if_ctx_t ctx);
static int  ixgbe_if_detach(if_ctx_t ctx);
static int  ixgbe_if_shutdown(if_ctx_t ctx);
static int  ixgbe_if_suspend(if_ctx_t ctx);
static int  ixgbe_if_resume(if_ctx_t ctx);

static void ixgbe_if_stop(if_ctx_t ctx);
void ixgbe_if_enable_intr(if_ctx_t ctx);
static void ixgbe_if_disable_intr(if_ctx_t ctx);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
static int  ixgbe_if_media_change(if_ctx_t ctx);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
static void ixgbe_if_multi_set(if_ctx_t ctx);
static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static void ixgbe_if_queues_free(if_ctx_t ctx);
static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t ctx);
static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
int ixgbe_intr(void *arg);

/************************************************************************
 * Function prototypes
 ************************************************************************/
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
#endif

static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_add_device_sysctls(if_ctx_t ctx);
static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);

static void ixgbe_config_dmac(struct adapter *adapter);
static void ixgbe_configure_ivars(struct adapter *adapter);
static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
    s8 type);
static u8  *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t ctx);

static void ixgbe_free_pci_resources(if_ctx_t ctx);

static int  ixgbe_msix_link(void *arg);
static int  ixgbe_msix_que(void *arg);
static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
static void ixgbe_initialize_receive_units(if_ctx_t ctx);
static void ixgbe_initialize_transmit_units(if_ctx_t ctx);

static int  ixgbe_setup_interface(if_ctx_t ctx);
static void ixgbe_init_device_features(struct adapter *adapter);
static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void ixgbe_add_media_types(if_ctx_t ctx);
static void ixgbe_update_stats_counters(struct adapter *adapter);
static void ixgbe_config_link(struct adapter *adapter);
static void ixgbe_get_slot_info(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *adapter);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);

static void ixgbe_add_hw_stats(struct adapter *adapter);
static int  ixgbe_set_flowcntl(struct adapter *, int);
static int  ixgbe_set_advertise(struct adapter *, int);
static int  ixgbe_get_advertise(struct adapter *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
static void ixgbe_config_gpie(struct adapter *adapter);
static void ixgbe_config_delay_values(struct adapter *adapter);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
/* Export the device ID table for PNP (devmatch) autoloading */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ix, ixgbe_vendor_info_array,
    sizeof(ixgbe_vendor_info_array[0]), nitems(ixgbe_vendor_info_array) - 1);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	/*
	 * NOTE(review): TX queue interrupt enable deliberately maps to the
	 * RX handler here (same vector-unmask path) -- confirm intentional.
	 */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool, this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
359 */ 360 static int atr_sample_rate = 20; 361 362 extern struct if_txrx ixgbe_txrx; 363 364 static struct if_shared_ctx ixgbe_sctx_init = { 365 .isc_magic = IFLIB_MAGIC, 366 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ 367 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), 368 .isc_tx_maxsegsize = PAGE_SIZE, 369 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), 370 .isc_tso_maxsegsize = PAGE_SIZE, 371 .isc_rx_maxsize = PAGE_SIZE*4, 372 .isc_rx_nsegments = 1, 373 .isc_rx_maxsegsize = PAGE_SIZE*4, 374 .isc_nfl = 1, 375 .isc_ntxqs = 1, 376 .isc_nrxqs = 1, 377 378 .isc_admin_intrcnt = 1, 379 .isc_vendor_info = ixgbe_vendor_info_array, 380 .isc_driver_version = ixgbe_driver_version, 381 .isc_driver = &ixgbe_if_driver, 382 383 .isc_nrxd_min = {MIN_RXD}, 384 .isc_ntxd_min = {MIN_TXD}, 385 .isc_nrxd_max = {MAX_RXD}, 386 .isc_ntxd_max = {MAX_TXD}, 387 .isc_nrxd_default = {DEFAULT_RXD}, 388 .isc_ntxd_default = {DEFAULT_TXD}, 389 }; 390 391 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init; 392 393 /************************************************************************ 394 * ixgbe_if_tx_queues_alloc 395 ************************************************************************/ 396 static int 397 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 398 int ntxqs, int ntxqsets) 399 { 400 struct adapter *adapter = iflib_get_softc(ctx); 401 if_softc_ctx_t scctx = adapter->shared; 402 struct ix_tx_queue *que; 403 int i, j, error; 404 405 MPASS(adapter->num_tx_queues > 0); 406 MPASS(adapter->num_tx_queues == ntxqsets); 407 MPASS(ntxqs == 1); 408 409 /* Allocate queue structure memory */ 410 adapter->tx_queues = 411 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets, 412 M_IXGBE, M_NOWAIT | M_ZERO); 413 if (!adapter->tx_queues) { 414 device_printf(iflib_get_dev(ctx), 415 "Unable to allocate TX ring memory\n"); 416 return (ENOMEM); 417 } 418 419 for (i = 0, que = adapter->tx_queues; i < ntxqsets; 
i++, que++) { 420 struct tx_ring *txr = &que->txr; 421 422 /* In case SR-IOV is enabled, align the index properly */ 423 txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, 424 i); 425 426 txr->adapter = que->adapter = adapter; 427 adapter->active_queues |= (u64)1 << txr->me; 428 429 /* Allocate report status array */ 430 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO); 431 if (txr->tx_rsq == NULL) { 432 error = ENOMEM; 433 goto fail; 434 } 435 for (j = 0; j < scctx->isc_ntxd[0]; j++) 436 txr->tx_rsq[j] = QIDX_INVALID; 437 /* get the virtual and physical address of the hardware queues */ 438 txr->tail = IXGBE_TDT(txr->me); 439 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i]; 440 txr->tx_paddr = paddrs[i]; 441 442 txr->bytes = 0; 443 txr->total_packets = 0; 444 445 /* Set the rate at which we sample packets */ 446 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 447 txr->atr_sample = atr_sample_rate; 448 449 } 450 451 iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod, 452 "mod_task"); 453 iflib_config_gtask_init(ctx, &adapter->msf_task, ixgbe_handle_msf, 454 "msf_task"); 455 iflib_config_gtask_init(ctx, &adapter->phy_task, ixgbe_handle_phy, 456 "phy_task"); 457 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 458 iflib_config_gtask_init(ctx, &adapter->mbx_task, 459 ixgbe_handle_mbx, "mbx_task"); 460 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 461 iflib_config_gtask_init(ctx, &adapter->fdir_task, 462 ixgbe_reinit_fdir, "fdir_task"); 463 464 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n", 465 adapter->num_tx_queues); 466 467 return (0); 468 469 fail: 470 ixgbe_if_queues_free(ctx); 471 472 return (error); 473 } /* ixgbe_if_tx_queues_alloc */ 474 475 /************************************************************************ 476 * ixgbe_if_rx_queues_alloc 477 ************************************************************************/ 478 static int 479 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, 
caddr_t *vaddrs, uint64_t *paddrs, 480 int nrxqs, int nrxqsets) 481 { 482 struct adapter *adapter = iflib_get_softc(ctx); 483 struct ix_rx_queue *que; 484 int i; 485 486 MPASS(adapter->num_rx_queues > 0); 487 MPASS(adapter->num_rx_queues == nrxqsets); 488 MPASS(nrxqs == 1); 489 490 /* Allocate queue structure memory */ 491 adapter->rx_queues = 492 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets, 493 M_IXGBE, M_NOWAIT | M_ZERO); 494 if (!adapter->rx_queues) { 495 device_printf(iflib_get_dev(ctx), 496 "Unable to allocate TX ring memory\n"); 497 return (ENOMEM); 498 } 499 500 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { 501 struct rx_ring *rxr = &que->rxr; 502 503 /* In case SR-IOV is enabled, align the index properly */ 504 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, 505 i); 506 507 rxr->adapter = que->adapter = adapter; 508 509 /* get the virtual and physical address of the hw queues */ 510 rxr->tail = IXGBE_RDT(rxr->me); 511 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; 512 rxr->rx_paddr = paddrs[i]; 513 rxr->bytes = 0; 514 rxr->que = que; 515 } 516 517 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n", 518 adapter->num_rx_queues); 519 520 return (0); 521 } /* ixgbe_if_rx_queues_alloc */ 522 523 /************************************************************************ 524 * ixgbe_if_queues_free 525 ************************************************************************/ 526 static void 527 ixgbe_if_queues_free(if_ctx_t ctx) 528 { 529 struct adapter *adapter = iflib_get_softc(ctx); 530 struct ix_tx_queue *tx_que = adapter->tx_queues; 531 struct ix_rx_queue *rx_que = adapter->rx_queues; 532 int i; 533 534 if (tx_que != NULL) { 535 for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) { 536 struct tx_ring *txr = &tx_que->txr; 537 if (txr->tx_rsq == NULL) 538 break; 539 540 free(txr->tx_rsq, M_IXGBE); 541 txr->tx_rsq = NULL; 542 } 543 544 free(adapter->tx_queues, M_IXGBE); 545 
adapter->tx_queues = NULL; 546 } 547 if (rx_que != NULL) { 548 free(adapter->rx_queues, M_IXGBE); 549 adapter->rx_queues = NULL; 550 } 551 } /* ixgbe_if_queues_free */ 552 553 /************************************************************************ 554 * ixgbe_initialize_rss_mapping 555 ************************************************************************/ 556 static void 557 ixgbe_initialize_rss_mapping(struct adapter *adapter) 558 { 559 struct ixgbe_hw *hw = &adapter->hw; 560 u32 reta = 0, mrqc, rss_key[10]; 561 int queue_id, table_size, index_mult; 562 int i, j; 563 u32 rss_hash_config; 564 565 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 566 /* Fetch the configured RSS key */ 567 rss_getkey((uint8_t *)&rss_key); 568 } else { 569 /* set up random bits */ 570 arc4rand(&rss_key, sizeof(rss_key), 0); 571 } 572 573 /* Set multiplier for RETA setup and table size based on MAC */ 574 index_mult = 0x1; 575 table_size = 128; 576 switch (adapter->hw.mac.type) { 577 case ixgbe_mac_82598EB: 578 index_mult = 0x11; 579 break; 580 case ixgbe_mac_X550: 581 case ixgbe_mac_X550EM_x: 582 case ixgbe_mac_X550EM_a: 583 table_size = 512; 584 break; 585 default: 586 break; 587 } 588 589 /* Set up the redirection table */ 590 for (i = 0, j = 0; i < table_size; i++, j++) { 591 if (j == adapter->num_rx_queues) 592 j = 0; 593 594 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 595 /* 596 * Fetch the RSS bucket id for the given indirection 597 * entry. Cap it at the number of configured buckets 598 * (which is num_rx_queues.) 599 */ 600 queue_id = rss_get_indirection_to_bucket(i); 601 queue_id = queue_id % adapter->num_rx_queues; 602 } else 603 queue_id = (j * index_mult); 604 605 /* 606 * The low 8 bits are for hash value (n+0); 607 * The next 8 bits are for hash value (n+1), etc. 
608 */ 609 reta = reta >> 8; 610 reta = reta | (((uint32_t)queue_id) << 24); 611 if ((i & 3) == 3) { 612 if (i < 128) 613 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 614 else 615 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 616 reta); 617 reta = 0; 618 } 619 } 620 621 /* Now fill our hash function seeds */ 622 for (i = 0; i < 10; i++) 623 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 624 625 /* Perform hash on these packet types */ 626 if (adapter->feat_en & IXGBE_FEATURE_RSS) 627 rss_hash_config = rss_gethashconfig(); 628 else { 629 /* 630 * Disable UDP - IP fragments aren't currently being handled 631 * and so we end up with a mix of 2-tuple and 4-tuple 632 * traffic. 633 */ 634 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 635 | RSS_HASHTYPE_RSS_TCP_IPV4 636 | RSS_HASHTYPE_RSS_IPV6 637 | RSS_HASHTYPE_RSS_TCP_IPV6 638 | RSS_HASHTYPE_RSS_IPV6_EX 639 | RSS_HASHTYPE_RSS_TCP_IPV6_EX; 640 } 641 642 mrqc = IXGBE_MRQC_RSSEN; 643 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 644 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 645 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 646 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 647 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 648 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 649 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 650 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 651 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 652 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 653 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 654 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 655 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 656 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 657 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 658 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 659 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 660 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 661 mrqc |= ixgbe_get_mrqc(adapter->iov_mode); 662 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 663 } /* ixgbe_initialize_rss_mapping */ 664 665 
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = adapter->shared;
	struct ixgbe_hw    *hw = &adapter->hw;
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int                i, j;
	u32                bufsz, fctrl, srrctl, rxcsum;
	u32                hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Enable jumbo frame support when the MTU exceeds standard Ethernet */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size in 1KB units for SRRCTL.BSIZEPKT */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;

		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_rx_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	/* 82599 and later: set packet-split replication types */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &adapter->hw;
	if_softc_ctx_t     scctx = adapter->shared;
	struct ix_tx_queue *que;
	int                i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64            tdba = txr->tx_paddr;
		u32            txctrl = 0;
		int            j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Reset the ring's report-status bookkeeping */
		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register - hand the shared context template to iflib
 *   (dev is unused; iflib only needs the template)
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (ixgbe_sctx);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
874 * 875 * return 0 on success, positive on failure 876 ************************************************************************/ 877 static int 878 ixgbe_if_attach_pre(if_ctx_t ctx) 879 { 880 struct adapter *adapter; 881 device_t dev; 882 if_softc_ctx_t scctx; 883 struct ixgbe_hw *hw; 884 int error = 0; 885 u32 ctrl_ext; 886 887 INIT_DEBUGOUT("ixgbe_attach: begin"); 888 889 /* Allocate, clear, and link in our adapter structure */ 890 dev = iflib_get_dev(ctx); 891 adapter = iflib_get_softc(ctx); 892 adapter->hw.back = adapter; 893 adapter->ctx = ctx; 894 adapter->dev = dev; 895 scctx = adapter->shared = iflib_get_softc_ctx(ctx); 896 adapter->media = iflib_get_media(ctx); 897 hw = &adapter->hw; 898 899 /* Determine hardware revision */ 900 hw->vendor_id = pci_get_vendor(dev); 901 hw->device_id = pci_get_device(dev); 902 hw->revision_id = pci_get_revid(dev); 903 hw->subsystem_vendor_id = pci_get_subvendor(dev); 904 hw->subsystem_device_id = pci_get_subdevice(dev); 905 906 /* Do base PCI setup - map BAR0 */ 907 if (ixgbe_allocate_pci_resources(ctx)) { 908 device_printf(dev, "Allocation of PCI resources failed\n"); 909 return (ENXIO); 910 } 911 912 /* let hardware know driver is loaded */ 913 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 914 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 915 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 916 917 /* 918 * Initialize the shared code 919 */ 920 if (ixgbe_init_shared_code(hw) != 0) { 921 device_printf(dev, "Unable to initialize the shared code\n"); 922 error = ENXIO; 923 goto err_pci; 924 } 925 926 if (hw->mbx.ops.init_params) 927 hw->mbx.ops.init_params(hw); 928 929 hw->allow_unsupported_sfp = allow_unsupported_sfp; 930 931 if (hw->mac.type != ixgbe_mac_82598EB) 932 hw->phy.smart_speed = ixgbe_smart_speed; 933 934 ixgbe_init_device_features(adapter); 935 936 /* Enable WoL (if supported) */ 937 ixgbe_check_wol_support(adapter); 938 939 /* Verify adapter fan is still functional (if applicable) */ 940 if (adapter->feat_en & 
IXGBE_FEATURE_FAN_FAIL) { 941 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 942 ixgbe_check_fan_failure(adapter, esdp, FALSE); 943 } 944 945 /* Ensure SW/FW semaphore is free */ 946 ixgbe_init_swfw_semaphore(hw); 947 948 /* Set an initial default flow control value */ 949 hw->fc.requested_mode = ixgbe_flow_control; 950 951 hw->phy.reset_if_overtemp = TRUE; 952 error = ixgbe_reset_hw(hw); 953 hw->phy.reset_if_overtemp = FALSE; 954 if (error == IXGBE_ERR_SFP_NOT_PRESENT) { 955 /* 956 * No optics in this port, set up 957 * so the timer routine will probe 958 * for later insertion. 959 */ 960 adapter->sfp_probe = TRUE; 961 error = 0; 962 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 963 device_printf(dev, "Unsupported SFP+ module detected!\n"); 964 error = EIO; 965 goto err_pci; 966 } else if (error) { 967 device_printf(dev, "Hardware initialization failed\n"); 968 error = EIO; 969 goto err_pci; 970 } 971 972 /* Make sure we have a good EEPROM before we read from it */ 973 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { 974 device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); 975 error = EIO; 976 goto err_pci; 977 } 978 979 error = ixgbe_start_hw(hw); 980 switch (error) { 981 case IXGBE_ERR_EEPROM_VERSION: 982 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 983 break; 984 case IXGBE_ERR_SFP_NOT_SUPPORTED: 985 device_printf(dev, "Unsupported SFP+ Module\n"); 986 error = EIO; 987 goto err_pci; 988 case IXGBE_ERR_SFP_NOT_PRESENT: 989 device_printf(dev, "No SFP+ Module found\n"); 990 /* falls thru */ 991 default: 992 break; 993 } 994 995 /* Most of the iflib initialization... 
*/ 996 997 iflib_set_mac(ctx, hw->mac.addr); 998 switch (adapter->hw.mac.type) { 999 case ixgbe_mac_X550: 1000 case ixgbe_mac_X550EM_x: 1001 case ixgbe_mac_X550EM_a: 1002 scctx->isc_rss_table_size = 512; 1003 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; 1004 break; 1005 default: 1006 scctx->isc_rss_table_size = 128; 1007 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16; 1008 } 1009 1010 /* Allow legacy interrupts */ 1011 ixgbe_txrx.ift_legacy_intr = ixgbe_intr; 1012 1013 scctx->isc_txqsizes[0] = 1014 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + 1015 sizeof(u32), DBA_ALIGN), 1016 scctx->isc_rxqsizes[0] = 1017 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), 1018 DBA_ALIGN); 1019 1020 /* XXX */ 1021 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | 1022 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO; 1023 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1024 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER; 1025 scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR); 1026 } else { 1027 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP; 1028 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; 1029 scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR); 1030 } 1031 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; 1032 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; 1033 scctx->isc_tx_tso_segsize_max = PAGE_SIZE; 1034 1035 scctx->isc_txrx = &ixgbe_txrx; 1036 1037 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS; 1038 1039 return (0); 1040 1041 err_pci: 1042 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 1043 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 1044 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 1045 ixgbe_free_pci_resources(ctx); 1046 1047 return (error); 1048 } /* ixgbe_if_attach_pre */ 1049 1050 /********************************************************************* 1051 * ixgbe_if_attach_post - Device initialization routine, part 2 1052 * 1053 * Called during driver load, but after 
 * interrupts and resources have been allocated and configured.
 * Sets up some data structures not relevant to iflib.
 *
 * return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;

	dev = iflib_get_dev(ctx);
	adapter = iflib_get_softc(ctx);
	hw = &adapter->hw;


	/* Reject legacy-interrupt attach on hardware without that feature */
	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev, "Device does not support legacy interrupts");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	ixgbe_initialize_iov(adapter);

	/*
	 * NOTE(review): if a failure occurs past this point, mta is not
	 * freed on the err path — presumably released in detach; confirm.
	 */
	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);
	ixgbe_add_hw_stats(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 * Checks whether the adapter's ports are capable of
 * Wake On LAN by reading the adapter's NVM.
 *
 * Sets each port's hw->wol_enabled value depending
 * on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 dev_caps = 0;

	/* Find out WoL support for port */
	adapter->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	/* WoL is enabled if all ports support it, or only port 0 does */
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	     hw->bus.func == 0))
		adapter->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct adapter *adapter = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));

	/* Max frame = current MTU plus Ethernet header and CRC */
	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);

	ixgbe_add_media_types(ctx);

	/* Autoselect media by default */
	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 *
 *   Return a driver-maintained interface statistic; counters the
 *   hardware does not track (collisions, oqdrops) are reported as 0,
 *   anything unhandled falls back to the generic ifnet counters.
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	case IFCOUNTER_OMCASTS:
		return (adapter->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (adapter->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (0);
	case IFCOUNTER_IERRORS:
		return (adapter->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 *
 *   Service an SIOCGI2C-style request by reading req->len bytes from
 *   the PHY's I2C bus into req->data.
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;


	/* No I2C read op means the PHY cannot service this request */
	if (hw->phy.ops.read_i2c_byte == NULL)
		return (ENXIO);
	/*
	 * NOTE(review): req->len is used unbounded here — if it can exceed
	 * sizeof(req->data) this overruns the buffer; verify the caller
	 * clamps it, or clamp here.
	 */
	for (i = 0; i < req->len; i++)
		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
		    req->dev_addr, &req->data[i]);
	return (0);
} /* ixgbe_if_i2c_req */

/************************************************************************
 * ixgbe_add_media_types
 *
 *   Register every ifmedia type the PHY's supported physical layers
 *   allow; KR/KX types are mapped to nearest equivalents when the
 *   IFM_ETH_XTYPE media defines are unavailable.
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = iflib_get_dev(ctx);
	u64 layer;

	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		/* Multispeed fiber also supports the 1G rate */
		if (hw->phy.multispeed_fiber)
			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	/* No KR/KX media defines: announce nearest-equivalent mappings */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 *
 *   Return TRUE if this MAC/PHY combination uses a pluggable SFP
 *   module (and therefore needs module insertion handling).
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* On 82598, only the NL PHY is SFP-based */
		if (hw->phy.type == ixgbe_phy_nl)
			return (TRUE);
		return (FALSE);
	case ixgbe_mac_82599EB:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (TRUE);
		default:
			return (FALSE);
		}
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
			return (TRUE);
		return (FALSE);
	default:
		return (FALSE);
	}
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 *
 *   For SFP ports, defer to the module-insertion task; otherwise read
 *   the link state and program the MAC's link setup directly.
 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/* Let the SFP module task handle link configuration */
		GROUPTASK_ENQUEUE(&adapter->mod_task);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: fall back to hardware capabilities */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;

	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	/* Per-queue packet/drop counters, first 16 queues only */
	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	/*
	 * NOTE(review): missed_rx (and total_missed_rx below) are never
	 * accumulated here, so these adjustments are currently no-ops —
	 * confirm whether MPC totals were meant to feed them.
	 */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	stats->gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 36-bit octet counters are split low/high */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	/* Exclude transmitted flow-control frames from the TX totals */
	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;

	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(adapter, stats->gprc);
	IXGBE_SET_OPACKETS(adapter, stats->gptc);
	IXGBE_SET_IBYTES(adapter, stats->gorc);
	IXGBE_SET_OBYTES(adapter, stats->gotc);
	IXGBE_SET_IMCASTS(adapter, stats->mprc);
	IXGBE_SET_OMCASTS(adapter, stats->mptc);
	IXGBE_SET_COLLISIONS(adapter, 0);
	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
} /* ixgbe_update_stats_counters */

/************************************************************************
 * ixgbe_add_hw_stats
 *
 * Add sysctl variables, one per statistic, to the system.
 ************************************************************************/
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
	device_t dev = iflib_get_dev(adapter->ctx);
	struct ix_rx_queue *rx_que;
	struct ix_tx_queue *tx_que;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;
	int i;

#define QUEUE_NAME_LEN 32
	char namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");

	/* One "queueN" sysctl node per TX queue */
	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		/*
		 * NOTE(review): sizeof(txr) is the size of a pointer, not
		 * the ring struct; arg2 appears unused by the handlers but
		 * confirm intent.
		 */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &txr->tso_tx, "TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &txr->total_packets,
		    "Queue Packets Transmitted");
	}

	/* One "queueN" sysctl node per RX queue */
	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
		    sizeof(&adapter->rx_queues[i]),
		    ixgbe_sysctl_interrupt_rate_handler, "IU",
		    "Interrupt Rate");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
	}

	/* MAC stats get their own sub node */

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD, NULL, "MAC Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");

	/* Flow Control stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
	    CTLFLAG_RD, &stats->xec, "Checksum Errors");

	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 * Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if (!txr)
		return (0);

	val = IXGBE_READ_REG(&txr->adapter->hw,
IXGBE_TDH(txr->me)); 1710 error = sysctl_handle_int(oidp, &val, 0, req); 1711 if (error || !req->newptr) 1712 return error; 1713 1714 return (0); 1715 } /* ixgbe_sysctl_tdh_handler */ 1716 1717 /************************************************************************ 1718 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1719 * 1720 * Retrieves the TDT value from the hardware 1721 ************************************************************************/ 1722 static int 1723 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1724 { 1725 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1726 int error; 1727 unsigned int val; 1728 1729 if (!txr) 1730 return (0); 1731 1732 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); 1733 error = sysctl_handle_int(oidp, &val, 0, req); 1734 if (error || !req->newptr) 1735 return error; 1736 1737 return (0); 1738 } /* ixgbe_sysctl_tdt_handler */ 1739 1740 /************************************************************************ 1741 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1742 * 1743 * Retrieves the RDH value from the hardware 1744 ************************************************************************/ 1745 static int 1746 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1747 { 1748 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1749 int error; 1750 unsigned int val; 1751 1752 if (!rxr) 1753 return (0); 1754 1755 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); 1756 error = sysctl_handle_int(oidp, &val, 0, req); 1757 if (error || !req->newptr) 1758 return error; 1759 1760 return (0); 1761 } /* ixgbe_sysctl_rdh_handler */ 1762 1763 /************************************************************************ 1764 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1765 * 1766 * Retrieves the RDT value from the hardware 1767 ************************************************************************/ 1768 static int 1769 
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1770 { 1771 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1772 int error; 1773 unsigned int val; 1774 1775 if (!rxr) 1776 return (0); 1777 1778 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); 1779 error = sysctl_handle_int(oidp, &val, 0, req); 1780 if (error || !req->newptr) 1781 return error; 1782 1783 return (0); 1784 } /* ixgbe_sysctl_rdt_handler */ 1785 1786 /************************************************************************ 1787 * ixgbe_if_vlan_register 1788 * 1789 * Run via vlan config EVENT, it enables us to use the 1790 * HW Filter table since we can get the vlan id. This 1791 * just creates the entry in the soft version of the 1792 * VFTA, init will repopulate the real table. 1793 ************************************************************************/ 1794 static void 1795 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1796 { 1797 struct adapter *adapter = iflib_get_softc(ctx); 1798 u16 index, bit; 1799 1800 index = (vtag >> 5) & 0x7F; 1801 bit = vtag & 0x1F; 1802 adapter->shadow_vfta[index] |= (1 << bit); 1803 ++adapter->num_vlans; 1804 ixgbe_setup_vlan_hw_support(ctx); 1805 } /* ixgbe_if_vlan_register */ 1806 1807 /************************************************************************ 1808 * ixgbe_if_vlan_unregister 1809 * 1810 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 
1811 ************************************************************************/ 1812 static void 1813 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) 1814 { 1815 struct adapter *adapter = iflib_get_softc(ctx); 1816 u16 index, bit; 1817 1818 index = (vtag >> 5) & 0x7F; 1819 bit = vtag & 0x1F; 1820 adapter->shadow_vfta[index] &= ~(1 << bit); 1821 --adapter->num_vlans; 1822 /* Re-init to load the changes */ 1823 ixgbe_setup_vlan_hw_support(ctx); 1824 } /* ixgbe_if_vlan_unregister */ 1825 1826 /************************************************************************ 1827 * ixgbe_setup_vlan_hw_support 1828 ************************************************************************/ 1829 static void 1830 ixgbe_setup_vlan_hw_support(if_ctx_t ctx) 1831 { 1832 struct ifnet *ifp = iflib_get_ifp(ctx); 1833 struct adapter *adapter = iflib_get_softc(ctx); 1834 struct ixgbe_hw *hw = &adapter->hw; 1835 struct rx_ring *rxr; 1836 int i; 1837 u32 ctrl; 1838 1839 1840 /* 1841 * We get here thru init_locked, meaning 1842 * a soft reset, this has already cleared 1843 * the VFTA and other state, so if there 1844 * have been no vlan's registered do nothing. 1845 */ 1846 if (adapter->num_vlans == 0) 1847 return; 1848 1849 /* Setup the queues for vlans */ 1850 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1851 for (i = 0; i < adapter->num_rx_queues; i++) { 1852 rxr = &adapter->rx_queues[i].rxr; 1853 /* On 82599 the VLAN enable is per/queue in RXDCTL */ 1854 if (hw->mac.type != ixgbe_mac_82598EB) { 1855 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 1856 ctrl |= IXGBE_RXDCTL_VME; 1857 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 1858 } 1859 rxr->vtag_strip = TRUE; 1860 } 1861 } 1862 1863 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 1864 return; 1865 /* 1866 * A soft reset zero's out the VFTA, so 1867 * we need to repopulate it now. 
1868 */ 1869 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1870 if (adapter->shadow_vfta[i] != 0) 1871 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 1872 adapter->shadow_vfta[i]); 1873 1874 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1875 /* Enable the Filter Table if enabled */ 1876 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 1877 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1878 ctrl |= IXGBE_VLNCTRL_VFE; 1879 } 1880 if (hw->mac.type == ixgbe_mac_82598EB) 1881 ctrl |= IXGBE_VLNCTRL_VME; 1882 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1883 } /* ixgbe_setup_vlan_hw_support */ 1884 1885 /************************************************************************ 1886 * ixgbe_get_slot_info 1887 * 1888 * Get the width and transaction speed of 1889 * the slot this adapter is plugged into. 1890 ************************************************************************/ 1891 static void 1892 ixgbe_get_slot_info(struct adapter *adapter) 1893 { 1894 device_t dev = iflib_get_dev(adapter->ctx); 1895 struct ixgbe_hw *hw = &adapter->hw; 1896 int bus_info_valid = TRUE; 1897 u32 offset; 1898 u16 link; 1899 1900 /* Some devices are behind an internal bridge */ 1901 switch (hw->device_id) { 1902 case IXGBE_DEV_ID_82599_SFP_SF_QP: 1903 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 1904 goto get_parent_info; 1905 default: 1906 break; 1907 } 1908 1909 ixgbe_get_bus_info(hw); 1910 1911 /* 1912 * Some devices don't use PCI-E, but there is no need 1913 * to display "Unknown" for bus speed and width. 1914 */ 1915 switch (hw->mac.type) { 1916 case ixgbe_mac_X550EM_x: 1917 case ixgbe_mac_X550EM_a: 1918 return; 1919 default: 1920 goto display; 1921 } 1922 1923 get_parent_info: 1924 /* 1925 * For the Quad port adapter we need to parse back 1926 * up the PCI tree to find the speed of the expansion 1927 * slot into which this adapter is plugged. A bit more work. 
1928 */ 1929 dev = device_get_parent(device_get_parent(dev)); 1930 #ifdef IXGBE_DEBUG 1931 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 1932 pci_get_slot(dev), pci_get_function(dev)); 1933 #endif 1934 dev = device_get_parent(device_get_parent(dev)); 1935 #ifdef IXGBE_DEBUG 1936 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 1937 pci_get_slot(dev), pci_get_function(dev)); 1938 #endif 1939 /* Now get the PCI Express Capabilities offset */ 1940 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 1941 /* 1942 * Hmm...can't get PCI-Express capabilities. 1943 * Falling back to default method. 1944 */ 1945 bus_info_valid = FALSE; 1946 ixgbe_get_bus_info(hw); 1947 goto display; 1948 } 1949 /* ...and read the Link Status Register */ 1950 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 1951 ixgbe_set_pci_config_data_generic(hw, link); 1952 1953 display: 1954 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 1955 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 1956 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 1957 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 1958 "Unknown"), 1959 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 1960 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 1961 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? 
"Width x1" : 1962 "Unknown")); 1963 1964 if (bus_info_valid) { 1965 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 1966 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 1967 (hw->bus.speed == ixgbe_bus_speed_2500))) { 1968 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1969 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 1970 } 1971 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 1972 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 1973 (hw->bus.speed < ixgbe_bus_speed_8000))) { 1974 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1975 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 1976 } 1977 } else 1978 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n"); 1979 1980 return; 1981 } /* ixgbe_get_slot_info */ 1982 1983 /************************************************************************ 1984 * ixgbe_if_msix_intr_assign 1985 * 1986 * Setup MSI-X Interrupt resources and handlers 1987 ************************************************************************/ 1988 static int 1989 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 1990 { 1991 struct adapter *adapter = iflib_get_softc(ctx); 1992 struct ix_rx_queue *rx_que = adapter->rx_queues; 1993 struct ix_tx_queue *tx_que; 1994 int error, rid, vector = 0; 1995 int cpu_id = 0; 1996 char buf[16]; 1997 1998 /* Admin Que is vector 0*/ 1999 rid = vector + 1; 2000 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) { 2001 rid = vector + 1; 2002 2003 snprintf(buf, sizeof(buf), "rxq%d", i); 2004 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 2005 IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 2006 2007 if (error) { 2008 device_printf(iflib_get_dev(ctx), 2009 "Failed to allocate que int %d 
err: %d", i, error); 2010 adapter->num_rx_queues = i + 1; 2011 goto fail; 2012 } 2013 2014 rx_que->msix = vector; 2015 adapter->active_queues |= (u64)(1 << rx_que->msix); 2016 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 2017 /* 2018 * The queue ID is used as the RSS layer bucket ID. 2019 * We look up the queue ID -> RSS CPU ID and select 2020 * that. 2021 */ 2022 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2023 } else { 2024 /* 2025 * Bind the msix vector, and thus the 2026 * rings to the corresponding cpu. 2027 * 2028 * This just happens to match the default RSS 2029 * round-robin bucket -> queue -> CPU allocation. 2030 */ 2031 if (adapter->num_rx_queues > 1) 2032 cpu_id = i; 2033 } 2034 2035 } 2036 for (int i = 0; i < adapter->num_tx_queues; i++) { 2037 snprintf(buf, sizeof(buf), "txq%d", i); 2038 tx_que = &adapter->tx_queues[i]; 2039 tx_que->msix = i % adapter->num_rx_queues; 2040 iflib_softirq_alloc_generic(ctx, 2041 &adapter->rx_queues[tx_que->msix].que_irq, 2042 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2043 } 2044 rid = vector + 1; 2045 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, 2046 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq"); 2047 if (error) { 2048 device_printf(iflib_get_dev(ctx), 2049 "Failed to register admin handler"); 2050 return (error); 2051 } 2052 2053 adapter->vector = vector; 2054 2055 return (0); 2056 fail: 2057 iflib_irq_free(ctx, &adapter->irq); 2058 rx_que = adapter->rx_queues; 2059 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) 2060 iflib_irq_free(ctx, &rx_que->que_irq); 2061 2062 return (error); 2063 } /* ixgbe_if_msix_intr_assign */ 2064 2065 /********************************************************************* 2066 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2067 **********************************************************************/ 2068 static int 2069 ixgbe_msix_que(void *arg) 2070 { 2071 struct ix_rx_queue *que = arg; 2072 struct adapter *adapter = que->adapter; 2073 struct ifnet 
*ifp = iflib_get_ifp(que->adapter->ctx); 2074 2075 /* Protect against spurious interrupts */ 2076 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2077 return 0; 2078 2079 ixgbe_disable_queue(adapter, que->msix); 2080 ++que->irqs; 2081 2082 return (FILTER_SCHEDULE_THREAD); 2083 } /* ixgbe_msix_que */ 2084 2085 /************************************************************************ 2086 * ixgbe_media_status - Media Ioctl callback 2087 * 2088 * Called whenever the user queries the status of 2089 * the interface using ifconfig. 2090 ************************************************************************/ 2091 static void 2092 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2093 { 2094 struct adapter *adapter = iflib_get_softc(ctx); 2095 struct ixgbe_hw *hw = &adapter->hw; 2096 int layer; 2097 2098 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2099 2100 iflib_admin_intr_deferred(ctx); 2101 2102 ifmr->ifm_status = IFM_AVALID; 2103 ifmr->ifm_active = IFM_ETHER; 2104 2105 if (!adapter->link_active) 2106 return; 2107 2108 ifmr->ifm_status |= IFM_ACTIVE; 2109 layer = adapter->phy_layer; 2110 2111 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2112 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2113 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2114 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2115 switch (adapter->link_speed) { 2116 case IXGBE_LINK_SPEED_10GB_FULL: 2117 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2118 break; 2119 case IXGBE_LINK_SPEED_1GB_FULL: 2120 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2121 break; 2122 case IXGBE_LINK_SPEED_100_FULL: 2123 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2124 break; 2125 case IXGBE_LINK_SPEED_10_FULL: 2126 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2127 break; 2128 } 2129 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2130 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2131 switch (adapter->link_speed) { 2132 case IXGBE_LINK_SPEED_10GB_FULL: 2133 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2134 break; 2135 } 2136 if (layer & 
IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2137 switch (adapter->link_speed) { 2138 case IXGBE_LINK_SPEED_10GB_FULL: 2139 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2140 break; 2141 case IXGBE_LINK_SPEED_1GB_FULL: 2142 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2143 break; 2144 } 2145 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2146 switch (adapter->link_speed) { 2147 case IXGBE_LINK_SPEED_10GB_FULL: 2148 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2149 break; 2150 case IXGBE_LINK_SPEED_1GB_FULL: 2151 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2152 break; 2153 } 2154 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2155 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2156 switch (adapter->link_speed) { 2157 case IXGBE_LINK_SPEED_10GB_FULL: 2158 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2159 break; 2160 case IXGBE_LINK_SPEED_1GB_FULL: 2161 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2162 break; 2163 } 2164 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2165 switch (adapter->link_speed) { 2166 case IXGBE_LINK_SPEED_10GB_FULL: 2167 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2168 break; 2169 } 2170 /* 2171 * XXX: These need to use the proper media types once 2172 * they're added. 
2173 */ 2174 #ifndef IFM_ETH_XTYPE 2175 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2176 switch (adapter->link_speed) { 2177 case IXGBE_LINK_SPEED_10GB_FULL: 2178 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2179 break; 2180 case IXGBE_LINK_SPEED_2_5GB_FULL: 2181 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2182 break; 2183 case IXGBE_LINK_SPEED_1GB_FULL: 2184 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2185 break; 2186 } 2187 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2188 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2189 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2190 switch (adapter->link_speed) { 2191 case IXGBE_LINK_SPEED_10GB_FULL: 2192 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2193 break; 2194 case IXGBE_LINK_SPEED_2_5GB_FULL: 2195 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2196 break; 2197 case IXGBE_LINK_SPEED_1GB_FULL: 2198 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2199 break; 2200 } 2201 #else 2202 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2203 switch (adapter->link_speed) { 2204 case IXGBE_LINK_SPEED_10GB_FULL: 2205 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 2206 break; 2207 case IXGBE_LINK_SPEED_2_5GB_FULL: 2208 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2209 break; 2210 case IXGBE_LINK_SPEED_1GB_FULL: 2211 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2212 break; 2213 } 2214 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2215 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2216 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2217 switch (adapter->link_speed) { 2218 case IXGBE_LINK_SPEED_10GB_FULL: 2219 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 2220 break; 2221 case IXGBE_LINK_SPEED_2_5GB_FULL: 2222 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2223 break; 2224 case IXGBE_LINK_SPEED_1GB_FULL: 2225 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2226 break; 2227 } 2228 #endif 2229 2230 /* If nothing is recognized... 
*/ 2231 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 2232 ifmr->ifm_active |= IFM_UNKNOWN; 2233 2234 /* Display current flow control setting used on link */ 2235 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 2236 hw->fc.current_mode == ixgbe_fc_full) 2237 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2238 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 2239 hw->fc.current_mode == ixgbe_fc_full) 2240 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2241 } /* ixgbe_media_status */ 2242 2243 /************************************************************************ 2244 * ixgbe_media_change - Media Ioctl callback 2245 * 2246 * Called when the user changes speed/duplex using 2247 * media/mediopt option with ifconfig. 2248 ************************************************************************/ 2249 static int 2250 ixgbe_if_media_change(if_ctx_t ctx) 2251 { 2252 struct adapter *adapter = iflib_get_softc(ctx); 2253 struct ifmedia *ifm = iflib_get_media(ctx); 2254 struct ixgbe_hw *hw = &adapter->hw; 2255 ixgbe_link_speed speed = 0; 2256 2257 INIT_DEBUGOUT("ixgbe_if_media_change: begin"); 2258 2259 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2260 return (EINVAL); 2261 2262 if (hw->phy.media_type == ixgbe_media_type_backplane) 2263 return (EPERM); 2264 2265 /* 2266 * We don't actually need to check against the supported 2267 * media types of the adapter; ifmedia will take care of 2268 * that for us. 
2269 */ 2270 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2271 case IFM_AUTO: 2272 case IFM_10G_T: 2273 speed |= IXGBE_LINK_SPEED_100_FULL; 2274 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2275 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2276 break; 2277 case IFM_10G_LRM: 2278 case IFM_10G_LR: 2279 #ifndef IFM_ETH_XTYPE 2280 case IFM_10G_SR: /* KR, too */ 2281 case IFM_10G_CX4: /* KX4 */ 2282 #else 2283 case IFM_10G_KR: 2284 case IFM_10G_KX4: 2285 #endif 2286 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2287 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2288 break; 2289 #ifndef IFM_ETH_XTYPE 2290 case IFM_1000_CX: /* KX */ 2291 #else 2292 case IFM_1000_KX: 2293 #endif 2294 case IFM_1000_LX: 2295 case IFM_1000_SX: 2296 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2297 break; 2298 case IFM_1000_T: 2299 speed |= IXGBE_LINK_SPEED_100_FULL; 2300 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2301 break; 2302 case IFM_10G_TWINAX: 2303 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2304 break; 2305 case IFM_100_TX: 2306 speed |= IXGBE_LINK_SPEED_100_FULL; 2307 break; 2308 case IFM_10_T: 2309 speed |= IXGBE_LINK_SPEED_10_FULL; 2310 break; 2311 default: 2312 goto invalid; 2313 } 2314 2315 hw->mac.autotry_restart = TRUE; 2316 hw->mac.ops.setup_link(hw, speed, TRUE); 2317 adapter->advertise = 2318 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 2319 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 2320 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 2321 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 
8 : 0); 2322 2323 return (0); 2324 2325 invalid: 2326 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2327 2328 return (EINVAL); 2329 } /* ixgbe_if_media_change */ 2330 2331 /************************************************************************ 2332 * ixgbe_set_promisc 2333 ************************************************************************/ 2334 static int 2335 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2336 { 2337 struct adapter *adapter = iflib_get_softc(ctx); 2338 struct ifnet *ifp = iflib_get_ifp(ctx); 2339 u32 rctl; 2340 int mcnt = 0; 2341 2342 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2343 rctl &= (~IXGBE_FCTRL_UPE); 2344 if (ifp->if_flags & IFF_ALLMULTI) 2345 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2346 else { 2347 mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES); 2348 } 2349 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2350 rctl &= (~IXGBE_FCTRL_MPE); 2351 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2352 2353 if (ifp->if_flags & IFF_PROMISC) { 2354 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2355 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2356 } else if (ifp->if_flags & IFF_ALLMULTI) { 2357 rctl |= IXGBE_FCTRL_MPE; 2358 rctl &= ~IXGBE_FCTRL_UPE; 2359 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2360 } 2361 return (0); 2362 } /* ixgbe_if_promisc_set */ 2363 2364 /************************************************************************ 2365 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2366 ************************************************************************/ 2367 static int 2368 ixgbe_msix_link(void *arg) 2369 { 2370 struct adapter *adapter = arg; 2371 struct ixgbe_hw *hw = &adapter->hw; 2372 u32 eicr, eicr_mask; 2373 s32 retval; 2374 2375 ++adapter->link_irq; 2376 2377 /* Pause other interrupts */ 2378 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2379 2380 /* First get the cause */ 2381 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2382 /* Be sure the queue bits are not cleared */ 2383 eicr &= 
~IXGBE_EICR_RTX_QUEUE; 2384 /* Clear interrupt with write */ 2385 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2386 2387 /* Link status change */ 2388 if (eicr & IXGBE_EICR_LSC) { 2389 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2390 iflib_admin_intr_deferred(adapter->ctx); 2391 } 2392 2393 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 2394 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && 2395 (eicr & IXGBE_EICR_FLOW_DIR)) { 2396 /* This is probably overkill :) */ 2397 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1)) 2398 return (FILTER_HANDLED); 2399 /* Disable the interrupt */ 2400 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); 2401 GROUPTASK_ENQUEUE(&adapter->fdir_task); 2402 } else 2403 if (eicr & IXGBE_EICR_ECC) { 2404 device_printf(iflib_get_dev(adapter->ctx), 2405 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n"); 2406 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2407 } 2408 2409 /* Check for over temp condition */ 2410 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 2411 switch (adapter->hw.mac.type) { 2412 case ixgbe_mac_X550EM_a: 2413 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 2414 break; 2415 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 2416 IXGBE_EICR_GPI_SDP0_X550EM_a); 2417 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2418 IXGBE_EICR_GPI_SDP0_X550EM_a); 2419 retval = hw->phy.ops.check_overtemp(hw); 2420 if (retval != IXGBE_ERR_OVERTEMP) 2421 break; 2422 device_printf(iflib_get_dev(adapter->ctx), 2423 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2424 device_printf(iflib_get_dev(adapter->ctx), 2425 "System shutdown required!\n"); 2426 break; 2427 default: 2428 if (!(eicr & IXGBE_EICR_TS)) 2429 break; 2430 retval = hw->phy.ops.check_overtemp(hw); 2431 if (retval != IXGBE_ERR_OVERTEMP) 2432 break; 2433 device_printf(iflib_get_dev(adapter->ctx), 2434 "\nCRITICAL: OVER TEMP!! 
PHY IS SHUT DOWN!!\n"); 2435 device_printf(iflib_get_dev(adapter->ctx), 2436 "System shutdown required!\n"); 2437 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 2438 break; 2439 } 2440 } 2441 2442 /* Check for VF message */ 2443 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) && 2444 (eicr & IXGBE_EICR_MAILBOX)) 2445 GROUPTASK_ENQUEUE(&adapter->mbx_task); 2446 } 2447 2448 if (ixgbe_is_sfp(hw)) { 2449 /* Pluggable optics-related interrupt */ 2450 if (hw->mac.type >= ixgbe_mac_X540) 2451 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 2452 else 2453 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 2454 2455 if (eicr & eicr_mask) { 2456 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2457 if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1)) 2458 GROUPTASK_ENQUEUE(&adapter->mod_task); 2459 } 2460 2461 if ((hw->mac.type == ixgbe_mac_82599EB) && 2462 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 2463 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2464 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2465 if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1)) 2466 GROUPTASK_ENQUEUE(&adapter->msf_task); 2467 } 2468 } 2469 2470 /* Check for fan failure */ 2471 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 2472 ixgbe_check_fan_failure(adapter, eicr, TRUE); 2473 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2474 } 2475 2476 /* External PHY interrupt */ 2477 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 2478 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2479 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 2480 GROUPTASK_ENQUEUE(&adapter->phy_task); 2481 } 2482 2483 /* Re-enable other interrupts */ 2484 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 2485 2486 return (FILTER_HANDLED); 2487 } /* ixgbe_msix_link */ 2488 2489 /************************************************************************ 2490 * ixgbe_sysctl_interrupt_rate_handler 2491 ************************************************************************/ 2492 static int 2493 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2494 { 
2495 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2496 int error; 2497 unsigned int reg, usec, rate; 2498 2499 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); 2500 usec = ((reg & 0x0FF8) >> 3); 2501 if (usec > 0) 2502 rate = 500000 / usec; 2503 else 2504 rate = 0; 2505 error = sysctl_handle_int(oidp, &rate, 0, req); 2506 if (error || !req->newptr) 2507 return error; 2508 reg &= ~0xfff; /* default, no limitation */ 2509 ixgbe_max_interrupt_rate = 0; 2510 if (rate > 0 && rate < 500000) { 2511 if (rate < 1000) 2512 rate = 1000; 2513 ixgbe_max_interrupt_rate = rate; 2514 reg |= ((4000000/rate) & 0xff8); 2515 } 2516 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); 2517 2518 return (0); 2519 } /* ixgbe_sysctl_interrupt_rate_handler */ 2520 2521 /************************************************************************ 2522 * ixgbe_add_device_sysctls 2523 ************************************************************************/ 2524 static void 2525 ixgbe_add_device_sysctls(if_ctx_t ctx) 2526 { 2527 struct adapter *adapter = iflib_get_softc(ctx); 2528 device_t dev = iflib_get_dev(ctx); 2529 struct ixgbe_hw *hw = &adapter->hw; 2530 struct sysctl_oid_list *child; 2531 struct sysctl_ctx_list *ctx_list; 2532 2533 ctx_list = device_get_sysctl_ctx(dev); 2534 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2535 2536 /* Sysctls for all devices */ 2537 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2538 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I", 2539 IXGBE_SYSCTL_DESC_SET_FC); 2540 2541 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2542 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I", 2543 IXGBE_SYSCTL_DESC_ADV_SPEED); 2544 2545 #ifdef IXGBE_DEBUG 2546 /* testing sysctls (for all devices) */ 2547 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2548 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state, 2549 "I", "PCI Power State"); 2550 2551 
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2552 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0, 2553 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2554 #endif 2555 /* for X550 series devices */ 2556 if (hw->mac.type >= ixgbe_mac_X550) 2557 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2558 CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac, 2559 "I", "DMA Coalesce"); 2560 2561 /* for WoL-capable devices */ 2562 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2563 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2564 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 2565 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2566 2567 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 2568 CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc, 2569 "I", "Enable/Disable Wake Up Filters"); 2570 } 2571 2572 /* for X552/X557-AT devices */ 2573 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2574 struct sysctl_oid *phy_node; 2575 struct sysctl_oid_list *phy_list; 2576 2577 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2578 CTLFLAG_RD, NULL, "External PHY sysctls"); 2579 phy_list = SYSCTL_CHILDREN(phy_node); 2580 2581 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2582 CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp, 2583 "I", "Current External PHY Temperature (Celsius)"); 2584 2585 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2586 "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, 2587 ixgbe_sysctl_phy_overtemp_occurred, "I", 2588 "External PHY High Temperature Event Occurred"); 2589 } 2590 2591 if (adapter->feat_cap & IXGBE_FEATURE_EEE) { 2592 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2593 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 2594 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2595 } 2596 } /* ixgbe_add_device_sysctls */ 2597 2598 /************************************************************************ 2599 * ixgbe_allocate_pci_resources 2600 
************************************************************************/ 2601 static int 2602 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2603 { 2604 struct adapter *adapter = iflib_get_softc(ctx); 2605 device_t dev = iflib_get_dev(ctx); 2606 int rid; 2607 2608 rid = PCIR_BAR(0); 2609 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2610 RF_ACTIVE); 2611 2612 if (!(adapter->pci_mem)) { 2613 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2614 return (ENXIO); 2615 } 2616 2617 /* Save bus_space values for READ/WRITE_REG macros */ 2618 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); 2619 adapter->osdep.mem_bus_space_handle = 2620 rman_get_bushandle(adapter->pci_mem); 2621 /* Set hw values for shared code */ 2622 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; 2623 2624 return (0); 2625 } /* ixgbe_allocate_pci_resources */ 2626 2627 /************************************************************************ 2628 * ixgbe_detach - Device removal routine 2629 * 2630 * Called when the driver is being removed. 2631 * Stops the adapter and deallocates all the resources 2632 * that were allocated for driver operation. 
2633 * 2634 * return 0 on success, positive on failure 2635 ************************************************************************/ 2636 static int 2637 ixgbe_if_detach(if_ctx_t ctx) 2638 { 2639 struct adapter *adapter = iflib_get_softc(ctx); 2640 device_t dev = iflib_get_dev(ctx); 2641 u32 ctrl_ext; 2642 2643 INIT_DEBUGOUT("ixgbe_detach: begin"); 2644 2645 if (ixgbe_pci_iov_detach(dev) != 0) { 2646 device_printf(dev, "SR-IOV in use; detach first.\n"); 2647 return (EBUSY); 2648 } 2649 2650 iflib_config_gtask_deinit(&adapter->mod_task); 2651 iflib_config_gtask_deinit(&adapter->msf_task); 2652 iflib_config_gtask_deinit(&adapter->phy_task); 2653 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 2654 iflib_config_gtask_deinit(&adapter->mbx_task); 2655 2656 ixgbe_setup_low_power_mode(ctx); 2657 2658 /* let hardware know driver is unloading */ 2659 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 2660 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 2662 2663 ixgbe_free_pci_resources(ctx); 2664 free(adapter->mta, M_IXGBE); 2665 2666 return (0); 2667 } /* ixgbe_if_detach */ 2668 2669 /************************************************************************ 2670 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2671 * 2672 * Prepare the adapter/port for LPLU and/or WoL 2673 ************************************************************************/ 2674 static int 2675 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2676 { 2677 struct adapter *adapter = iflib_get_softc(ctx); 2678 struct ixgbe_hw *hw = &adapter->hw; 2679 device_t dev = iflib_get_dev(ctx); 2680 s32 error = 0; 2681 2682 if (!hw->wol_enabled) 2683 ixgbe_set_phy_power(hw, FALSE); 2684 2685 /* Limit power management flow to X550EM baseT */ 2686 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2687 hw->phy.ops.enter_lplu) { 2688 /* Turn off support for APM wakeup. 
(Using ACPI instead) */ 2689 IXGBE_WRITE_REG(hw, IXGBE_GRC, 2690 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 2691 2692 /* 2693 * Clear Wake Up Status register to prevent any previous wakeup 2694 * events from waking us up immediately after we suspend. 2695 */ 2696 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2697 2698 /* 2699 * Program the Wakeup Filter Control register with user filter 2700 * settings 2701 */ 2702 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 2703 2704 /* Enable wakeups and power management in Wakeup Control */ 2705 IXGBE_WRITE_REG(hw, IXGBE_WUC, 2706 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 2707 2708 /* X550EM baseT adapters need a special LPLU flow */ 2709 hw->phy.reset_disable = TRUE; 2710 ixgbe_if_stop(ctx); 2711 error = hw->phy.ops.enter_lplu(hw); 2712 if (error) 2713 device_printf(dev, "Error entering LPLU: %d\n", error); 2714 hw->phy.reset_disable = FALSE; 2715 } else { 2716 /* Just stop for other adapters */ 2717 ixgbe_if_stop(ctx); 2718 } 2719 2720 return error; 2721 } /* ixgbe_setup_low_power_mode */ 2722 2723 /************************************************************************ 2724 * ixgbe_shutdown - Shutdown entry point 2725 ************************************************************************/ 2726 static int 2727 ixgbe_if_shutdown(if_ctx_t ctx) 2728 { 2729 int error = 0; 2730 2731 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 2732 2733 error = ixgbe_setup_low_power_mode(ctx); 2734 2735 return (error); 2736 } /* ixgbe_if_shutdown */ 2737 2738 /************************************************************************ 2739 * ixgbe_suspend 2740 * 2741 * From D0 to D3 2742 ************************************************************************/ 2743 static int 2744 ixgbe_if_suspend(if_ctx_t ctx) 2745 { 2746 int error = 0; 2747 2748 INIT_DEBUGOUT("ixgbe_suspend: begin"); 2749 2750 error = ixgbe_setup_low_power_mode(ctx); 2751 2752 return (error); 2753 } /* ixgbe_if_suspend */ 2754 2755 
/************************************************************************ 2756 * ixgbe_resume 2757 * 2758 * From D3 to D0 2759 ************************************************************************/ 2760 static int 2761 ixgbe_if_resume(if_ctx_t ctx) 2762 { 2763 struct adapter *adapter = iflib_get_softc(ctx); 2764 device_t dev = iflib_get_dev(ctx); 2765 struct ifnet *ifp = iflib_get_ifp(ctx); 2766 struct ixgbe_hw *hw = &adapter->hw; 2767 u32 wus; 2768 2769 INIT_DEBUGOUT("ixgbe_resume: begin"); 2770 2771 /* Read & clear WUS register */ 2772 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2773 if (wus) 2774 device_printf(dev, "Woken up by (WUS): %#010x\n", 2775 IXGBE_READ_REG(hw, IXGBE_WUS)); 2776 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2777 /* And clear WUFC until next low-power transition */ 2778 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2779 2780 /* 2781 * Required after D3->D0 transition; 2782 * will re-advertise all previous advertised speeds 2783 */ 2784 if (ifp->if_flags & IFF_UP) 2785 ixgbe_if_init(ctx); 2786 2787 return (0); 2788 } /* ixgbe_if_resume */ 2789 2790 /************************************************************************ 2791 * ixgbe_if_mtu_set - Ioctl mtu entry point 2792 * 2793 * Return 0 on success, EINVAL on failure 2794 ************************************************************************/ 2795 static int 2796 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 2797 { 2798 struct adapter *adapter = iflib_get_softc(ctx); 2799 int error = 0; 2800 2801 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); 2802 2803 if (mtu > IXGBE_MAX_MTU) { 2804 error = EINVAL; 2805 } else { 2806 adapter->max_frame_size = mtu + IXGBE_MTU_HDR; 2807 } 2808 2809 return error; 2810 } /* ixgbe_if_mtu_set */ 2811 2812 /************************************************************************ 2813 * ixgbe_if_crcstrip_set 2814 ************************************************************************/ 2815 static void 2816 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int 
crcstrip) 2817 { 2818 struct adapter *sc = iflib_get_softc(ctx); 2819 struct ixgbe_hw *hw = &sc->hw; 2820 /* crc stripping is set in two places: 2821 * IXGBE_HLREG0 (modified on init_locked and hw reset) 2822 * IXGBE_RDRXCTL (set by the original driver in 2823 * ixgbe_setup_hw_rsc() called in init_locked. 2824 * We disable the setting when netmap is compiled in). 2825 * We update the values here, but also in ixgbe.c because 2826 * init_locked sometimes is called outside our control. 2827 */ 2828 uint32_t hl, rxc; 2829 2830 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2831 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2832 #ifdef NETMAP 2833 if (netmap_verbose) 2834 D("%s read HLREG 0x%x rxc 0x%x", 2835 onoff ? "enter" : "exit", hl, rxc); 2836 #endif 2837 /* hw requirements ... */ 2838 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2839 rxc |= IXGBE_RDRXCTL_RSCACKC; 2840 if (onoff && !crcstrip) { 2841 /* keep the crc. Fast rx */ 2842 hl &= ~IXGBE_HLREG0_RXCRCSTRP; 2843 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; 2844 } else { 2845 /* reset default mode */ 2846 hl |= IXGBE_HLREG0_RXCRCSTRP; 2847 rxc |= IXGBE_RDRXCTL_CRCSTRIP; 2848 } 2849 #ifdef NETMAP 2850 if (netmap_verbose) 2851 D("%s write HLREG 0x%x rxc 0x%x", 2852 onoff ? "enter" : "exit", hl, rxc); 2853 #endif 2854 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); 2855 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); 2856 } /* ixgbe_if_crcstrip_set */ 2857 2858 /********************************************************************* 2859 * ixgbe_if_init - Init entry point 2860 * 2861 * Used in two ways: It is used by the stack as an init 2862 * entry point in network interface structure. It is also 2863 * used by the driver as a hw/sw initialization routine to 2864 * get to a consistent state. 
2865 * 2866 * Return 0 on success, positive on failure 2867 **********************************************************************/ 2868 void 2869 ixgbe_if_init(if_ctx_t ctx) 2870 { 2871 struct adapter *adapter = iflib_get_softc(ctx); 2872 struct ifnet *ifp = iflib_get_ifp(ctx); 2873 device_t dev = iflib_get_dev(ctx); 2874 struct ixgbe_hw *hw = &adapter->hw; 2875 struct ix_rx_queue *rx_que; 2876 struct ix_tx_queue *tx_que; 2877 u32 txdctl, mhadd; 2878 u32 rxdctl, rxctrl; 2879 u32 ctrl_ext; 2880 2881 int i, j, err; 2882 2883 INIT_DEBUGOUT("ixgbe_if_init: begin"); 2884 2885 /* Queue indices may change with IOV mode */ 2886 ixgbe_align_all_queue_indices(adapter); 2887 2888 /* reprogram the RAR[0] in case user changed it. */ 2889 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); 2890 2891 /* Get the latest mac address, User can use a LAA */ 2892 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 2893 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); 2894 hw->addr_ctrl.rar_used_count = 1; 2895 2896 ixgbe_init_hw(hw); 2897 2898 ixgbe_initialize_iov(adapter); 2899 2900 ixgbe_initialize_transmit_units(ctx); 2901 2902 /* Setup Multicast table */ 2903 ixgbe_if_multi_set(ctx); 2904 2905 /* Determine the correct mbuf pool, based on frame size */ 2906 if (adapter->max_frame_size <= MCLBYTES) 2907 adapter->rx_mbuf_sz = MCLBYTES; 2908 else 2909 adapter->rx_mbuf_sz = MJUMPAGESIZE; 2910 2911 /* Configure RX settings */ 2912 ixgbe_initialize_receive_units(ctx); 2913 2914 /* Enable SDP & MSI-X interrupts based on adapter */ 2915 ixgbe_config_gpie(adapter); 2916 2917 /* Set MTU size */ 2918 if (ifp->if_mtu > ETHERMTU) { 2919 /* aka IXGBE_MAXFRS on 82599 and newer */ 2920 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 2921 mhadd &= ~IXGBE_MHADD_MFS_MASK; 2922 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 2923 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 2924 } 2925 2926 /* Now enable all the queues */ 2927 for (i = 0, tx_que = adapter->tx_queues; i 
< adapter->num_tx_queues; i++, tx_que++) { 2928 struct tx_ring *txr = &tx_que->txr; 2929 2930 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 2931 txdctl |= IXGBE_TXDCTL_ENABLE; 2932 /* Set WTHRESH to 8, burst writeback */ 2933 txdctl |= (8 << 16); 2934 /* 2935 * When the internal queue falls below PTHRESH (32), 2936 * start prefetching as long as there are at least 2937 * HTHRESH (1) buffers ready. The values are taken 2938 * from the Intel linux driver 3.8.21. 2939 * Prefetching enables tx line rate even with 1 queue. 2940 */ 2941 txdctl |= (32 << 0) | (1 << 8); 2942 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 2943 } 2944 2945 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { 2946 struct rx_ring *rxr = &rx_que->rxr; 2947 2948 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 2949 if (hw->mac.type == ixgbe_mac_82598EB) { 2950 /* 2951 * PTHRESH = 21 2952 * HTHRESH = 4 2953 * WTHRESH = 8 2954 */ 2955 rxdctl &= ~0x3FFFFF; 2956 rxdctl |= 0x080420; 2957 } 2958 rxdctl |= IXGBE_RXDCTL_ENABLE; 2959 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 2960 for (j = 0; j < 10; j++) { 2961 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 2962 IXGBE_RXDCTL_ENABLE) 2963 break; 2964 else 2965 msec_delay(1); 2966 } 2967 wmb(); 2968 } 2969 2970 /* Enable Receive engine */ 2971 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2972 if (hw->mac.type == ixgbe_mac_82598EB) 2973 rxctrl |= IXGBE_RXCTRL_DMBYPS; 2974 rxctrl |= IXGBE_RXCTRL_RXEN; 2975 ixgbe_enable_rx_dma(hw, rxctrl); 2976 2977 /* Set up MSI/MSI-X routing */ 2978 if (ixgbe_enable_msix) { 2979 ixgbe_configure_ivars(adapter); 2980 /* Set up auto-mask */ 2981 if (hw->mac.type == ixgbe_mac_82598EB) 2982 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2983 else { 2984 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 2985 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 2986 } 2987 } else { /* Simple settings for Legacy/MSI */ 2988 ixgbe_set_ivar(adapter, 0, 0, 0); 2989 
ixgbe_set_ivar(adapter, 0, 0, 1); 2990 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2991 } 2992 2993 ixgbe_init_fdir(adapter); 2994 2995 /* 2996 * Check on any SFP devices that 2997 * need to be kick-started 2998 */ 2999 if (hw->phy.type == ixgbe_phy_none) { 3000 err = hw->phy.ops.identify(hw); 3001 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3002 device_printf(dev, 3003 "Unsupported SFP+ module type was detected.\n"); 3004 return; 3005 } 3006 } 3007 3008 /* Set moderation on the Link interrupt */ 3009 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR); 3010 3011 /* Enable power to the phy. */ 3012 ixgbe_set_phy_power(hw, TRUE); 3013 3014 /* Config/Enable Link */ 3015 ixgbe_config_link(adapter); 3016 3017 /* Hardware Packet Buffer & Flow Control setup */ 3018 ixgbe_config_delay_values(adapter); 3019 3020 /* Initialize the FC settings */ 3021 ixgbe_start_hw(hw); 3022 3023 /* Set up VLAN support and filter */ 3024 ixgbe_setup_vlan_hw_support(ctx); 3025 3026 /* Setup DMA Coalescing */ 3027 ixgbe_config_dmac(adapter); 3028 3029 /* And now turn on interrupts */ 3030 ixgbe_if_enable_intr(ctx); 3031 3032 /* Enable the use of the MBX by the VF's */ 3033 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { 3034 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3035 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 3036 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3037 } 3038 3039 } /* ixgbe_init_locked */ 3040 3041 /************************************************************************ 3042 * ixgbe_set_ivar 3043 * 3044 * Setup the correct IVAR register for a particular MSI-X interrupt 3045 * (yes this is all very magic and confusing :) 3046 * - entry is the register array entry 3047 * - vector is the MSI-X vector for this queue 3048 * - type is RX/TX/MISC 3049 ************************************************************************/ 3050 static void 3051 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 3052 { 3053 struct ixgbe_hw *hw = &adapter->hw; 
3054 u32 ivar, index; 3055 3056 vector |= IXGBE_IVAR_ALLOC_VAL; 3057 3058 switch (hw->mac.type) { 3059 case ixgbe_mac_82598EB: 3060 if (type == -1) 3061 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3062 else 3063 entry += (type * 64); 3064 index = (entry >> 2) & 0x1F; 3065 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3066 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3067 ivar |= (vector << (8 * (entry & 0x3))); 3068 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 3069 break; 3070 case ixgbe_mac_82599EB: 3071 case ixgbe_mac_X540: 3072 case ixgbe_mac_X550: 3073 case ixgbe_mac_X550EM_x: 3074 case ixgbe_mac_X550EM_a: 3075 if (type == -1) { /* MISC IVAR */ 3076 index = (entry & 1) * 8; 3077 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3078 ivar &= ~(0xFF << index); 3079 ivar |= (vector << index); 3080 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3081 } else { /* RX/TX IVARS */ 3082 index = (16 * (entry & 1)) + (8 * type); 3083 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3084 ivar &= ~(0xFF << index); 3085 ivar |= (vector << index); 3086 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3087 } 3088 default: 3089 break; 3090 } 3091 } /* ixgbe_set_ivar */ 3092 3093 /************************************************************************ 3094 * ixgbe_configure_ivars 3095 ************************************************************************/ 3096 static void 3097 ixgbe_configure_ivars(struct adapter *adapter) 3098 { 3099 struct ix_rx_queue *rx_que = adapter->rx_queues; 3100 struct ix_tx_queue *tx_que = adapter->tx_queues; 3101 u32 newitr; 3102 3103 if (ixgbe_max_interrupt_rate > 0) 3104 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3105 else { 3106 /* 3107 * Disable DMA coalescing if interrupt moderation is 3108 * disabled. 
3109 */ 3110 adapter->dmac = 0; 3111 newitr = 0; 3112 } 3113 3114 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) { 3115 struct rx_ring *rxr = &rx_que->rxr; 3116 3117 /* First the RX queue entry */ 3118 ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0); 3119 3120 /* Set an Initial EITR value */ 3121 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr); 3122 } 3123 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) { 3124 struct tx_ring *txr = &tx_que->txr; 3125 3126 /* ... and the TX */ 3127 ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1); 3128 } 3129 /* For the Link interrupt */ 3130 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 3131 } /* ixgbe_configure_ivars */ 3132 3133 /************************************************************************ 3134 * ixgbe_config_gpie 3135 ************************************************************************/ 3136 static void 3137 ixgbe_config_gpie(struct adapter *adapter) 3138 { 3139 struct ixgbe_hw *hw = &adapter->hw; 3140 u32 gpie; 3141 3142 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3143 3144 if (adapter->intr_type == IFLIB_INTR_MSIX) { 3145 /* Enable Enhanced MSI-X mode */ 3146 gpie |= IXGBE_GPIE_MSIX_MODE 3147 | IXGBE_GPIE_EIAME 3148 | IXGBE_GPIE_PBA_SUPPORT 3149 | IXGBE_GPIE_OCD; 3150 } 3151 3152 /* Fan Failure Interrupt */ 3153 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 3154 gpie |= IXGBE_SDP1_GPIEN; 3155 3156 /* Thermal Sensor Interrupt */ 3157 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 3158 gpie |= IXGBE_SDP0_GPIEN_X540; 3159 3160 /* Link detection */ 3161 switch (hw->mac.type) { 3162 case ixgbe_mac_82599EB: 3163 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 3164 break; 3165 case ixgbe_mac_X550EM_x: 3166 case ixgbe_mac_X550EM_a: 3167 gpie |= IXGBE_SDP0_GPIEN_X540; 3168 break; 3169 default: 3170 break; 3171 } 3172 3173 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3174 3175 } /* ixgbe_config_gpie */ 3176 3177 /************************************************************************ 
3178 * ixgbe_config_delay_values 3179 * 3180 * Requires adapter->max_frame_size to be set. 3181 ************************************************************************/ 3182 static void 3183 ixgbe_config_delay_values(struct adapter *adapter) 3184 { 3185 struct ixgbe_hw *hw = &adapter->hw; 3186 u32 rxpb, frame, size, tmp; 3187 3188 frame = adapter->max_frame_size; 3189 3190 /* Calculate High Water */ 3191 switch (hw->mac.type) { 3192 case ixgbe_mac_X540: 3193 case ixgbe_mac_X550: 3194 case ixgbe_mac_X550EM_x: 3195 case ixgbe_mac_X550EM_a: 3196 tmp = IXGBE_DV_X540(frame, frame); 3197 break; 3198 default: 3199 tmp = IXGBE_DV(frame, frame); 3200 break; 3201 } 3202 size = IXGBE_BT2KB(tmp); 3203 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 3204 hw->fc.high_water[0] = rxpb - size; 3205 3206 /* Now calculate Low Water */ 3207 switch (hw->mac.type) { 3208 case ixgbe_mac_X540: 3209 case ixgbe_mac_X550: 3210 case ixgbe_mac_X550EM_x: 3211 case ixgbe_mac_X550EM_a: 3212 tmp = IXGBE_LOW_DV_X540(frame); 3213 break; 3214 default: 3215 tmp = IXGBE_LOW_DV(frame); 3216 break; 3217 } 3218 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 3219 3220 hw->fc.pause_time = IXGBE_FC_PAUSE; 3221 hw->fc.send_xon = TRUE; 3222 } /* ixgbe_config_delay_values */ 3223 3224 /************************************************************************ 3225 * ixgbe_set_multi - Multicast Update 3226 * 3227 * Called whenever multicast address list is updated. 
3228 ************************************************************************/ 3229 static int 3230 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count) 3231 { 3232 struct adapter *adapter = arg; 3233 struct ixgbe_mc_addr *mta = adapter->mta; 3234 3235 if (ifma->ifma_addr->sa_family != AF_LINK) 3236 return (0); 3237 if (count == MAX_NUM_MULTICAST_ADDRESSES) 3238 return (0); 3239 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 3240 mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3241 mta[count].vmdq = adapter->pool; 3242 3243 return (1); 3244 } /* ixgbe_mc_filter_apply */ 3245 3246 static void 3247 ixgbe_if_multi_set(if_ctx_t ctx) 3248 { 3249 struct adapter *adapter = iflib_get_softc(ctx); 3250 struct ixgbe_mc_addr *mta; 3251 struct ifnet *ifp = iflib_get_ifp(ctx); 3252 u8 *update_ptr; 3253 int mcnt = 0; 3254 u32 fctrl; 3255 3256 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin"); 3257 3258 mta = adapter->mta; 3259 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 3260 3261 mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter); 3262 3263 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3264 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3265 if (ifp->if_flags & IFF_PROMISC) 3266 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3267 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 3268 ifp->if_flags & IFF_ALLMULTI) { 3269 fctrl |= IXGBE_FCTRL_MPE; 3270 fctrl &= ~IXGBE_FCTRL_UPE; 3271 } else 3272 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3273 3274 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 3275 3276 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { 3277 update_ptr = (u8 *)mta; 3278 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 3279 ixgbe_mc_array_itr, TRUE); 3280 } 3281 3282 } /* ixgbe_if_multi_set */ 3283 3284 /************************************************************************ 3285 * ixgbe_mc_array_itr 3286 * 3287 * An iterator function needed by the multicast shared code. 
3288 * It feeds the shared code routine the addresses in the 3289 * array of ixgbe_set_multi() one by one. 3290 ************************************************************************/ 3291 static u8 * 3292 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 3293 { 3294 struct ixgbe_mc_addr *mta; 3295 3296 mta = (struct ixgbe_mc_addr *)*update_ptr; 3297 *vmdq = mta->vmdq; 3298 3299 *update_ptr = (u8*)(mta + 1); 3300 3301 return (mta->addr); 3302 } /* ixgbe_mc_array_itr */ 3303 3304 /************************************************************************ 3305 * ixgbe_local_timer - Timer routine 3306 * 3307 * Checks for link status, updates statistics, 3308 * and runs the watchdog check. 3309 ************************************************************************/ 3310 static void 3311 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid) 3312 { 3313 struct adapter *adapter = iflib_get_softc(ctx); 3314 3315 if (qid != 0) 3316 return; 3317 3318 /* Check for pluggable optics */ 3319 if (adapter->sfp_probe) 3320 if (!ixgbe_sfp_probe(ctx)) 3321 return; /* Nothing to do */ 3322 3323 ixgbe_check_link(&adapter->hw, &adapter->link_speed, 3324 &adapter->link_up, 0); 3325 3326 /* Fire off the adminq task */ 3327 iflib_admin_intr_deferred(ctx); 3328 3329 } /* ixgbe_if_timer */ 3330 3331 /************************************************************************ 3332 * ixgbe_sfp_probe 3333 * 3334 * Determine if a port had optics inserted. 
3335 ************************************************************************/ 3336 static bool 3337 ixgbe_sfp_probe(if_ctx_t ctx) 3338 { 3339 struct adapter *adapter = iflib_get_softc(ctx); 3340 struct ixgbe_hw *hw = &adapter->hw; 3341 device_t dev = iflib_get_dev(ctx); 3342 bool result = FALSE; 3343 3344 if ((hw->phy.type == ixgbe_phy_nl) && 3345 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3346 s32 ret = hw->phy.ops.identify_sfp(hw); 3347 if (ret) 3348 goto out; 3349 ret = hw->phy.ops.reset(hw); 3350 adapter->sfp_probe = FALSE; 3351 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3352 device_printf(dev, "Unsupported SFP+ module detected!"); 3353 device_printf(dev, 3354 "Reload driver with supported module.\n"); 3355 goto out; 3356 } else 3357 device_printf(dev, "SFP+ module detected!\n"); 3358 /* We now have supported optics */ 3359 result = TRUE; 3360 } 3361 out: 3362 3363 return (result); 3364 } /* ixgbe_sfp_probe */ 3365 3366 /************************************************************************ 3367 * ixgbe_handle_mod - Tasklet for SFP module interrupts 3368 ************************************************************************/ 3369 static void 3370 ixgbe_handle_mod(void *context) 3371 { 3372 if_ctx_t ctx = context; 3373 struct adapter *adapter = iflib_get_softc(ctx); 3374 struct ixgbe_hw *hw = &adapter->hw; 3375 device_t dev = iflib_get_dev(ctx); 3376 u32 err, cage_full = 0; 3377 3378 adapter->sfp_reinit = 1; 3379 if (adapter->hw.need_crosstalk_fix) { 3380 switch (hw->mac.type) { 3381 case ixgbe_mac_82599EB: 3382 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3383 IXGBE_ESDP_SDP2; 3384 break; 3385 case ixgbe_mac_X550EM_x: 3386 case ixgbe_mac_X550EM_a: 3387 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3388 IXGBE_ESDP_SDP0; 3389 break; 3390 default: 3391 break; 3392 } 3393 3394 if (!cage_full) 3395 goto handle_mod_out; 3396 } 3397 3398 err = hw->phy.ops.identify_sfp(hw); 3399 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3400 device_printf(dev, 3401 
"Unsupported SFP+ module type was detected.\n"); 3402 goto handle_mod_out; 3403 } 3404 3405 if (hw->mac.type == ixgbe_mac_82598EB) 3406 err = hw->phy.ops.reset(hw); 3407 else 3408 err = hw->mac.ops.setup_sfp(hw); 3409 3410 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3411 device_printf(dev, 3412 "Setup failure - unsupported SFP+ module type.\n"); 3413 goto handle_mod_out; 3414 } 3415 GROUPTASK_ENQUEUE(&adapter->msf_task); 3416 return; 3417 3418 handle_mod_out: 3419 adapter->sfp_reinit = 0; 3420 } /* ixgbe_handle_mod */ 3421 3422 3423 /************************************************************************ 3424 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts 3425 ************************************************************************/ 3426 static void 3427 ixgbe_handle_msf(void *context) 3428 { 3429 if_ctx_t ctx = context; 3430 struct adapter *adapter = iflib_get_softc(ctx); 3431 struct ixgbe_hw *hw = &adapter->hw; 3432 u32 autoneg; 3433 bool negotiate; 3434 3435 if (adapter->sfp_reinit != 1) 3436 return; 3437 3438 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 3439 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 3440 3441 autoneg = hw->phy.autoneg_advertised; 3442 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3443 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3444 if (hw->mac.ops.setup_link) 3445 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3446 3447 /* Adjust media types shown in ifconfig */ 3448 ifmedia_removeall(adapter->media); 3449 ixgbe_add_media_types(adapter->ctx); 3450 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); 3451 3452 adapter->sfp_reinit = 0; 3453 } /* ixgbe_handle_msf */ 3454 3455 /************************************************************************ 3456 * ixgbe_handle_phy - Tasklet for external PHY interrupts 3457 ************************************************************************/ 3458 static void 3459 ixgbe_handle_phy(void *context) 3460 { 3461 if_ctx_t ctx = 
context; 3462 struct adapter *adapter = iflib_get_softc(ctx); 3463 struct ixgbe_hw *hw = &adapter->hw; 3464 int error; 3465 3466 error = hw->phy.ops.handle_lasi(hw); 3467 if (error == IXGBE_ERR_OVERTEMP) 3468 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n"); 3469 else if (error) 3470 device_printf(adapter->dev, 3471 "Error handling LASI interrupt: %d\n", error); 3472 } /* ixgbe_handle_phy */ 3473 3474 /************************************************************************ 3475 * ixgbe_if_stop - Stop the hardware 3476 * 3477 * Disables all traffic on the adapter by issuing a 3478 * global reset on the MAC and deallocates TX/RX buffers. 3479 ************************************************************************/ 3480 static void 3481 ixgbe_if_stop(if_ctx_t ctx) 3482 { 3483 struct adapter *adapter = iflib_get_softc(ctx); 3484 struct ixgbe_hw *hw = &adapter->hw; 3485 3486 INIT_DEBUGOUT("ixgbe_if_stop: begin\n"); 3487 3488 ixgbe_reset_hw(hw); 3489 hw->adapter_stopped = FALSE; 3490 ixgbe_stop_adapter(hw); 3491 if (hw->mac.type == ixgbe_mac_82599EB) 3492 ixgbe_stop_mac_link_on_d3_82599(hw); 3493 /* Turn off the laser - noop with no optics */ 3494 ixgbe_disable_tx_laser(hw); 3495 3496 /* Update the stack */ 3497 adapter->link_up = FALSE; 3498 ixgbe_if_update_admin_status(ctx); 3499 3500 /* reprogram the RAR[0] in case user changed it. */ 3501 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 3502 3503 return; 3504 } /* ixgbe_if_stop */ 3505 3506 /************************************************************************ 3507 * ixgbe_update_link_status - Update OS on link state 3508 * 3509 * Note: Only updates the OS on the cached link state. 3510 * The real check of the hardware only happens with 3511 * a link interrupt. 
3512 ************************************************************************/ 3513 static void 3514 ixgbe_if_update_admin_status(if_ctx_t ctx) 3515 { 3516 struct adapter *adapter = iflib_get_softc(ctx); 3517 device_t dev = iflib_get_dev(ctx); 3518 3519 if (adapter->link_up) { 3520 if (adapter->link_active == FALSE) { 3521 if (bootverbose) 3522 device_printf(dev, "Link is up %d Gbps %s \n", 3523 ((adapter->link_speed == 128) ? 10 : 1), 3524 "Full Duplex"); 3525 adapter->link_active = TRUE; 3526 /* Update any Flow Control changes */ 3527 ixgbe_fc_enable(&adapter->hw); 3528 /* Update DMA coalescing config */ 3529 ixgbe_config_dmac(adapter); 3530 /* should actually be negotiated value */ 3531 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); 3532 3533 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3534 ixgbe_ping_all_vfs(adapter); 3535 } 3536 } else { /* Link down */ 3537 if (adapter->link_active == TRUE) { 3538 if (bootverbose) 3539 device_printf(dev, "Link is Down\n"); 3540 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 3541 adapter->link_active = FALSE; 3542 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3543 ixgbe_ping_all_vfs(adapter); 3544 } 3545 } 3546 3547 ixgbe_update_stats_counters(adapter); 3548 3549 /* Re-enable link interrupts */ 3550 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC); 3551 } /* ixgbe_if_update_admin_status */ 3552 3553 /************************************************************************ 3554 * ixgbe_config_dmac - Configure DMA Coalescing 3555 ************************************************************************/ 3556 static void 3557 ixgbe_config_dmac(struct adapter *adapter) 3558 { 3559 struct ixgbe_hw *hw = &adapter->hw; 3560 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 3561 3562 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) 3563 return; 3564 3565 if (dcfg->watchdog_timer ^ adapter->dmac || 3566 dcfg->link_speed ^ adapter->link_speed) { 3567 dcfg->watchdog_timer = adapter->dmac; 3568 
dcfg->fcoe_en = FALSE; 3569 dcfg->link_speed = adapter->link_speed; 3570 dcfg->num_tcs = 1; 3571 3572 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 3573 dcfg->watchdog_timer, dcfg->link_speed); 3574 3575 hw->mac.ops.dmac_config(hw); 3576 } 3577 } /* ixgbe_config_dmac */ 3578 3579 /************************************************************************ 3580 * ixgbe_if_enable_intr 3581 ************************************************************************/ 3582 void 3583 ixgbe_if_enable_intr(if_ctx_t ctx) 3584 { 3585 struct adapter *adapter = iflib_get_softc(ctx); 3586 struct ixgbe_hw *hw = &adapter->hw; 3587 struct ix_rx_queue *que = adapter->rx_queues; 3588 u32 mask, fwsm; 3589 3590 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 3591 3592 switch (adapter->hw.mac.type) { 3593 case ixgbe_mac_82599EB: 3594 mask |= IXGBE_EIMS_ECC; 3595 /* Temperature sensor on some adapters */ 3596 mask |= IXGBE_EIMS_GPI_SDP0; 3597 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 3598 mask |= IXGBE_EIMS_GPI_SDP1; 3599 mask |= IXGBE_EIMS_GPI_SDP2; 3600 break; 3601 case ixgbe_mac_X540: 3602 /* Detect if Thermal Sensor is enabled */ 3603 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 3604 if (fwsm & IXGBE_FWSM_TS_ENABLED) 3605 mask |= IXGBE_EIMS_TS; 3606 mask |= IXGBE_EIMS_ECC; 3607 break; 3608 case ixgbe_mac_X550: 3609 /* MAC thermal sensor is automatically enabled */ 3610 mask |= IXGBE_EIMS_TS; 3611 mask |= IXGBE_EIMS_ECC; 3612 break; 3613 case ixgbe_mac_X550EM_x: 3614 case ixgbe_mac_X550EM_a: 3615 /* Some devices use SDP0 for important information */ 3616 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3617 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 3618 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 3619 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) 3620 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 3621 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 3622 mask |= IXGBE_EICR_GPI_SDP0_X540; 3623 mask |= IXGBE_EIMS_ECC; 3624 break; 3625 default: 3626 break; 3627 } 3628 3629 /* Enable 
Fan Failure detection */ 3630 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 3631 mask |= IXGBE_EIMS_GPI_SDP1; 3632 /* Enable SR-IOV */ 3633 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3634 mask |= IXGBE_EIMS_MAILBOX; 3635 /* Enable Flow Director */ 3636 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 3637 mask |= IXGBE_EIMS_FLOW_DIR; 3638 3639 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3640 3641 /* With MSI-X we use auto clear */ 3642 if (adapter->intr_type == IFLIB_INTR_MSIX) { 3643 mask = IXGBE_EIMS_ENABLE_MASK; 3644 /* Don't autoclear Link */ 3645 mask &= ~IXGBE_EIMS_OTHER; 3646 mask &= ~IXGBE_EIMS_LSC; 3647 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 3648 mask &= ~IXGBE_EIMS_MAILBOX; 3649 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 3650 } 3651 3652 /* 3653 * Now enable all queues, this is done separately to 3654 * allow for handling the extended (beyond 32) MSI-X 3655 * vectors that can be used by 82599 3656 */ 3657 for (int i = 0; i < adapter->num_rx_queues; i++, que++) 3658 ixgbe_enable_queue(adapter, que->msix); 3659 3660 IXGBE_WRITE_FLUSH(hw); 3661 3662 } /* ixgbe_if_enable_intr */ 3663 3664 /************************************************************************ 3665 * ixgbe_disable_intr 3666 ************************************************************************/ 3667 static void 3668 ixgbe_if_disable_intr(if_ctx_t ctx) 3669 { 3670 struct adapter *adapter = iflib_get_softc(ctx); 3671 3672 if (adapter->intr_type == IFLIB_INTR_MSIX) 3673 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); 3674 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3675 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3676 } else { 3677 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3678 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3679 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3680 } 3681 IXGBE_WRITE_FLUSH(&adapter->hw); 3682 3683 } /* ixgbe_if_disable_intr */ 3684 3685 /************************************************************************ 3686 * 
ixgbe_if_rx_queue_intr_enable 3687 ************************************************************************/ 3688 static int 3689 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 3690 { 3691 struct adapter *adapter = iflib_get_softc(ctx); 3692 struct ix_rx_queue *que = &adapter->rx_queues[rxqid]; 3693 3694 ixgbe_enable_queue(adapter, que->rxr.me); 3695 3696 return (0); 3697 } /* ixgbe_if_rx_queue_intr_enable */ 3698 3699 /************************************************************************ 3700 * ixgbe_enable_queue 3701 ************************************************************************/ 3702 static void 3703 ixgbe_enable_queue(struct adapter *adapter, u32 vector) 3704 { 3705 struct ixgbe_hw *hw = &adapter->hw; 3706 u64 queue = (u64)(1 << vector); 3707 u32 mask; 3708 3709 if (hw->mac.type == ixgbe_mac_82598EB) { 3710 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3711 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3712 } else { 3713 mask = (queue & 0xFFFFFFFF); 3714 if (mask) 3715 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 3716 mask = (queue >> 32); 3717 if (mask) 3718 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 3719 } 3720 } /* ixgbe_enable_queue */ 3721 3722 /************************************************************************ 3723 * ixgbe_disable_queue 3724 ************************************************************************/ 3725 static void 3726 ixgbe_disable_queue(struct adapter *adapter, u32 vector) 3727 { 3728 struct ixgbe_hw *hw = &adapter->hw; 3729 u64 queue = (u64)(1 << vector); 3730 u32 mask; 3731 3732 if (hw->mac.type == ixgbe_mac_82598EB) { 3733 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3734 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 3735 } else { 3736 mask = (queue & 0xFFFFFFFF); 3737 if (mask) 3738 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 3739 mask = (queue >> 32); 3740 if (mask) 3741 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 3742 } 3743 } /* ixgbe_disable_queue */ 3744 3745 
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 *
 *   Shared/legacy (non-MSI-X) interrupt filter.  Reads EICR once
 *   (read clears it on these parts), dispatches misc causes to
 *   deferred tasks, and schedules the RX thread.
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct adapter     *adapter = arg;
	struct ix_rx_queue *que = adapter->rx_queues;
	struct ixgbe_hw    *hw = &adapter->hw;
	if_ctx_t           ctx = adapter->ctx;
	u32                eicr, eicr_mask;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	/* Nothing pending: interrupt was not ours; re-arm and exit */
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		/*
		 * NOTE(review): this writes EIMS (re-enabling SDP1)
		 * rather than clearing the cause in EICR — looks
		 * intentional to keep the warning firing, but confirm.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change: mask LSC and let the admin task handle it */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack and defer to mod task */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			GROUPTASK_ENQUEUE(&adapter->mod_task);
		}

		/*
		 * Multispeed-fiber setup (82599 only); the cmpset
		 * guard keeps a single msf task in flight at a time.
		 */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
				GROUPTASK_ENQUEUE(&adapter->msf_task);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		GROUPTASK_ENQUEUE(&adapter->phy_task);

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 *
 *   Release per-queue and admin IRQs, then the BAR(0) register
 *   mapping.  Safe to call with partially-allocated resources.
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ix_rx_queue *que = adapter->rx_queues;
	device_t       dev = iflib_get_dev(ctx);

	/* Release all msix queue resources */
	if (adapter->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &adapter->irq);

	if (que != NULL) {
		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	/*
	 * Release the register memory (BAR 0).
	 * NOTE(review): the original comment here said "Free
	 * link/admin interrupt", which does not match this call —
	 * the admin IRQ is freed above.
	 */
	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 * SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	int            error, fc;

	adapter = (struct adapter *)arg1;
	fc = adapter->hw.fc.current_mode;

	error = sysctl_handle_int(oidp, &fc, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (fc == adapter->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(adapter, fc);
} /* ixgbe_sysctl_flowcntl */

/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 *   Flow control values:
 *     0 - off
 *     1 - rx pause
 *     2 - tx pause
 *     3 - full
 *
 *   RX packet-drop is only useful when flow control is off, so the
 *   two are toggled together (multiqueue only).  Returns EINVAL for
 *   any value outside the enum range.
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct adapter *adapter, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = fc;
		if (adapter->num_rx_queues > 1)
			ixgbe_disable_rx_drop(adapter);
		break;
	case ixgbe_fc_none:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
		if (adapter->num_rx_queues > 1)
			ixgbe_enable_rx_drop(adapter);
		break;
	default:
		return (EINVAL);
	}

	/* Don't autoneg if forcing a value */
	adapter->hw.fc.disable_fc_autoneg = TRUE;
	ixgbe_fc_enable(&adapter->hw);

	return (0);
} /* ixgbe_set_flowcntl */

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 * Enable the hardware to drop packets when the buffer is
 * full. This is useful with multiqueue, so that no single
 * queue being full stalls the entire RX engine. We only
 * enable this when Multiqueue is enabled AND Flow Control
 * is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring  *rxr;
	u32             srrctl;

	/* Set the per-ring drop-enable bit in SRRCTL */
	for (int i = 0; i < adapter->num_rx_queues; i++) {
		rxr = &adapter->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 *
 *   Inverse of ixgbe_enable_rx_drop: clear SRRCTL drop-enable on
 *   every ring and turn off queue-drop for each VF.
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring  *rxr;
	u32             srrctl;

	for (int i = 0; i < adapter->num_rx_queues; i++) {
		rxr = &adapter->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 * SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	int            error, advertise;

	adapter = (struct adapter *)arg1;
	advertise = adapter->advertise;

	error = sysctl_handle_int(oidp, &advertise, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixgbe_set_advertise(adapter, advertise);
} /* ixgbe_sysctl_advertise */

/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 *   Flags:
 *     0x1 - advertise 100 Mb
 *     0x2 - advertise 1G
 *     0x4 - advertise 10G
 *     0x8 - advertise 10 Mb (yes, Mb)
 *
 *   Validates the bitmask against media type and the PHY's reported
 *   link capabilities before programming the MAC.  Returns 0, EINVAL
 *   (bad mask / unsupported speed) or ENODEV (backplane media or
 *   capabilities unreadable).
 ************************************************************************/
static int
ixgbe_set_advertise(struct adapter *adapter, int advertise)
{
	device_t         dev = iflib_get_dev(adapter->ctx);
	struct ixgbe_hw  *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32              err = IXGBE_NOT_IMPLEMENTED;
	bool             negotiate = FALSE;

	/* Checks to validate new value */
	if (adapter->advertise == advertise) /* no change */
		return (0);

	hw = &adapter->hw;

	/* No speed changes for backplane media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber))) {
		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
		return (EINVAL);
	}

	if (advertise < 0x1 || advertise > 0xF) {
		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
		return (EINVAL);
	}

	if (hw->mac.ops.get_link_capabilities) {
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(dev, "Unable to determine supported advertise speeds\n");
			return (ENODEV);
		}
	}

	/* Set new value and report new advertised mode */
	if (advertise & 0x1) {
		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_100_FULL;
	}
	if (advertise & 0x2) {
		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	}
	if (advertise & 0x4) {
		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	}
	if (advertise & 0x8) {
		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10_FULL;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	adapter->advertise = advertise;

	return (0);
} /* ixgbe_set_advertise */

/************************************************************************
 * ixgbe_get_advertise - Get current advertised speed settings
 *
 * Formatted for sysctl usage.
 * Flags:
 *   0x1 - advertise 100 Mb
 *   0x2 - advertise 1G
 *   0x4 - advertise 10G
 *   0x8 - advertise 10 Mb (yes, Mb)
 ************************************************************************/
static int
ixgbe_get_advertise(struct adapter *adapter)
{
	struct ixgbe_hw  *hw = &adapter->hw;
	int              speed;
	ixgbe_link_speed link_caps = 0;
	s32              err;
	bool             negotiate = FALSE;

	/*
	 * Advertised speed means nothing unless it's copper or
	 * multi-speed fiber
	 */
	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
	    !(hw->phy.multispeed_fiber))
		return (0);

	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
	if (err != IXGBE_SUCCESS)
		return (0);

	/* Fold the capability bits into the sysctl flag format above */
	speed =
	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	return speed;
} /* ixgbe_get_advertise */

/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 * Control values:
 *   0/1 - off / on (use default value of 1000)
 *
 * Legal timer values are:
 *   50,100,250,500,1000,2000,5000,10000
 *
 * Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
	int            error;
	u16            newval;

	newval = adapter->dmac;
	error = sysctl_handle_16(oidp, &newval, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		adapter->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		adapter->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		adapter->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 * Sysctl to test power states
 * Values:
 *   0      - set device to D0
 *   3      - set device to D3
 *   (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	device_t       dev = adapter->dev;
	int            curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_handle_int(oidp, &new_ps, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	/* Only the D0 <-> D3 transitions are supported */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
} /* ixgbe_sysctl_power_state */
#endif

/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 * Sysctl to enable/disable the WoL capability,
 * if supported by the adapter.
 *
 * Values:
 *   0 - disabled
 *   1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
	struct adapter  *adapter = (struct adapter *)arg1;
	struct ixgbe_hw *hw = &adapter->hw;
	int             new_wol_enabled;
	int             error = 0;

	new_wol_enabled = hw->wol_enabled;
	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Normalize any non-zero input to 1 */
	new_wol_enabled = !!(new_wol_enabled);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	if (new_wol_enabled > 0 && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */

/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 * Sysctl to enable/disable the types of packets that the
 * adapter will wake up on upon receipt.
 * Flags:
 *   0x1  - Link Status Change
 *   0x2  - Magic Packet
 *   0x4  - Direct Exact
 *   0x8  - Directed Multicast
 *   0x10 - Broadcast
 *   0x20 - ARP/IPv4 Request Packet
 *   0x40 - Direct IPv4 Packet
 *   0x80 - Direct IPv6 Packet
 *
 * Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	int            error = 0;
	u32            new_wufc;

	new_wufc = adapter->wufc;

	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the low byte holds valid filter flags */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * NOTE(review): OR-ing the previous wufc back in means set
	 * bits can never be cleared through this sysctl — confirm
	 * whether that is the intended semantics.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 *
 *   Dump the RSS redirection table (RETA, plus ERETA on X550
 *   family) into an sbuf for sysctl consumption.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
	struct adapter  *adapter = (struct adapter *)arg1;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	struct sbuf     *buf;
	int             error = 0, reta_size;
	u32             reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries 32+ live in the extended ERETA registers */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */

/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 * For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
	struct adapter  *adapter = (struct adapter *)arg1;
	struct ixgbe_hw *hw = &adapter->hw;
	u16             reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(adapter->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(adapter->ctx),
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	/* Shift temp for output (high byte holds the value) */
	reg = reg >> 8;

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 * Reports (directly from the PHY) whether the current PHY
 * temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct adapter  *adapter = (struct adapter *)arg1;
	struct ixgbe_hw *hw = &adapter->hw;
	u16             reg;

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(adapter->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(adapter->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Get occurrence bit */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, 0, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */

/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 * Sysctl to set EEE power saving feature
 * Values:
 *   0      - disable EEE
 *   1      - enable EEE
 *   (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	device_t       dev = adapter->dev;
	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
	int            curr_eee, new_eee, error = 0;
	s32            retval;

	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */

/************************************************************************
 * ixgbe_init_device_features
 *
 *   Populate feat_cap (what the hardware supports, keyed off MAC
 *   type and device id) and feat_en (what is enabled, from defaults
 *   and global sysctls), then clear features whose dependencies
 *   (e.g. MSI-X) are unmet.
 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
	                  | IXGBE_FEATURE_RSS
	                  | IXGBE_FEATURE_MSI
	                  | IXGBE_FEATURE_MSIX
	                  | IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */

/************************************************************************
 * ixgbe_check_fan_failure
 *
 *   Warn on a fan-failure indication; 'reg' is EICR when called
 *   from interrupt context, ESDP otherwise, so the bit to test
 *   differs accordingly.
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
{
	u32 mask;

	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */