1 /****************************************************************************** 2 3 Copyright (c) 2001-2017, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/
/*$FreeBSD$*/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";


/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* 82599 family */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* X540/X550 family */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* Bypass adapters */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
	/* required last entry */
	PVID_END
};

/* iflib device-interface (ifdi) callbacks implemented by this driver */
static void *ixgbe_register(device_t dev);
static int  ixgbe_if_attach_pre(if_ctx_t ctx);
static int  ixgbe_if_attach_post(if_ctx_t ctx);
static int  ixgbe_if_detach(if_ctx_t ctx);
static int  ixgbe_if_shutdown(if_ctx_t ctx);
static int  ixgbe_if_suspend(if_ctx_t ctx);
static int  ixgbe_if_resume(if_ctx_t ctx);

static void ixgbe_if_stop(if_ctx_t ctx);
void ixgbe_if_enable_intr(if_ctx_t ctx);
static void ixgbe_if_disable_intr(if_ctx_t ctx);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
static int  ixgbe_if_media_change(if_ctx_t ctx);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
static void ixgbe_if_multi_set(if_ctx_t ctx);
static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static void ixgbe_if_queues_free(if_ctx_t ctx);
static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t ctx);
static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
int ixgbe_intr(void *arg);

/************************************************************************
 * Function prototypes
 ************************************************************************/
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
#endif

static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_add_device_sysctls(if_ctx_t ctx);
static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);

static void ixgbe_config_dmac(struct adapter *adapter);
static void ixgbe_configure_ivars(struct adapter *adapter);
static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
    s8 type);
static u8  *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t ctx);

static void ixgbe_free_pci_resources(if_ctx_t ctx);

static int  ixgbe_msix_link(void *arg);
static int  ixgbe_msix_que(void *arg);
static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
static void ixgbe_initialize_receive_units(if_ctx_t ctx);
static void ixgbe_initialize_transmit_units(if_ctx_t ctx);

static int  ixgbe_setup_interface(if_ctx_t ctx);
static void ixgbe_init_device_features(struct adapter *adapter);
static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void ixgbe_add_media_types(if_ctx_t ctx);
static void ixgbe_update_stats_counters(struct adapter *adapter);
static void ixgbe_config_link(struct adapter *adapter);
static void ixgbe_get_slot_info(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *adapter);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);

static void ixgbe_add_hw_stats(struct adapter *adapter);
static int  ixgbe_set_flowcntl(struct adapter *, int);
static int  ixgbe_set_advertise(struct adapter *, int);
static int  ixgbe_get_advertise(struct adapter *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
static void ixgbe_config_gpie(struct adapter *adapter);
static void ixgbe_config_delay_values(struct adapter *adapter);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ix, ixgbe_vendor_info_array,
    sizeof(ixgbe_vendor_info_array[0]), nitems(ixgbe_vendor_info_array) - 1);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

/* iflib ifdi method table: maps iflib callbacks onto this driver */
static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	/*
	 * NOTE(review): both the TX and RX queue-interrupt-enable slots point
	 * at ixgbe_if_rx_queue_intr_enable; this matches the table as written
	 * here — presumably TX and RX share queue vectors. Confirm upstream.
	 */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
};

/* Interrupt moderation ceiling, in interrupts per second */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool, this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

/* Shared-context template handed to iflib by ixgbe_register() */
static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE,

	.isc_tx_maxsegsize = PAGE_SIZE,

	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 *
 *   iflib callback: allocate per-TX-queue software state for ntxqsets
 *   queues and attach the iflib-provided descriptor memory
 *   (vaddrs/paddrs, one ring per queue set since ntxqs == 1).
 *   Also registers the deferred-work config gtasks (mod/msf/phy, and
 *   mbx/fdir when those features are enabled).
 *
 *   Returns 0 on success or ENOMEM; on partial failure all queue
 *   state is released via ixgbe_if_queues_free().
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = adapter->shared;
	struct ix_tx_queue *que;
	int                i, j, error;

	MPASS(adapter->num_tx_queues > 0);
	MPASS(adapter->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	adapter->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_IXGBE, M_NOWAIT | M_ZERO);
	if (!adapter->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
		    i);

		txr->adapter = que->adapter = adapter;
		/* Track this queue in the active-queue bitmask */
		adapter->active_queues |= (u64)1 << txr->me;

		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];

		txr->bytes = 0;
		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;

	}

	/* Deferred (config-context) tasks for SFP/PHY/mailbox/FDIR events */
	iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod,
	    "mod_task");
	iflib_config_gtask_init(ctx, &adapter->msf_task, ixgbe_handle_msf,
	    "msf_task");
	iflib_config_gtask_init(ctx, &adapter->phy_task, ixgbe_handle_phy,
	    "phy_task");
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		iflib_config_gtask_init(ctx, &adapter->mbx_task,
		    ixgbe_handle_mbx, "mbx_task");
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		iflib_config_gtask_init(ctx, &adapter->fdir_task,
		    ixgbe_reinit_fdir, "fdir_task");

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    adapter->num_tx_queues);

	return (0);

fail:
	ixgbe_if_queues_free(ctx);

	return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 *
 *   iflib callback: allocate per-RX-queue software state for nrxqsets
 *   queues and attach the iflib-provided descriptor memory.
 *   Returns 0 on success or ENOMEM.
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct
ix_rx_queue *que; 484 int i; 485 486 MPASS(adapter->num_rx_queues > 0); 487 MPASS(adapter->num_rx_queues == nrxqsets); 488 MPASS(nrxqs == 1); 489 490 /* Allocate queue structure memory */ 491 adapter->rx_queues = 492 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets, 493 M_IXGBE, M_NOWAIT | M_ZERO); 494 if (!adapter->rx_queues) { 495 device_printf(iflib_get_dev(ctx), 496 "Unable to allocate TX ring memory\n"); 497 return (ENOMEM); 498 } 499 500 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { 501 struct rx_ring *rxr = &que->rxr; 502 503 /* In case SR-IOV is enabled, align the index properly */ 504 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, 505 i); 506 507 rxr->adapter = que->adapter = adapter; 508 509 /* get the virtual and physical address of the hw queues */ 510 rxr->tail = IXGBE_RDT(rxr->me); 511 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; 512 rxr->rx_paddr = paddrs[i]; 513 rxr->bytes = 0; 514 rxr->que = que; 515 } 516 517 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n", 518 adapter->num_rx_queues); 519 520 return (0); 521 } /* ixgbe_if_rx_queues_alloc */ 522 523 /************************************************************************ 524 * ixgbe_if_queues_free 525 ************************************************************************/ 526 static void 527 ixgbe_if_queues_free(if_ctx_t ctx) 528 { 529 struct adapter *adapter = iflib_get_softc(ctx); 530 struct ix_tx_queue *tx_que = adapter->tx_queues; 531 struct ix_rx_queue *rx_que = adapter->rx_queues; 532 int i; 533 534 if (tx_que != NULL) { 535 for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) { 536 struct tx_ring *txr = &tx_que->txr; 537 if (txr->tx_rsq == NULL) 538 break; 539 540 free(txr->tx_rsq, M_IXGBE); 541 txr->tx_rsq = NULL; 542 } 543 544 free(adapter->tx_queues, M_IXGBE); 545 adapter->tx_queues = NULL; 546 } 547 if (rx_que != NULL) { 548 free(adapter->rx_queues, M_IXGBE); 549 adapter->rx_queues = NULL; 550 } 551 } /* 
ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the hash key
 *   registers (RSSRK) and the hash-type selection register (MRQC).
 *   With IXGBE_FEATURE_RSS enabled the kernel RSS key/indirection
 *   table is used; otherwise a random key and a simple round-robin
 *   mapping over num_rx_queues is programmed.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id, table_size, index_mult;
	int             i, j;
	u32             rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has the larger 512-entry table */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_rx_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Four entries are accumulated per 32-bit register write.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries beyond 128 live in the extended table */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the kernel hash config into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = adapter->shared;
	struct ixgbe_hw    *hw = &adapter->hw;
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int                i, j;
	u32                bufsz, fctrl, srrctl, rxcsum;
	u32                hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 also needs discard-pause-frames / pass-MAC-control */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Round the mbuf buffer size up to the SRRCTL BSIZEPKT granularity */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;

		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_rx_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	/* 82599 and newer: select packet types for header split/replication */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &adapter->hw;
	if_softc_ctx_t     scctx = adapter->shared;
	struct ix_tx_queue *que;
	int                i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64            tdba = txr->tx_paddr;
		u32            txctrl = 0;
		int            j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Reset software ring indices and report-status slots */
		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the TX DMA engine */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 *
 *   device_register entry point: hand iflib the shared-context
 *   template describing this driver's queue/descriptor layout.
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (ixgbe_sctx);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
874 * 875 * return 0 on success, positive on failure 876 ************************************************************************/ 877 static int 878 ixgbe_if_attach_pre(if_ctx_t ctx) 879 { 880 struct adapter *adapter; 881 device_t dev; 882 if_softc_ctx_t scctx; 883 struct ixgbe_hw *hw; 884 int error = 0; 885 u32 ctrl_ext; 886 887 INIT_DEBUGOUT("ixgbe_attach: begin"); 888 889 /* Allocate, clear, and link in our adapter structure */ 890 dev = iflib_get_dev(ctx); 891 adapter = iflib_get_softc(ctx); 892 adapter->hw.back = adapter; 893 adapter->ctx = ctx; 894 adapter->dev = dev; 895 scctx = adapter->shared = iflib_get_softc_ctx(ctx); 896 adapter->media = iflib_get_media(ctx); 897 hw = &adapter->hw; 898 899 /* Determine hardware revision */ 900 hw->vendor_id = pci_get_vendor(dev); 901 hw->device_id = pci_get_device(dev); 902 hw->revision_id = pci_get_revid(dev); 903 hw->subsystem_vendor_id = pci_get_subvendor(dev); 904 hw->subsystem_device_id = pci_get_subdevice(dev); 905 906 /* Do base PCI setup - map BAR0 */ 907 if (ixgbe_allocate_pci_resources(ctx)) { 908 device_printf(dev, "Allocation of PCI resources failed\n"); 909 return (ENXIO); 910 } 911 912 /* let hardware know driver is loaded */ 913 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 914 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 915 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 916 917 /* 918 * Initialize the shared code 919 */ 920 if (ixgbe_init_shared_code(hw) != 0) { 921 device_printf(dev, "Unable to initialize the shared code\n"); 922 error = ENXIO; 923 goto err_pci; 924 } 925 926 if (hw->mbx.ops.init_params) 927 hw->mbx.ops.init_params(hw); 928 929 hw->allow_unsupported_sfp = allow_unsupported_sfp; 930 931 if (hw->mac.type != ixgbe_mac_82598EB) 932 hw->phy.smart_speed = ixgbe_smart_speed; 933 934 ixgbe_init_device_features(adapter); 935 936 /* Enable WoL (if supported) */ 937 ixgbe_check_wol_support(adapter); 938 939 /* Verify adapter fan is still functional (if applicable) */ 940 if (adapter->feat_en & 
IXGBE_FEATURE_FAN_FAIL) { 941 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 942 ixgbe_check_fan_failure(adapter, esdp, FALSE); 943 } 944 945 /* Ensure SW/FW semaphore is free */ 946 ixgbe_init_swfw_semaphore(hw); 947 948 /* Set an initial default flow control value */ 949 hw->fc.requested_mode = ixgbe_flow_control; 950 951 hw->phy.reset_if_overtemp = TRUE; 952 error = ixgbe_reset_hw(hw); 953 hw->phy.reset_if_overtemp = FALSE; 954 if (error == IXGBE_ERR_SFP_NOT_PRESENT) { 955 /* 956 * No optics in this port, set up 957 * so the timer routine will probe 958 * for later insertion. 959 */ 960 adapter->sfp_probe = TRUE; 961 error = 0; 962 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 963 device_printf(dev, "Unsupported SFP+ module detected!\n"); 964 error = EIO; 965 goto err_pci; 966 } else if (error) { 967 device_printf(dev, "Hardware initialization failed\n"); 968 error = EIO; 969 goto err_pci; 970 } 971 972 /* Make sure we have a good EEPROM before we read from it */ 973 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { 974 device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); 975 error = EIO; 976 goto err_pci; 977 } 978 979 error = ixgbe_start_hw(hw); 980 switch (error) { 981 case IXGBE_ERR_EEPROM_VERSION: 982 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 983 break; 984 case IXGBE_ERR_SFP_NOT_SUPPORTED: 985 device_printf(dev, "Unsupported SFP+ Module\n"); 986 error = EIO; 987 goto err_pci; 988 case IXGBE_ERR_SFP_NOT_PRESENT: 989 device_printf(dev, "No SFP+ Module found\n"); 990 /* falls thru */ 991 default: 992 break; 993 } 994 995 /* Most of the iflib initialization... 
*/ 996 997 iflib_set_mac(ctx, hw->mac.addr); 998 switch (adapter->hw.mac.type) { 999 case ixgbe_mac_X550: 1000 case ixgbe_mac_X550EM_x: 1001 case ixgbe_mac_X550EM_a: 1002 scctx->isc_rss_table_size = 512; 1003 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; 1004 break; 1005 default: 1006 scctx->isc_rss_table_size = 128; 1007 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16; 1008 } 1009 1010 /* Allow legacy interrupts */ 1011 ixgbe_txrx.ift_legacy_intr = ixgbe_intr; 1012 1013 scctx->isc_txqsizes[0] = 1014 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + 1015 sizeof(u32), DBA_ALIGN), 1016 scctx->isc_rxqsizes[0] = 1017 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), 1018 DBA_ALIGN); 1019 1020 /* XXX */ 1021 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | 1022 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO; 1023 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1024 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER; 1025 scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR); 1026 } else { 1027 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP; 1028 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; 1029 scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR); 1030 } 1031 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; 1032 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; 1033 scctx->isc_tx_tso_segsize_max = PAGE_SIZE; 1034 1035 scctx->isc_txrx = &ixgbe_txrx; 1036 1037 scctx->isc_capenable = IXGBE_CAPS; 1038 1039 return (0); 1040 1041 err_pci: 1042 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 1043 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 1044 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 1045 ixgbe_free_pci_resources(ctx); 1046 1047 return (error); 1048 } /* ixgbe_if_attach_pre */ 1049 1050 /********************************************************************* 1051 * ixgbe_if_attach_post - Device initialization routine, part 2 1052 * 1053 * Called during driver load, but after interrupts and 1054 * 
   resources have been allocated and configured.
 * Sets up some data structures not relevant to iflib.
 *
 * return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;

	dev = iflib_get_dev(ctx);
	adapter = iflib_get_softc(ctx);
	hw = &adapter->hw;


	/* Legacy (INTx) attach only works if the hardware supports it */
	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev, "Device does not support legacy interrupts");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	ixgbe_initialize_iov(adapter);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);
	ixgbe_add_hw_stats(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	return (0);
err:
	/* NOTE(review): adapter->mta is not freed here; presumably released
	 * by the detach path — confirm against ixgbe_if_detach. */
	return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 dev_caps = 0;

	/* Find out WoL support for port */
	adapter->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	/* WoL is supported on all ports, or only on port 0 */
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	    hw->bus.func == 0))
		adapter->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct adapter *adapter = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setbaudrate(ifp, IF_Gbps(10));

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWFILTER);
	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);

	ixgbe_add_media_types(ctx);

	/* Autoselect media by default */
	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 *
 *   Report interface statistics from the driver-maintained counters;
 *   anything not tracked here falls back to the iflib default.
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	case IFCOUNTER_OMCASTS:
		return (adapter->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (adapter->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (0);
	case IFCOUNTER_IERRORS:
		return (adapter->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 *
 *   Service an SIOCGI2C-style request by reading req->len bytes
 *   from the PHY's I2C bus into req->data.
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;


	if (hw->phy.ops.read_i2c_byte == NULL)
		return (ENXIO);
	/* NOTE(review): per-byte read errors are ignored; data for failed
	 * offsets may be stale/zero — consider checking the return value. */
	for (i = 0; i < req->len; i++)
		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
		    req->dev_addr, &req->data[i]);
	return (0);
} /* ixgbe_if_i2c_req */

/************************************************************************
 * ixgbe_add_media_types
 *
 *   Populate the ifmedia list from the PHY's supported physical layers.
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = iflib_get_dev(ctx);
	u64 layer;

	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		/* Multispeed fiber modules can also run 1G */
		if (hw->phy.multispeed_fiber)
			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	/*
	 * Older media layers lack backplane (K*) defines; map each
	 * backplane type onto its closest classic equivalent and log it.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 *
 *   Return TRUE when the MAC/PHY combination uses a pluggable
 *   SFP/QSFP module (fiber media), FALSE otherwise.
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 only has SFP behind the NL PHY */
		if (hw->phy.type == ixgbe_phy_nl)
			return (TRUE);
		return (FALSE);
	case ixgbe_mac_82599EB:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (TRUE);
		default:
			return (FALSE);
		}
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
			return (TRUE);
		return (FALSE);
	default:
		return (FALSE);
	}
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 *
 *   Kick off module identification for SFP ports, or negotiate and
 *   set up the link directly for fixed-media ports.
 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;  /* err holds shared-code status values */
	bool sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/* Defer to the module-insertion task to identify the SFP */
		GROUPTASK_ENQUEUE(&adapter->mod_task);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: ask the MAC what it can do */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;

	/* Registers here are clear-on-read; accumulate into softc copies */
	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	/* NOTE(review): missed_rx and total_missed_rx are never updated in
	 * this function, so the subtraction below and the IQDROPS value are
	 * effectively zero — confirm whether MPC reads should feed them. */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	stats->gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit byte counters: combine low and high halves */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	/* Tx counters include flow-control frames; back them out */
	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;

	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(adapter, stats->gprc);
	IXGBE_SET_OPACKETS(adapter, stats->gptc);
	IXGBE_SET_IBYTES(adapter, stats->gorc);
	IXGBE_SET_OBYTES(adapter, stats->gotc);
	IXGBE_SET_IMCASTS(adapter, stats->mprc);
	IXGBE_SET_OMCASTS(adapter, stats->mptc);
	IXGBE_SET_COLLISIONS(adapter, 0);
	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
} /* ixgbe_update_stats_counters */

/************************************************************************
 * ixgbe_add_hw_stats
 *
 *   Add sysctl variables, one per statistic, to the system.
1527 ************************************************************************/ 1528 static void 1529 ixgbe_add_hw_stats(struct adapter *adapter) 1530 { 1531 device_t dev = iflib_get_dev(adapter->ctx); 1532 struct ix_rx_queue *rx_que; 1533 struct ix_tx_queue *tx_que; 1534 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1535 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1536 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1537 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1538 struct sysctl_oid *stat_node, *queue_node; 1539 struct sysctl_oid_list *stat_list, *queue_list; 1540 int i; 1541 1542 #define QUEUE_NAME_LEN 32 1543 char namebuf[QUEUE_NAME_LEN]; 1544 1545 /* Driver Statistics */ 1546 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1547 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); 1548 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1549 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); 1550 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1551 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); 1552 1553 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { 1554 struct tx_ring *txr = &tx_que->txr; 1555 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1556 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1557 CTLFLAG_RD, NULL, "Queue Name"); 1558 queue_list = SYSCTL_CHILDREN(queue_node); 1559 1560 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1561 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1562 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1563 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1564 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1565 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1566 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1567 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1568 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1569 CTLFLAG_RD, &txr->total_packets, 1570 
"Queue Packets Transmitted"); 1571 } 1572 1573 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { 1574 struct rx_ring *rxr = &rx_que->rxr; 1575 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1576 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1577 CTLFLAG_RD, NULL, "Queue Name"); 1578 queue_list = SYSCTL_CHILDREN(queue_node); 1579 1580 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1581 CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i], 1582 sizeof(&adapter->rx_queues[i]), 1583 ixgbe_sysctl_interrupt_rate_handler, "IU", 1584 "Interrupt Rate"); 1585 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1586 CTLFLAG_RD, &(adapter->rx_queues[i].irqs), 1587 "irqs on this queue"); 1588 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1589 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1590 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1591 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1592 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1593 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1594 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1595 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1596 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1597 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1598 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1599 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1600 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1601 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets"); 1602 } 1603 1604 /* MAC stats get their own sub node */ 1605 1606 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 1607 CTLFLAG_RD, NULL, "MAC Statistics"); 1608 stat_list = SYSCTL_CHILDREN(stat_node); 1609 1610 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 1611 CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); 1612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", 1613 CTLFLAG_RD, 
&stats->illerrc, "Illegal Byte Errors"); 1614 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", 1615 CTLFLAG_RD, &stats->errbc, "Byte Errors"); 1616 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", 1617 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); 1618 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", 1619 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults"); 1620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", 1621 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults"); 1622 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", 1623 CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); 1624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets", 1625 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count"); 1626 1627 /* Flow Control stats */ 1628 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 1629 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted"); 1630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 1631 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received"); 1632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 1633 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted"); 1634 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 1635 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received"); 1636 1637 /* Packet Reception Stats */ 1638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", 1639 CTLFLAG_RD, &stats->tor, "Total Octets Received"); 1640 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 1641 CTLFLAG_RD, &stats->gorc, "Good Octets Received"); 1642 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", 1643 CTLFLAG_RD, &stats->tpr, "Total Packets Received"); 1644 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 1645 CTLFLAG_RD, &stats->gprc, "Good Packets Received"); 1646 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 1647 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); 1648 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, 
"bcast_pkts_rcvd", 1649 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); 1650 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 1651 CTLFLAG_RD, &stats->prc64, "64 byte frames received "); 1652 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 1653 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); 1654 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 1655 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); 1656 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 1657 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); 1658 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 1659 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); 1660 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 1661 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received"); 1662 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 1663 CTLFLAG_RD, &stats->ruc, "Receive Undersized"); 1664 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 1665 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received "); 1666 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 1667 CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); 1668 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 1669 CTLFLAG_RD, &stats->rjc, "Received Jabber"); 1670 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 1671 CTLFLAG_RD, &stats->mngprc, "Management Packets Received"); 1672 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 1673 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped"); 1674 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 1675 CTLFLAG_RD, &stats->xec, "Checksum Errors"); 1676 1677 /* Packet Transmission Stats */ 1678 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 1679 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); 1680 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 
	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
	/* Per-size TX frame histogram counters */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 *   Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
	/* Ring pointer was stashed in oid_arg1 when the sysctl was created */
	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if (!txr)
		return (0);

	/* Read the head pointer live from the hardware register */
	val = IXGBE_READ_REG(&txr->adapter->hw,
	    IXGBE_TDH(txr->me));
	/*
	 * Effectively read-only: sysctl_handle_int() will accept a value
	 * from userland, but nothing is ever written back to the register.
	 */
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;

	return (0);
} /* ixgbe_sysctl_tdh_handler */

/************************************************************************
 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
 *
 *   Retrieves the TDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
{
	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if (!txr)
		return (0);

	/* Live register read; writes from userland are ignored (see TDH) */
	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;

	return (0);
} /* ixgbe_sysctl_tdt_handler */

/************************************************************************
 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
 *
 *   Retrieves the RDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
{
	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if (!rxr)
		return (0);

	/* Live register read; writes from userland are ignored (see TDH) */
	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;

	return (0);
} /* ixgbe_sysctl_rdh_handler */

/************************************************************************
 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
 *
 *   Retrieves the RDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
{
	/* Ring pointer was stashed in oid_arg1 when the sysctl was created */
	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if (!rxr)
		return (0);

	/* Live register read; any value written from userland is ignored */
	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;

	return (0);
} /* ixgbe_sysctl_rdt_handler */

/************************************************************************
 * ixgbe_if_vlan_register
 *
 *   Run via vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This
 *   just creates the entry in the soft version of the
 *   VFTA, init will repopulate the real table.
 ************************************************************************/
static void
ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	u16 index, bit;

	/*
	 * shadow_vfta is an array of 32-bit words; vtag bits [11:5] pick
	 * the word (0x7F mask) and bits [4:0] pick the bit within it.
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Push the soft VFTA (and tag-strip config) out to the hardware */
	ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_register */

/************************************************************************
 * ixgbe_if_vlan_unregister
 *
 *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
 ************************************************************************/
static void
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	u16 index, bit;

	/* Same word/bit decomposition as ixgbe_if_vlan_register() */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_unregister */

/************************************************************************
 * ixgbe_setup_vlan_hw_support
 *
 *   Program VLAN tag stripping and the hardware VLAN filter table
 *   from the driver's soft state (shadow_vfta / capenable flags).
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	int i;
	u32 ctrl;


	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (adapter->num_vlans == 0)
		return;

	/* Setup the queues for vlans */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rxr = &adapter->rx_queues[i].rxr;
			/* On 82599 the VLAN enable is per/queue in RXDCTL */
			if (hw->mac.type != ixgbe_mac_82598EB) {
				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
				ctrl |= IXGBE_RXDCTL_VME;
				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
			}
			rxr->vtag_strip = TRUE;
		}
	}

	/* Without HW filtering there is nothing more to program */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	/* Only write words that carry at least one registered VLAN */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	/* 82598 has a single global VLAN-strip enable, not per-queue */
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */

/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = iflib_get_dev(adapter->ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int bus_info_valid = TRUE;
	u32 offset;
	u16 link;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	/* Two hops up: function -> internal bridge (pcib) */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Two more hops: internal bridge -> physical slot's pcib */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	    "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ?
"Width x1" : 1972 "Unknown")); 1973 1974 if (bus_info_valid) { 1975 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 1976 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 1977 (hw->bus.speed == ixgbe_bus_speed_2500))) { 1978 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1979 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 1980 } 1981 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 1982 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 1983 (hw->bus.speed < ixgbe_bus_speed_8000))) { 1984 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1985 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 1986 } 1987 } else 1988 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n"); 1989 1990 return; 1991 } /* ixgbe_get_slot_info */ 1992 1993 /************************************************************************ 1994 * ixgbe_if_msix_intr_assign 1995 * 1996 * Setup MSI-X Interrupt resources and handlers 1997 ************************************************************************/ 1998 static int 1999 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 2000 { 2001 struct adapter *adapter = iflib_get_softc(ctx); 2002 struct ix_rx_queue *rx_que = adapter->rx_queues; 2003 struct ix_tx_queue *tx_que; 2004 int error, rid, vector = 0; 2005 int cpu_id = 0; 2006 char buf[16]; 2007 2008 /* Admin Que is vector 0*/ 2009 rid = vector + 1; 2010 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) { 2011 rid = vector + 1; 2012 2013 snprintf(buf, sizeof(buf), "rxq%d", i); 2014 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 2015 IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 2016 2017 if (error) { 2018 device_printf(iflib_get_dev(ctx), 2019 "Failed to allocate que int %d 
err: %d", i, error); 2020 adapter->num_rx_queues = i + 1; 2021 goto fail; 2022 } 2023 2024 rx_que->msix = vector; 2025 adapter->active_queues |= (u64)(1 << rx_que->msix); 2026 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 2027 /* 2028 * The queue ID is used as the RSS layer bucket ID. 2029 * We look up the queue ID -> RSS CPU ID and select 2030 * that. 2031 */ 2032 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2033 } else { 2034 /* 2035 * Bind the msix vector, and thus the 2036 * rings to the corresponding cpu. 2037 * 2038 * This just happens to match the default RSS 2039 * round-robin bucket -> queue -> CPU allocation. 2040 */ 2041 if (adapter->num_rx_queues > 1) 2042 cpu_id = i; 2043 } 2044 2045 } 2046 for (int i = 0; i < adapter->num_tx_queues; i++) { 2047 snprintf(buf, sizeof(buf), "txq%d", i); 2048 tx_que = &adapter->tx_queues[i]; 2049 tx_que->msix = i % adapter->num_rx_queues; 2050 iflib_softirq_alloc_generic(ctx, 2051 &adapter->rx_queues[tx_que->msix].que_irq, 2052 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2053 } 2054 rid = vector + 1; 2055 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, 2056 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq"); 2057 if (error) { 2058 device_printf(iflib_get_dev(ctx), 2059 "Failed to register admin handler"); 2060 return (error); 2061 } 2062 2063 adapter->vector = vector; 2064 2065 return (0); 2066 fail: 2067 iflib_irq_free(ctx, &adapter->irq); 2068 rx_que = adapter->rx_queues; 2069 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) 2070 iflib_irq_free(ctx, &rx_que->que_irq); 2071 2072 return (error); 2073 } /* ixgbe_if_msix_intr_assign */ 2074 2075 /********************************************************************* 2076 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2077 **********************************************************************/ 2078 static int 2079 ixgbe_msix_que(void *arg) 2080 { 2081 struct ix_rx_queue *que = arg; 2082 struct adapter *adapter = que->adapter; 2083 struct ifnet 
	*ifp = iflib_get_ifp(que->adapter->ctx);

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return 0;

	/* Mask this queue's interrupt until the deferred handler runs */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_msix_que */

/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_if_media_status: begin");

	/* Kick the admin task so link state gets refreshed */
	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report "valid but inactive" and stop */
	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map (physical layer, link speed) -> ifmedia subtype.
	 * Several physical layers can be flagged at once, hence the
	 * chain of independent if-blocks rather than a single switch.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
#ifndef IFM_ETH_XTYPE
	/* Old ifmedia headers: approximate KR/KX4 with SR/CX4 subtypes */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	/* Newer ifmedia headers: real KR/KX4/KX subtypes are available */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized...
	 */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_media_status */

/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediopt option with ifconfig.
 ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; user-requested changes are refused */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Translate the requested subtype into an advertised-speed mask */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_10G_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_10G_LRM:
	case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
#ifndef IFM_ETH_XTYPE
	case IFM_1000_CX: /* KX */
#else
	case IFM_1000_KX:
#endif
	case IFM_1000_LX:
	case IFM_1000_SX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_1000_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Record the advertised speeds as the sysctl-style bitmask */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL) ?
	    8 : 0);

	return (0);

invalid:
	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_if_media_change */

/************************************************************************
 * ixgbe_set_promisc
 *
 *   Program FCTRL unicast/multicast promiscuous bits from if_flags
 *   and the current multicast address count.
 ************************************************************************/
static int
ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	u32 rctl;
	int mcnt = 0;

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
	}
	/* Only clear multicast-promisc when the filter table can hold all */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* PROMISC/ALLMULTI override the cleared bits written above */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
	return (0);
} /* ixgbe_if_promisc_set */

/************************************************************************
 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * NOTE(review): this reads EICS rather than EICR; presumably an
	 * EICS read is aliased to the cause register on this hardware --
	 * confirm against the 82599/X550 datasheets.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(adapter->ctx);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return (FILTER_HANDLED);
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			GROUPTASK_ENQUEUE(&adapter->fdir_task);
		} else
			if (eicr & IXGBE_EICR_ECC) {
				device_printf(iflib_get_dev(adapter->ctx),
				    "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
			}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(adapter->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(adapter->ctx),
				    "System shutdown required!\n");
				break;
			default:
				/* Other macs use the thermal-sensor bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(adapter->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(adapter->ctx),
				    "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			GROUPTASK_ENQUEUE(&adapter->mbx_task);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal -> module task (once at a time) */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
				GROUPTASK_ENQUEUE(&adapter->mod_task);
		}

		/* 82599: SDP1 signals a multi-speed fiber change */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
				GROUPTASK_ENQUEUE(&adapter->msf_task);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		GROUPTASK_ENQUEUE(&adapter->phy_task);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return (FILTER_HANDLED);
} /* ixgbe_msix_link */

/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Read/write sysctl for the per-vector interrupt rate (EITR).
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
	int error;
	unsigned int reg, usec, rate;

	/* EITR bits [11:3] hold the interval in 2-usec units */
	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	error = sysctl_handle_int(oidp, &rate, 0, req);
	if (error || !req->newptr)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	ixgbe_max_interrupt_rate = 0;
	if (rate > 0 && rate < 500000) {
		/* Clamp to the hardware minimum of 1000 ints/sec */
		if (rate < 1000)
			rate = 1000;
		ixgbe_max_interrupt_rate = rate;
		reg |= ((4000000/rate) & 0xff8);
	}
	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */

/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Register the per-device sysctl tree: common knobs for all parts,
 *   plus conditional nodes for X550, WoL-capable, and external-PHY parts.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
	    IXGBE_SYSCTL_DESC_SET_FC);

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
	    IXGBE_SYSCTL_DESC_ADV_SPEED);

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
	    "I", "PCI Power State");

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
		    "I", "DMA Coalesce");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
		    "I", "Enable/Disable Wake Up Filters");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
		    CTLFLAG_RD, NULL, "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
		    "I", "Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
	}
} /* ixgbe_add_device_sysctls */

/************************************************************************
 * ixgbe_allocate_pci_resources
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	int rid;

	/* BAR(0) is the memory-mapped register window. */
	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	/* Save bus_space values for READ/WRITE_REG macros */
	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	/* Set hw values for shared code */
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	return (0);
} /* ixgbe_allocate_pci_resources */

/************************************************************************
 * ixgbe_detach - Device removal routine
 *
 * Called when the driver is being removed.
 * Stops the adapter and deallocates all the resources
 * that were allocated for driver operation.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_detach(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Refuse to detach while VFs are still attached. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Tear down the deferred tasklets before touching the hardware. */
	iflib_config_gtask_deinit(&adapter->mod_task);
	iflib_config_gtask_deinit(&adapter->msf_task);
	iflib_config_gtask_deinit(&adapter->phy_task);
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		iflib_config_gtask_deinit(&adapter->mbx_task);

	ixgbe_setup_low_power_mode(ctx);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	ixgbe_free_pci_resources(ctx);
	free(adapter->mta, M_IXGBE);

	return (0);
} /* ixgbe_if_detach */

/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 * Prepare the adapter/port for LPLU and/or WoL
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = iflib_get_dev(ctx);
	s32 error = 0;

	/* No WoL configured: the PHY can be powered down entirely. */
	if (!hw->wol_enabled)
		ixgbe_set_phy_power(hw, FALSE);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = TRUE;
		ixgbe_if_stop(ctx);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = FALSE;
	} else {
		/* Just stop for other adapters */
		ixgbe_if_stop(ctx);
	}

	return error;
} /* ixgbe_setup_low_power_mode */

/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 ************************************************************************/
static int
ixgbe_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	error = ixgbe_setup_low_power_mode(ctx);

	return (error);
} /* ixgbe_if_shutdown */

/************************************************************************
 * ixgbe_suspend
 *
 * From D0 to D3
 ************************************************************************/
static int
ixgbe_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	error = ixgbe_setup_low_power_mode(ctx);

	return (error);
} /* ixgbe_if_suspend */

/************************************************************************
 * ixgbe_resume
 *
 * From D3 to D0
 ************************************************************************/
static int
ixgbe_if_resume(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 wus;

	INIT_DEBUGOUT("ixgbe_resume: begin");

	/* Read & clear WUS register */
	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
	if (wus)
		device_printf(dev, "Woken up by (WUS): %#010x\n",
		    IXGBE_READ_REG(hw, IXGBE_WUS));
	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
	/* And clear WUFC until next low-power transition */
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

	/*
	 * Required after D3->D0 transition;
	 * will re-advertise all previous advertised speeds
	 */
	if (ifp->if_flags & IFF_UP)
		ixgbe_if_init(ctx);

	return (0);
} /* ixgbe_if_resume */

/************************************************************************
 * ixgbe_if_mtu_set - Ioctl mtu entry point
 *
 * Return 0 on success, EINVAL on failure
 ************************************************************************/
static int
ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	int error = 0;

	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");

	/*
	 * Only the upper bound is validated here; the frame size
	 * (MTU + L2 header) is picked up on the next ixgbe_if_init().
	 */
	if (mtu > IXGBE_MAX_MTU) {
		error = EINVAL;
	} else {
		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
	}

	return error;
} /* ixgbe_if_mtu_set */

/************************************************************************
 * ixgbe_if_crcstrip_set
 ************************************************************************/
static void
ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
{
	struct adapter *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	/* crc stripping is set in two places:
	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
	 * IXGBE_RDRXCTL (set by the original driver in
	 *	ixgbe_setup_hw_rsc() called in init_locked.
	 *	We disable the setting when netmap is compiled in).
	 * We update the values here, but also in ixgbe.c because
	 * init_locked sometimes is called outside our control.
	 */
	uint32_t hl, rxc;

	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
#ifdef NETMAP
	if (netmap_verbose)
		D("%s read  HLREG 0x%x rxc 0x%x",
		    onoff ? "enter" : "exit", hl, rxc);
#endif
	/* hw requirements ... */
	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
	rxc |= IXGBE_RDRXCTL_RSCACKC;
	if (onoff && !crcstrip) {
		/* keep the crc. Fast rx */
		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
	} else {
		/* reset default mode */
		hl |= IXGBE_HLREG0_RXCRCSTRP;
		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
	}
#ifdef NETMAP
	if (netmap_verbose)
		D("%s write HLREG 0x%x rxc 0x%x",
		    onoff ? "enter" : "exit", hl, rxc);
#endif
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
} /* ixgbe_if_crcstrip_set */

/*********************************************************************
 * ixgbe_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init
 *   entry point in network interface structure. It is also
 *   used by the driver as a hw/sw initialization routine to
 *   get to a consistent state.
 *
 *   Return 0 on success, positive on failure
 **********************************************************************/
void
ixgbe_if_init(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_rx_queue *rx_que;
	struct ix_tx_queue *tx_que;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;

	int i, j, err;

	INIT_DEBUGOUT("ixgbe_if_init: begin");

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	/* NOTE(review): RAR[0] is written twice - once above and once
	 * after the LAA copy; the second write is the effective one. */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixgbe_if_multi_set(ctx);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Configure RX settings */
	ixgbe_initialize_receive_units(ctx);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to 10ms) until the ring reports enabled. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Set up MSI/MSI-X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(ctx);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_if_enable_intr(ctx);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

} /* ixgbe_if_init */

/************************************************************************
 * ixgbe_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *     (yes this is all very magic and confusing :)
 *    - entry is the register array entry
 *    - vector is the MSI-X vector for this queue
 *    - type is RX/TX/MISC
 ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (entry & 0x3)));
		ivar |= (vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		/* NOTE(review): no break here, so this case falls into
		 * the empty default - harmless but looks unintentional. */
	default:
		break;
	}
} /* ixgbe_set_ivar */

/************************************************************************
 * ixgbe_configure_ivars
 *
 *   Route every RX and TX queue, plus the link interrupt, to its
 *   MSI-X vector and seed each queue's initial EITR moderation.
 ************************************************************************/
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
	struct ix_rx_queue *rx_que = adapter->rx_queues;
	struct ix_tx_queue *tx_que = adapter->tx_queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		adapter->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		/* First the RX queue entry */
		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);

		/* Set an Initial EITR value */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
	}
	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		/* ... and the TX */
		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
	}
	/* For the Link interrupt */
	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixgbe_configure_ivars */

/************************************************************************
 * ixgbe_config_gpie
 *
 *   Program GPIE with MSI-X mode and the SDP interrupt causes
 *   (fan failure, thermal sensor, SFP presence) for this MAC.
 ************************************************************************/
static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (adapter->intr_type == IFLIB_INTR_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		     |  IXGBE_GPIE_EIAME
		     |  IXGBE_GPIE_PBA_SUPPORT
		     |  IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

} /* ixgbe_config_gpie */

/************************************************************************
3188 * ixgbe_config_delay_values 3189 * 3190 * Requires adapter->max_frame_size to be set. 3191 ************************************************************************/ 3192 static void 3193 ixgbe_config_delay_values(struct adapter *adapter) 3194 { 3195 struct ixgbe_hw *hw = &adapter->hw; 3196 u32 rxpb, frame, size, tmp; 3197 3198 frame = adapter->max_frame_size; 3199 3200 /* Calculate High Water */ 3201 switch (hw->mac.type) { 3202 case ixgbe_mac_X540: 3203 case ixgbe_mac_X550: 3204 case ixgbe_mac_X550EM_x: 3205 case ixgbe_mac_X550EM_a: 3206 tmp = IXGBE_DV_X540(frame, frame); 3207 break; 3208 default: 3209 tmp = IXGBE_DV(frame, frame); 3210 break; 3211 } 3212 size = IXGBE_BT2KB(tmp); 3213 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 3214 hw->fc.high_water[0] = rxpb - size; 3215 3216 /* Now calculate Low Water */ 3217 switch (hw->mac.type) { 3218 case ixgbe_mac_X540: 3219 case ixgbe_mac_X550: 3220 case ixgbe_mac_X550EM_x: 3221 case ixgbe_mac_X550EM_a: 3222 tmp = IXGBE_LOW_DV_X540(frame); 3223 break; 3224 default: 3225 tmp = IXGBE_LOW_DV(frame); 3226 break; 3227 } 3228 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 3229 3230 hw->fc.pause_time = IXGBE_FC_PAUSE; 3231 hw->fc.send_xon = TRUE; 3232 } /* ixgbe_config_delay_values */ 3233 3234 /************************************************************************ 3235 * ixgbe_set_multi - Multicast Update 3236 * 3237 * Called whenever multicast address list is updated. 
3238 ************************************************************************/ 3239 static int 3240 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count) 3241 { 3242 struct adapter *adapter = arg; 3243 struct ixgbe_mc_addr *mta = adapter->mta; 3244 3245 if (ifma->ifma_addr->sa_family != AF_LINK) 3246 return (0); 3247 if (count == MAX_NUM_MULTICAST_ADDRESSES) 3248 return (0); 3249 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 3250 mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3251 mta[count].vmdq = adapter->pool; 3252 3253 return (1); 3254 } /* ixgbe_mc_filter_apply */ 3255 3256 static void 3257 ixgbe_if_multi_set(if_ctx_t ctx) 3258 { 3259 struct adapter *adapter = iflib_get_softc(ctx); 3260 struct ixgbe_mc_addr *mta; 3261 struct ifnet *ifp = iflib_get_ifp(ctx); 3262 u8 *update_ptr; 3263 int mcnt = 0; 3264 u32 fctrl; 3265 3266 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin"); 3267 3268 mta = adapter->mta; 3269 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 3270 3271 mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter); 3272 3273 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3274 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3275 if (ifp->if_flags & IFF_PROMISC) 3276 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3277 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 3278 ifp->if_flags & IFF_ALLMULTI) { 3279 fctrl |= IXGBE_FCTRL_MPE; 3280 fctrl &= ~IXGBE_FCTRL_UPE; 3281 } else 3282 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3283 3284 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 3285 3286 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { 3287 update_ptr = (u8 *)mta; 3288 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 3289 ixgbe_mc_array_itr, TRUE); 3290 } 3291 3292 } /* ixgbe_if_multi_set */ 3293 3294 /************************************************************************ 3295 * ixgbe_mc_array_itr 3296 * 3297 * An iterator function needed by the multicast shared code. 
 *   It feeds the shared code routine the addresses in the
 *   array of ixgbe_set_multi() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	/* Current entry; advance the cursor so the next call sees the next. */
	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8*)(mta + 1);

	return (mta->addr);
} /* ixgbe_mc_array_itr */

/************************************************************************
 * ixgbe_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct adapter *adapter = iflib_get_softc(ctx);

	/* Do the periodic work only once per tick, on queue 0. */
	if (qid != 0)
		return;

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(ctx))
			return; /* Nothing to do */

	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
	    &adapter->link_up, 0);

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);

} /* ixgbe_if_timer */

/************************************************************************
 * ixgbe_sfp_probe
 *
 *   Determine if a port had optics inserted.
3345 ************************************************************************/ 3346 static bool 3347 ixgbe_sfp_probe(if_ctx_t ctx) 3348 { 3349 struct adapter *adapter = iflib_get_softc(ctx); 3350 struct ixgbe_hw *hw = &adapter->hw; 3351 device_t dev = iflib_get_dev(ctx); 3352 bool result = FALSE; 3353 3354 if ((hw->phy.type == ixgbe_phy_nl) && 3355 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3356 s32 ret = hw->phy.ops.identify_sfp(hw); 3357 if (ret) 3358 goto out; 3359 ret = hw->phy.ops.reset(hw); 3360 adapter->sfp_probe = FALSE; 3361 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3362 device_printf(dev, "Unsupported SFP+ module detected!"); 3363 device_printf(dev, 3364 "Reload driver with supported module.\n"); 3365 goto out; 3366 } else 3367 device_printf(dev, "SFP+ module detected!\n"); 3368 /* We now have supported optics */ 3369 result = TRUE; 3370 } 3371 out: 3372 3373 return (result); 3374 } /* ixgbe_sfp_probe */ 3375 3376 /************************************************************************ 3377 * ixgbe_handle_mod - Tasklet for SFP module interrupts 3378 ************************************************************************/ 3379 static void 3380 ixgbe_handle_mod(void *context) 3381 { 3382 if_ctx_t ctx = context; 3383 struct adapter *adapter = iflib_get_softc(ctx); 3384 struct ixgbe_hw *hw = &adapter->hw; 3385 device_t dev = iflib_get_dev(ctx); 3386 u32 err, cage_full = 0; 3387 3388 adapter->sfp_reinit = 1; 3389 if (adapter->hw.need_crosstalk_fix) { 3390 switch (hw->mac.type) { 3391 case ixgbe_mac_82599EB: 3392 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3393 IXGBE_ESDP_SDP2; 3394 break; 3395 case ixgbe_mac_X550EM_x: 3396 case ixgbe_mac_X550EM_a: 3397 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3398 IXGBE_ESDP_SDP0; 3399 break; 3400 default: 3401 break; 3402 } 3403 3404 if (!cage_full) 3405 goto handle_mod_out; 3406 } 3407 3408 err = hw->phy.ops.identify_sfp(hw); 3409 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3410 device_printf(dev, 3411 
"Unsupported SFP+ module type was detected.\n"); 3412 goto handle_mod_out; 3413 } 3414 3415 if (hw->mac.type == ixgbe_mac_82598EB) 3416 err = hw->phy.ops.reset(hw); 3417 else 3418 err = hw->mac.ops.setup_sfp(hw); 3419 3420 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3421 device_printf(dev, 3422 "Setup failure - unsupported SFP+ module type.\n"); 3423 goto handle_mod_out; 3424 } 3425 GROUPTASK_ENQUEUE(&adapter->msf_task); 3426 return; 3427 3428 handle_mod_out: 3429 adapter->sfp_reinit = 0; 3430 } /* ixgbe_handle_mod */ 3431 3432 3433 /************************************************************************ 3434 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts 3435 ************************************************************************/ 3436 static void 3437 ixgbe_handle_msf(void *context) 3438 { 3439 if_ctx_t ctx = context; 3440 struct adapter *adapter = iflib_get_softc(ctx); 3441 struct ixgbe_hw *hw = &adapter->hw; 3442 u32 autoneg; 3443 bool negotiate; 3444 3445 if (adapter->sfp_reinit != 1) 3446 return; 3447 3448 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 3449 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 3450 3451 autoneg = hw->phy.autoneg_advertised; 3452 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3453 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3454 if (hw->mac.ops.setup_link) 3455 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3456 3457 /* Adjust media types shown in ifconfig */ 3458 ifmedia_removeall(adapter->media); 3459 ixgbe_add_media_types(adapter->ctx); 3460 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); 3461 3462 adapter->sfp_reinit = 0; 3463 } /* ixgbe_handle_msf */ 3464 3465 /************************************************************************ 3466 * ixgbe_handle_phy - Tasklet for external PHY interrupts 3467 ************************************************************************/ 3468 static void 3469 ixgbe_handle_phy(void *context) 3470 { 3471 if_ctx_t ctx = 
	    context;
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int error;

	/* Let the PHY service its Local Alarm Status Interrupt (LASI). */
	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(adapter->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	/* Clear the stopped flag so ixgbe_stop_adapter() runs in full. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_if_stop */

/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			/* should actually be negotiated value */
			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	}

	ixgbe_update_stats_counters(adapter);

	/* Re-enable link interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
} /* ixgbe_if_update_admin_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	/* DMA coalescing exists only on X550-class MACs with the op set. */
	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

	/* Reprogram only when the watchdog timer or link speed changed. */
	if (dcfg->watchdog_timer ^ adapter->dmac ||
	    dcfg->link_speed ^ adapter->link_speed) {
		dcfg->watchdog_timer = adapter->dmac;
		dcfg->fcoe_en = FALSE;
		dcfg->link_speed = adapter->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */

/************************************************************************
 * ixgbe_if_enable_intr
 *
 *   Unmask the misc causes appropriate for this MAC/feature set,
 *   then unmask every RX queue vector.
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_rx_queue *que = adapter->rx_queues;
	u32 mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific misc-interrupt causes. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_if_enable_intr */

/************************************************************************
 * ixgbe_disable_intr
 ************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);

	if (adapter->intr_type == IFLIB_INTR_MSIX)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	} else {
		/* Low 16 misc causes plus both extended queue banks. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_if_disable_intr */

/************************************************************************
 *
ixgbe_if_rx_queue_intr_enable 3697 ************************************************************************/ 3698 static int 3699 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 3700 { 3701 struct adapter *adapter = iflib_get_softc(ctx); 3702 struct ix_rx_queue *que = &adapter->rx_queues[rxqid]; 3703 3704 ixgbe_enable_queue(adapter, que->rxr.me); 3705 3706 return (0); 3707 } /* ixgbe_if_rx_queue_intr_enable */ 3708 3709 /************************************************************************ 3710 * ixgbe_enable_queue 3711 ************************************************************************/ 3712 static void 3713 ixgbe_enable_queue(struct adapter *adapter, u32 vector) 3714 { 3715 struct ixgbe_hw *hw = &adapter->hw; 3716 u64 queue = (u64)(1 << vector); 3717 u32 mask; 3718 3719 if (hw->mac.type == ixgbe_mac_82598EB) { 3720 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3721 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3722 } else { 3723 mask = (queue & 0xFFFFFFFF); 3724 if (mask) 3725 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 3726 mask = (queue >> 32); 3727 if (mask) 3728 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 3729 } 3730 } /* ixgbe_enable_queue */ 3731 3732 /************************************************************************ 3733 * ixgbe_disable_queue 3734 ************************************************************************/ 3735 static void 3736 ixgbe_disable_queue(struct adapter *adapter, u32 vector) 3737 { 3738 struct ixgbe_hw *hw = &adapter->hw; 3739 u64 queue = (u64)(1 << vector); 3740 u32 mask; 3741 3742 if (hw->mac.type == ixgbe_mac_82598EB) { 3743 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3744 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 3745 } else { 3746 mask = (queue & 0xFFFFFFFF); 3747 if (mask) 3748 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 3749 mask = (queue >> 32); 3750 if (mask) 3751 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 3752 } 3753 } /* ixgbe_disable_queue */ 3754 3755 
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 *
 *   Filter routine for INTx/MSI operation: reads EICR, dispatches the
 *   "other" causes (fan failure, link change, SFP module, external
 *   PHY) to their tasks, then schedules the threaded handler for
 *   queue processing.
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct adapter *adapter = arg;
	struct ix_rx_queue *que = adapter->rx_queues;
	struct ixgbe_hw *hw = &adapter->hw;
	if_ctx_t ctx = adapter->ctx;
	u32 eicr, eicr_mask;

	/* Presumably read-to-clear; confirm against the datasheet */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		/* Spurious or shared interrupt: re-enable and bail */
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		/* NOTE(review): this writes EIMS (re-arming SDP1) rather
		 * than clearing EICR -- looks intentional but confirm. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the admin task has processed it */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			/* Module insertion/removal: ack and queue mod task */
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			GROUPTASK_ENQUEUE(&adapter->mod_task);
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			/* Only allow one multispeed-fiber task in flight */
			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
				GROUPTASK_ENQUEUE(&adapter->msf_task);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		GROUPTASK_ENQUEUE(&adapter->phy_task);

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */

/************************************************************************
 * ixgbe_free_pci_resources
 *
 *   Tear down interrupt resources and release the register BAR.
 *   Tolerates partially-initialized state (NULL checks throughout).
 ************************************************************************/
static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ix_rx_queue *que = adapter->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release the admin/link interrupt first (MSI-X only) */
	if (adapter->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &adapter->irq);

	/* Then every per-queue interrupt */
	if (que != NULL) {
		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	/*
	 * Release the memory-mapped register region (BAR 0).
	 * NOTE(review): original comment here said "Free link/admin
	 * interrupt", which described the block above, not this one.
	 */
	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 * SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	int error, fc;

	adapter = (struct adapter *)arg1;
	fc = adapter->hw.fc.current_mode;

	error = sysctl_handle_int(oidp, &fc, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (fc == adapter->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(adapter, fc);
} /* ixgbe_sysctl_flowcntl */

/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 * Flow control values:
 *   0 - off
 *   1 - rx pause
 *   2 - tx pause
 *   3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct adapter *adapter, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = fc;
		/* RX drop-enable only makes sense with FC off (see
		 * ixgbe_enable_rx_drop), so turn it off here */
		if (adapter->num_rx_queues > 1)
			ixgbe_disable_rx_drop(adapter);
		break;
	case ixgbe_fc_none:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
		if (adapter->num_rx_queues > 1)
			ixgbe_enable_rx_drop(adapter);
		break;
	default:
		return (EINVAL);
	}

	/* Don't autoneg if forcing a value */
	adapter->hw.fc.disable_fc_autoneg = TRUE;
	ixgbe_fc_enable(&adapter->hw);

	return (0);
} /* ixgbe_set_flowcntl */

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 * Enable the hardware to drop packets when the buffer is
 * full. This is useful with multiqueue, so that no single
 * queue being full stalls the entire RX engine. We only
 * enable this when Multiqueue is enabled AND Flow Control
 * is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	/* Set the DROP_EN bit in each ring's SRRCTL register */
	for (int i = 0; i < adapter->num_rx_queues; i++) {
		rxr = &adapter->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 *
 *   Inverse of ixgbe_enable_rx_drop: clear DROP_EN on every ring
 *   and on every VF queue-drop-enable entry.
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < adapter->num_rx_queues; i++) {
		rxr = &adapter->rx_queues[i].rxr;
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 * SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	int error, advertise;

	adapter = (struct adapter *)arg1;
	advertise = adapter->advertise;

	error = sysctl_handle_int(oidp, &advertise, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixgbe_set_advertise(adapter, advertise);
} /* ixgbe_sysctl_advertise */

/************************************************************************
 * ixgbe_set_advertise - Control advertised link speed
 *
 * Flags:
 *   0x1 - advertise 100 Mb
 *   0x2 - advertise 1G
 *   0x4 - advertise 10G
 *   0x8 - advertise 10 Mb (yes, Mb)
 ************************************************************************/
static int
ixgbe_set_advertise(struct adapter *adapter, int advertise)
{
	device_t dev = iflib_get_dev(adapter->ctx);
	struct ixgbe_hw *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32 err = IXGBE_NOT_IMPLEMENTED;
	bool negotiate = FALSE;

	/* Checks to validate new value */
	if (adapter->advertise == advertise) /* no change */
		return (0);

	hw = &adapter->hw;

	/* No speed changes for backplane media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber))) {
		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
		return (EINVAL);
	}

	if (advertise < 0x1 || advertise > 0xF) {
		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
		return (EINVAL);
	}

	/* Query what the hardware can actually do before committing */
	if (hw->mac.ops.get_link_capabilities) {
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(dev, "Unable to determine supported advertise speeds\n");
			return (ENODEV);
		}
	}

	/* Set new value and report new advertised mode */
	if (advertise & 0x1) {
		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_100_FULL;
	}
	if (advertise & 0x2) {
		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	}
	if (advertise & 0x4) {
		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	}
	if (advertise & 0x8) {
		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10_FULL;
	}

	/* All requested bits validated: program the link */
	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	adapter->advertise = advertise;

	return (0);
} /* ixgbe_set_advertise */

/************************************************************************
 * ixgbe_get_advertise - Get current advertised speed settings
 *
 * Formatted for sysctl usage.
4074 * Flags: 4075 * 0x1 - advertise 100 Mb 4076 * 0x2 - advertise 1G 4077 * 0x4 - advertise 10G 4078 * 0x8 - advertise 10 Mb (yes, Mb) 4079 ************************************************************************/ 4080 static int 4081 ixgbe_get_advertise(struct adapter *adapter) 4082 { 4083 struct ixgbe_hw *hw = &adapter->hw; 4084 int speed; 4085 ixgbe_link_speed link_caps = 0; 4086 s32 err; 4087 bool negotiate = FALSE; 4088 4089 /* 4090 * Advertised speed means nothing unless it's copper or 4091 * multi-speed fiber 4092 */ 4093 if (!(hw->phy.media_type == ixgbe_media_type_copper) && 4094 !(hw->phy.multispeed_fiber)) 4095 return (0); 4096 4097 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 4098 if (err != IXGBE_SUCCESS) 4099 return (0); 4100 4101 speed = 4102 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 4103 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 4104 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 4105 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0); 4106 4107 return speed; 4108 } /* ixgbe_get_advertise */ 4109 4110 /************************************************************************ 4111 * ixgbe_sysctl_dmac - Manage DMA Coalescing 4112 * 4113 * Control values: 4114 * 0/1 - off / on (use default value of 1000) 4115 * 4116 * Legal timer values are: 4117 * 50,100,250,500,1000,2000,5000,10000 4118 * 4119 * Turning off interrupt moderation will also turn this off. 
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
	int error;
	u16 newval;

	newval = adapter->dmac;
	error = sysctl_handle_16(oidp, &newval, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		adapter->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		adapter->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		adapter->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */

#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 * Sysctl to test power states
 * Values:
 *   0 - set device to D0
 *   3 - set device to D3
 *   (none) - get current device power state
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_handle_int(oidp, &new_ps, 0, req);
	/* Read-only access or handler error: just report current state */
	if ((error) || (req->newptr == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	/* Only the D0 <-> D3 transitions are supported */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
} /* ixgbe_sysctl_power_state */
#endif

/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 * Sysctl to enable/disable the WoL capability,
 * if supported by the adapter.
 *
 * Values:
 *   0 - disabled
 *   1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	struct ixgbe_hw *hw = &adapter->hw;
	int new_wol_enabled;
	int error = 0;

	new_wol_enabled = hw->wol_enabled;
	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Normalize any non-zero value to 1 */
	new_wol_enabled = !!(new_wol_enabled);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* Refuse to enable WoL on hardware without support */
	if (new_wol_enabled > 0 && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */

/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 * Sysctl to enable/disable the types of packets that the
 * adapter will wake up on upon receipt.
 * Flags:
 *   0x1  - Link Status Change
 *   0x2  - Magic Packet
 *   0x4  - Direct Exact
 *   0x8  - Directed Multicast
 *   0x10 - Broadcast
 *   0x20 - ARP/IPv4 Request Packet
 *   0x40 - Direct IPv4 Packet
 *   0x80 - Direct IPv6 Packet
 *
 * Settings not listed above will cause the sysctl to return an error.
4255 ************************************************************************/ 4256 static int 4257 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS) 4258 { 4259 struct adapter *adapter = (struct adapter *)arg1; 4260 int error = 0; 4261 u32 new_wufc; 4262 4263 new_wufc = adapter->wufc; 4264 4265 error = sysctl_handle_32(oidp, &new_wufc, 0, req); 4266 if ((error) || (req->newptr == NULL)) 4267 return (error); 4268 if (new_wufc == adapter->wufc) 4269 return (0); 4270 4271 if (new_wufc & 0xffffff00) 4272 return (EINVAL); 4273 4274 new_wufc &= 0xff; 4275 new_wufc |= (0xffffff & adapter->wufc); 4276 adapter->wufc = new_wufc; 4277 4278 return (0); 4279 } /* ixgbe_sysctl_wufc */ 4280 4281 #ifdef IXGBE_DEBUG 4282 /************************************************************************ 4283 * ixgbe_sysctl_print_rss_config 4284 ************************************************************************/ 4285 static int 4286 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS) 4287 { 4288 struct adapter *adapter = (struct adapter *)arg1; 4289 struct ixgbe_hw *hw = &adapter->hw; 4290 device_t dev = adapter->dev; 4291 struct sbuf *buf; 4292 int error = 0, reta_size; 4293 u32 reg; 4294 4295 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4296 if (!buf) { 4297 device_printf(dev, "Could not allocate sbuf for output.\n"); 4298 return (ENOMEM); 4299 } 4300 4301 // TODO: use sbufs to make a string to print out 4302 /* Set multiplier for RETA setup and table size based on MAC */ 4303 switch (adapter->hw.mac.type) { 4304 case ixgbe_mac_X550: 4305 case ixgbe_mac_X550EM_x: 4306 case ixgbe_mac_X550EM_a: 4307 reta_size = 128; 4308 break; 4309 default: 4310 reta_size = 32; 4311 break; 4312 } 4313 4314 /* Print out the redirection table */ 4315 sbuf_cat(buf, "\n"); 4316 for (int i = 0; i < reta_size; i++) { 4317 if (i < 32) { 4318 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); 4319 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); 4320 } else { 4321 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); 4322 
sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); 4323 } 4324 } 4325 4326 // TODO: print more config 4327 4328 error = sbuf_finish(buf); 4329 if (error) 4330 device_printf(dev, "Error finishing sbuf: %d\n", error); 4331 4332 sbuf_delete(buf); 4333 4334 return (0); 4335 } /* ixgbe_sysctl_print_rss_config */ 4336 #endif /* IXGBE_DEBUG */ 4337 4338 /************************************************************************ 4339 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY 4340 * 4341 * For X552/X557-AT devices using an external PHY 4342 ************************************************************************/ 4343 static int 4344 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) 4345 { 4346 struct adapter *adapter = (struct adapter *)arg1; 4347 struct ixgbe_hw *hw = &adapter->hw; 4348 u16 reg; 4349 4350 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4351 device_printf(iflib_get_dev(adapter->ctx), 4352 "Device has no supported external thermal sensor.\n"); 4353 return (ENODEV); 4354 } 4355 4356 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 4357 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4358 device_printf(iflib_get_dev(adapter->ctx), 4359 "Error reading from PHY's current temperature register\n"); 4360 return (EAGAIN); 4361 } 4362 4363 /* Shift temp for output */ 4364 reg = reg >> 8; 4365 4366 return (sysctl_handle_16(oidp, NULL, reg, req)); 4367 } /* ixgbe_sysctl_phy_temp */ 4368 4369 /************************************************************************ 4370 * ixgbe_sysctl_phy_overtemp_occurred 4371 * 4372 * Reports (directly from the PHY) whether the current PHY 4373 * temperature is over the overtemp threshold. 
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	struct ixgbe_hw *hw = &adapter->hw;
	u16 reg;

	/* Only the X552/X557-AT external PHY exposes this register */
	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(adapter->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(adapter->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Get occurrence bit */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, 0, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */

/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 * Sysctl to set EEE power saving feature
 * Values:
 *   0      - disable EEE
 *   1      - enable EEE
 *   (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	device_t dev = adapter->dev;
	struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	/* Read-only access or handler error: just report current state */
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */

/************************************************************************
 * ixgbe_init_device_features
 *
 *   Populate feat_cap (what the MAC/device can do) from the hardware
 *   type, then feat_en (what is actually enabled) from defaults,
 *   global sysctls, and cross-feature dependencies -- in that order.
 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Baseline capabilities common to all supported MACs */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
	                  | IXGBE_FEATURE_RSS
	                  | IXGBE_FEATURE_MSI
	                  | IXGBE_FEATURE_MSIX
	                  | IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM_a cannot use legacy (INTx) interrupts */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */

/************************************************************************
 * ixgbe_check_fan_failure
 *
 *   Warn on a fan-failure indication.  The relevant bit differs
 *   depending on whether 'reg' is an EICR snapshot (interrupt
 *   context) or an ESDP read.
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
{
	u32 mask;

	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */