1 /****************************************************************************** 2 3 Copyright (c) 2001-2017, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 36 #include "opt_inet.h" 37 #include "opt_inet6.h" 38 #include "opt_rss.h" 39 40 #include "ixgbe.h" 41 #include "ixgbe_sriov.h" 42 #include "ifdi_if.h" 43 44 #include <net/netmap.h> 45 #include <dev/netmap/netmap_kern.h> 46 47 /************************************************************************ 48 * Driver version 49 ************************************************************************/ 50 char ixgbe_driver_version[] = "4.0.1-k"; 51 52 53 /************************************************************************ 54 * PCI Device ID Table 55 * 56 * Used by probe to select devices to load on 57 * Last field stores an index into ixgbe_strings 58 * Last entry must be all 0s 59 * 60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 61 ************************************************************************/ 62 static pci_vendor_info_t ixgbe_vendor_info_array[] = 63 { 64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 66 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 67 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 68 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 69 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 70 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 73 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 75 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 77 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 79 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 81 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 83 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 85 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 87 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 89 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 91 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 93 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 95 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 97 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 99 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 101 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 103 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 105 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 106 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 107 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"), 108 /* required last entry */ 109 PVID_END 110 }; 111 112 static void *ixgbe_register(device_t dev); 113 static int ixgbe_if_attach_pre(if_ctx_t ctx); 114 static int ixgbe_if_attach_post(if_ctx_t ctx); 115 static int ixgbe_if_detach(if_ctx_t ctx); 116 static int ixgbe_if_shutdown(if_ctx_t ctx); 117 static int ixgbe_if_suspend(if_ctx_t ctx); 118 static int ixgbe_if_resume(if_ctx_t ctx); 119 120 static void ixgbe_if_stop(if_ctx_t ctx); 121 void ixgbe_if_enable_intr(if_ctx_t ctx); 122 static void ixgbe_if_disable_intr(if_ctx_t ctx); 123 static void ixgbe_link_intr_enable(if_ctx_t ctx); 124 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid); 125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr); 126 static int ixgbe_if_media_change(if_ctx_t ctx); 127 static int ixgbe_if_msix_intr_assign(if_ctx_t, int); 128 static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu); 129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip); 130 static void ixgbe_if_multi_set(if_ctx_t ctx); 131 static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags); 132 static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, 133 uint64_t *paddrs, int nrxqs, int nrxqsets); 134 static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, 135 uint64_t *paddrs, int nrxqs, int nrxqsets); 136 static void ixgbe_if_queues_free(if_ctx_t ctx); 137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t); 138 static void ixgbe_if_update_admin_status(if_ctx_t ctx); 139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag); 140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag); 141 static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req); 142 int ixgbe_intr(void *arg); 143 144 /************************************************************************ 145 * Function prototypes 146 ************************************************************************/ 147 #if __FreeBSD_version >= 1100036 148 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter); 149 #endif 150 151 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector); 152 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector); 153 static void ixgbe_add_device_sysctls(if_ctx_t ctx); 154 static int ixgbe_allocate_pci_resources(if_ctx_t ctx); 155 static int ixgbe_setup_low_power_mode(if_ctx_t ctx); 156 157 static void ixgbe_config_dmac(struct adapter *adapter); 158 static void ixgbe_configure_ivars(struct adapter *adapter); 159 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, 160 s8 type); 161 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 162 static bool ixgbe_sfp_probe(if_ctx_t ctx); 163 164 static void ixgbe_free_pci_resources(if_ctx_t ctx); 165 166 static int ixgbe_msix_link(void *arg); 167 static int ixgbe_msix_que(void *arg); 168 static void ixgbe_initialize_rss_mapping(struct adapter *adapter); 169 static void ixgbe_initialize_receive_units(if_ctx_t ctx); 170 static void ixgbe_initialize_transmit_units(if_ctx_t ctx); 171 172 static int ixgbe_setup_interface(if_ctx_t ctx); 173 static void ixgbe_init_device_features(struct adapter *adapter); 174 static void ixgbe_check_fan_failure(struct adapter *, u32, bool); 175 static void ixgbe_add_media_types(if_ctx_t ctx); 176 static void ixgbe_update_stats_counters(struct adapter *adapter); 177 static void ixgbe_config_link(if_ctx_t ctx); 178 static void ixgbe_get_slot_info(struct adapter *); 179 static void ixgbe_check_wol_support(struct adapter *adapter); 180 static void ixgbe_enable_rx_drop(struct adapter *); 181 static void ixgbe_disable_rx_drop(struct adapter *); 182 183 static void ixgbe_add_hw_stats(struct adapter *adapter); 184 static int ixgbe_set_flowcntl(struct adapter *, int); 185 static int ixgbe_set_advertise(struct adapter *, int); 186 static int ixgbe_get_advertise(struct adapter *); 187 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx); 188 static void ixgbe_config_gpie(struct adapter *adapter); 189 static void ixgbe_config_delay_values(struct adapter *adapter); 190 191 /* Sysctl handlers */ 192 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS); 193 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS); 194 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS); 195 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS); 196 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS); 197 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS); 198 #ifdef IXGBE_DEBUG 199 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS); 200 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS); 201 #endif 202 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS); 203 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS); 204 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS); 205 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS); 206 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS); 207 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS); 208 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS); 209 210 /* Deferred interrupt tasklets */ 211 static void ixgbe_handle_msf(void *); 212 static void ixgbe_handle_mod(void *); 213 static void ixgbe_handle_phy(void *); 214 215 /************************************************************************ 216 * FreeBSD Device Interface Entry Points 217 ************************************************************************/ 218 static device_method_t ix_methods[] = { 219 /* Device interface */ 220 DEVMETHOD(device_register, ixgbe_register), 221 DEVMETHOD(device_probe, iflib_device_probe), 222 DEVMETHOD(device_attach, iflib_device_attach), 223 DEVMETHOD(device_detach, iflib_device_detach), 224 DEVMETHOD(device_shutdown, iflib_device_shutdown), 225 DEVMETHOD(device_suspend, iflib_device_suspend), 226 DEVMETHOD(device_resume, iflib_device_resume), 227 #ifdef PCI_IOV 228 DEVMETHOD(pci_iov_init, iflib_device_iov_init), 229 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit), 230 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf), 231 #endif /* PCI_IOV */ 232 DEVMETHOD_END 233 }; 234 235 static driver_t ix_driver = { 236 "ix", ix_methods, sizeof(struct adapter), 237 }; 238 239 devclass_t ix_devclass; 240 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0); 241 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array); 242 MODULE_DEPEND(ix, pci, 1, 1, 1); 243 MODULE_DEPEND(ix, ether, 1, 1, 1); 244 MODULE_DEPEND(ix, iflib, 1, 1, 1); 245 246 static device_method_t ixgbe_if_methods[] = { 247 DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre), 248 DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post), 249 DEVMETHOD(ifdi_detach, ixgbe_if_detach), 250 DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown), 251 DEVMETHOD(ifdi_suspend, ixgbe_if_suspend), 252 DEVMETHOD(ifdi_resume, ixgbe_if_resume), 253 DEVMETHOD(ifdi_init, ixgbe_if_init), 254 DEVMETHOD(ifdi_stop, ixgbe_if_stop), 255 DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign), 256 DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr), 257 DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr), 258 DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable), 259 DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable), 260 DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable), 261 DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc), 262 DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc), 263 DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free), 264 DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status), 265 DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set), 266 DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set), 267 DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set), 268 DEVMETHOD(ifdi_media_status, ixgbe_if_media_status), 269 DEVMETHOD(ifdi_media_change, ixgbe_if_media_change), 270 DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set), 271 DEVMETHOD(ifdi_timer, ixgbe_if_timer), 272 DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register), 273 DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister), 274 DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter), 275 DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req), 276 #ifdef PCI_IOV 277 DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init), 278 DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit), 279 DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add), 280 #endif /* PCI_IOV */ 281 DEVMETHOD_END 282 }; 283 284 /* 285 * TUNEABLE PARAMETERS: 286 */ 287 288 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters"); 289 static driver_t ixgbe_if_driver = { 290 "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter) 291 }; 292 293 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); 294 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, 295 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); 296 297 /* Flow control setting, default to full */ 298 static int ixgbe_flow_control = ixgbe_fc_full; 299 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN, 300 &ixgbe_flow_control, 0, "Default flow control used for all adapters"); 301 302 /* Advertise Speed, default to 0 (auto) */ 303 static int ixgbe_advertise_speed = 0; 304 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN, 305 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters"); 306 307 /* 308 * Smart speed setting, default to on 309 * this only works as a compile option 310 * right now as its during attach, set 311 * this to 'ixgbe_smart_speed_off' to 312 * disable. 313 */ 314 static int ixgbe_smart_speed = ixgbe_smart_speed_on; 315 316 /* 317 * MSI-X should be the default for best performance, 318 * but this allows it to be forced off for testing. 319 */ 320 static int ixgbe_enable_msix = 1; 321 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, 322 "Enable MSI-X interrupts"); 323 324 /* 325 * Defining this on will allow the use 326 * of unsupported SFP+ modules, note that 327 * doing so you are on your own :) 328 */ 329 static int allow_unsupported_sfp = FALSE; 330 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN, 331 &allow_unsupported_sfp, 0, 332 "Allow unsupported SFP modules...use at your own risk"); 333 334 /* 335 * Not sure if Flow Director is fully baked, 336 * so we'll default to turning it off. 337 */ 338 static int ixgbe_enable_fdir = 0; 339 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0, 340 "Enable Flow Director"); 341 342 /* Receive-Side Scaling */ 343 static int ixgbe_enable_rss = 1; 344 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, 345 "Enable Receive-Side Scaling (RSS)"); 346 347 #if 0 348 /* Keep running tab on them for sanity check */ 349 static int ixgbe_total_ports; 350 #endif 351 352 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations"); 353 354 /* 355 * For Flow Director: this is the number of TX packets we sample 356 * for the filter pool, this means every 20th packet will be probed. 357 * 358 * This feature can be disabled by setting this to 0. 359 */ 360 static int atr_sample_rate = 20; 361 362 extern struct if_txrx ixgbe_txrx; 363 364 static struct if_shared_ctx ixgbe_sctx_init = { 365 .isc_magic = IFLIB_MAGIC, 366 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ 367 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), 368 .isc_tx_maxsegsize = PAGE_SIZE, 369 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), 370 .isc_tso_maxsegsize = PAGE_SIZE, 371 .isc_rx_maxsize = PAGE_SIZE*4, 372 .isc_rx_nsegments = 1, 373 .isc_rx_maxsegsize = PAGE_SIZE*4, 374 .isc_nfl = 1, 375 .isc_ntxqs = 1, 376 .isc_nrxqs = 1, 377 378 .isc_admin_intrcnt = 1, 379 .isc_vendor_info = ixgbe_vendor_info_array, 380 .isc_driver_version = ixgbe_driver_version, 381 .isc_driver = &ixgbe_if_driver, 382 .isc_flags = IFLIB_TSO_INIT_IP, 383 384 .isc_nrxd_min = {MIN_RXD}, 385 .isc_ntxd_min = {MIN_TXD}, 386 .isc_nrxd_max = {MAX_RXD}, 387 .isc_ntxd_max = {MAX_TXD}, 388 .isc_nrxd_default = {DEFAULT_RXD}, 389 .isc_ntxd_default = {DEFAULT_TXD}, 390 }; 391 392 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init; 393 394 /************************************************************************ 395 * ixgbe_if_tx_queues_alloc 396 ************************************************************************/ 397 static int 398 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 399 int ntxqs, int ntxqsets) 400 { 401 struct adapter *adapter = iflib_get_softc(ctx); 402 if_softc_ctx_t scctx = adapter->shared; 403 struct ix_tx_queue *que; 404 int i, j, error; 405 406 MPASS(adapter->num_tx_queues > 0); 407 MPASS(adapter->num_tx_queues == ntxqsets); 408 MPASS(ntxqs == 1); 409 410 /* Allocate queue structure memory */ 411 adapter->tx_queues = 412 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets, 413 M_IXGBE, M_NOWAIT | M_ZERO); 414 if (!adapter->tx_queues) { 415 device_printf(iflib_get_dev(ctx), 416 "Unable to allocate TX ring memory\n"); 417 return (ENOMEM); 418 } 419 420 for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) { 421 struct tx_ring *txr = &que->txr; 422 423 /* In case SR-IOV is enabled, align the index properly */ 424 txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, 425 i); 426 427 txr->adapter = que->adapter = adapter; 428 adapter->active_queues |= (u64)1 << txr->me; 429 430 /* Allocate report status array */ 431 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO); 432 if (txr->tx_rsq == NULL) { 433 error = ENOMEM; 434 goto fail; 435 } 436 for (j = 0; j < scctx->isc_ntxd[0]; j++) 437 txr->tx_rsq[j] = QIDX_INVALID; 438 /* get the virtual and physical address of the hardware queues */ 439 txr->tail = IXGBE_TDT(txr->me); 440 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i]; 441 txr->tx_paddr = paddrs[i]; 442 443 txr->bytes = 0; 444 txr->total_packets = 0; 445 446 /* Set the rate at which we sample packets */ 447 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 448 txr->atr_sample = atr_sample_rate; 449 450 } 451 452 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n", 453 adapter->num_tx_queues); 454 455 return (0); 456 457 fail: 458 ixgbe_if_queues_free(ctx); 459 460 return (error); 461 } /* ixgbe_if_tx_queues_alloc */ 462 463 /************************************************************************ 464 * ixgbe_if_rx_queues_alloc 465 ************************************************************************/ 466 static int 467 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 468 int nrxqs, int nrxqsets) 469 { 470 struct adapter *adapter = iflib_get_softc(ctx); 471 struct ix_rx_queue *que; 472 int i; 473 474 MPASS(adapter->num_rx_queues > 0); 475 MPASS(adapter->num_rx_queues == nrxqsets); 476 MPASS(nrxqs == 1); 477 478 /* Allocate queue structure memory */ 479 adapter->rx_queues = 480 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets, 481 M_IXGBE, M_NOWAIT | M_ZERO); 482 if (!adapter->rx_queues) { 483 device_printf(iflib_get_dev(ctx), 484 "Unable to allocate TX ring memory\n"); 485 return (ENOMEM); 486 } 487 488 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { 489 struct rx_ring *rxr = &que->rxr; 490 491 /* In case SR-IOV is enabled, align the index properly */ 492 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, 493 i); 494 495 rxr->adapter = que->adapter = adapter; 496 497 /* get the virtual and physical address of the hw queues */ 498 rxr->tail = IXGBE_RDT(rxr->me); 499 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; 500 rxr->rx_paddr = paddrs[i]; 501 rxr->bytes = 0; 502 rxr->que = que; 503 } 504 505 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n", 506 adapter->num_rx_queues); 507 508 return (0); 509 } /* ixgbe_if_rx_queues_alloc */ 510 511 /************************************************************************ 512 * ixgbe_if_queues_free 513 ************************************************************************/ 514 static void 515 ixgbe_if_queues_free(if_ctx_t ctx) 516 { 517 struct adapter *adapter = iflib_get_softc(ctx); 518 struct ix_tx_queue *tx_que = adapter->tx_queues; 519 struct ix_rx_queue *rx_que = adapter->rx_queues; 520 int i; 521 522 if (tx_que != NULL) { 523 for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) { 524 struct tx_ring *txr = &tx_que->txr; 525 if (txr->tx_rsq == NULL) 526 break; 527 528 free(txr->tx_rsq, M_IXGBE); 529 txr->tx_rsq = NULL; 530 } 531 532 free(adapter->tx_queues, M_IXGBE); 533 adapter->tx_queues = NULL; 534 } 535 if (rx_que != NULL) { 536 free(adapter->rx_queues, M_IXGBE); 537 adapter->rx_queues = NULL; 538 } 539 } /* ixgbe_if_queues_free */ 540 541 /************************************************************************ 542 * ixgbe_initialize_rss_mapping 543 ************************************************************************/ 544 static void 545 ixgbe_initialize_rss_mapping(struct adapter *adapter) 546 { 547 struct ixgbe_hw *hw = &adapter->hw; 548 u32 reta = 0, mrqc, rss_key[10]; 549 int queue_id, table_size, index_mult; 550 int i, j; 551 u32 rss_hash_config; 552 553 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 554 /* Fetch the configured RSS key */ 555 rss_getkey((uint8_t *)&rss_key); 556 } else { 557 /* set up random bits */ 558 arc4rand(&rss_key, sizeof(rss_key), 0); 559 } 560 561 /* Set multiplier for RETA setup and table size based on MAC */ 562 index_mult = 0x1; 563 table_size = 128; 564 switch (adapter->hw.mac.type) { 565 case ixgbe_mac_82598EB: 566 index_mult = 0x11; 567 break; 568 case ixgbe_mac_X550: 569 case ixgbe_mac_X550EM_x: 570 case ixgbe_mac_X550EM_a: 571 table_size = 512; 572 break; 573 default: 574 break; 575 } 576 577 /* Set up the redirection table */ 578 for (i = 0, j = 0; i < table_size; i++, j++) { 579 if (j == adapter->num_rx_queues) 580 j = 0; 581 582 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 583 /* 584 * Fetch the RSS bucket id for the given indirection 585 * entry. Cap it at the number of configured buckets 586 * (which is num_rx_queues.) 587 */ 588 queue_id = rss_get_indirection_to_bucket(i); 589 queue_id = queue_id % adapter->num_rx_queues; 590 } else 591 queue_id = (j * index_mult); 592 593 /* 594 * The low 8 bits are for hash value (n+0); 595 * The next 8 bits are for hash value (n+1), etc. 596 */ 597 reta = reta >> 8; 598 reta = reta | (((uint32_t)queue_id) << 24); 599 if ((i & 3) == 3) { 600 if (i < 128) 601 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 602 else 603 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 604 reta); 605 reta = 0; 606 } 607 } 608 609 /* Now fill our hash function seeds */ 610 for (i = 0; i < 10; i++) 611 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 612 613 /* Perform hash on these packet types */ 614 if (adapter->feat_en & IXGBE_FEATURE_RSS) 615 rss_hash_config = rss_gethashconfig(); 616 else { 617 /* 618 * Disable UDP - IP fragments aren't currently being handled 619 * and so we end up with a mix of 2-tuple and 4-tuple 620 * traffic. 621 */ 622 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 623 | RSS_HASHTYPE_RSS_TCP_IPV4 624 | RSS_HASHTYPE_RSS_IPV6 625 | RSS_HASHTYPE_RSS_TCP_IPV6 626 | RSS_HASHTYPE_RSS_IPV6_EX 627 | RSS_HASHTYPE_RSS_TCP_IPV6_EX; 628 } 629 630 mrqc = IXGBE_MRQC_RSSEN; 631 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 632 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 633 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 634 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 635 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 636 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 637 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 638 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 639 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 640 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 641 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 642 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 643 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 644 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 645 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 646 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 647 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 648 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 649 mrqc |= ixgbe_get_mrqc(adapter->iov_mode); 650 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 651 } /* ixgbe_initialize_rss_mapping */ 652 653 /************************************************************************ 654 * ixgbe_initialize_receive_units - Setup receive registers and features. 655 ************************************************************************/ 656 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 657 658 static void 659 ixgbe_initialize_receive_units(if_ctx_t ctx) 660 { 661 struct adapter *adapter = iflib_get_softc(ctx); 662 if_softc_ctx_t scctx = adapter->shared; 663 struct ixgbe_hw *hw = &adapter->hw; 664 struct ifnet *ifp = iflib_get_ifp(ctx); 665 struct ix_rx_queue *que; 666 int i, j; 667 u32 bufsz, fctrl, srrctl, rxcsum; 668 u32 hlreg; 669 670 /* 671 * Make sure receives are disabled while 672 * setting up the descriptor ring 673 */ 674 ixgbe_disable_rx(hw); 675 676 /* Enable broadcasts */ 677 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 678 fctrl |= IXGBE_FCTRL_BAM; 679 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 680 fctrl |= IXGBE_FCTRL_DPF; 681 fctrl |= IXGBE_FCTRL_PMCF; 682 } 683 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 684 685 /* Set for Jumbo Frames? */ 686 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 687 if (ifp->if_mtu > ETHERMTU) 688 hlreg |= IXGBE_HLREG0_JUMBOEN; 689 else 690 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 691 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); 692 693 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 694 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 695 696 /* Setup the Base and Length of the Rx Descriptor Ring */ 697 for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) { 698 struct rx_ring *rxr = &que->rxr; 699 u64 rdba = rxr->rx_paddr; 700 701 j = rxr->me; 702 703 /* Setup the Base and Length of the Rx Descriptor Ring */ 704 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), 705 (rdba & 0x00000000ffffffffULL)); 706 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 707 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), 708 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc)); 709 710 /* Set up the SRRCTL register */ 711 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); 712 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 713 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 714 srrctl |= bufsz; 715 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 716 717 /* 718 * Set DROP_EN iff we have no flow control and >1 queue. 719 * Note that srrctl was cleared shortly before during reset, 720 * so we do not need to clear the bit, but do it just in case 721 * this code is moved elsewhere. 722 */ 723 if (adapter->num_rx_queues > 1 && 724 adapter->hw.fc.requested_mode == ixgbe_fc_none) { 725 srrctl |= IXGBE_SRRCTL_DROP_EN; 726 } else { 727 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 728 } 729 730 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 731 732 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 733 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 734 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 735 736 /* Set the driver rx tail address */ 737 rxr->tail = IXGBE_RDT(rxr->me); 738 } 739 740 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 741 u32 psrtype = IXGBE_PSRTYPE_TCPHDR 742 | IXGBE_PSRTYPE_UDPHDR 743 | IXGBE_PSRTYPE_IPV4HDR 744 | IXGBE_PSRTYPE_IPV6HDR; 745 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 746 } 747 748 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 749 750 ixgbe_initialize_rss_mapping(adapter); 751 752 if (adapter->num_rx_queues > 1) { 753 /* RSS and RX IPP Checksum are mutually exclusive */ 754 rxcsum |= IXGBE_RXCSUM_PCSD; 755 } 756 757 if (ifp->if_capenable & IFCAP_RXCSUM) 758 rxcsum |= IXGBE_RXCSUM_PCSD; 759 760 /* This is useful for calculating UDP/IP fragment checksums */ 761 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 762 rxcsum |= IXGBE_RXCSUM_IPPCSE; 763 764 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 765 766 } /* ixgbe_initialize_receive_units */ 767 768 /************************************************************************ 769 * ixgbe_initialize_transmit_units - Enable transmit units. 770 ************************************************************************/ 771 static void 772 ixgbe_initialize_transmit_units(if_ctx_t ctx) 773 { 774 struct adapter *adapter = iflib_get_softc(ctx); 775 struct ixgbe_hw *hw = &adapter->hw; 776 if_softc_ctx_t scctx = adapter->shared; 777 struct ix_tx_queue *que; 778 int i; 779 780 /* Setup the Base and Length of the Tx Descriptor Ring */ 781 for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues; 782 i++, que++) { 783 struct tx_ring *txr = &que->txr; 784 u64 tdba = txr->tx_paddr; 785 u32 txctrl = 0; 786 int j = txr->me; 787 788 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 789 (tdba & 0x00000000ffffffffULL)); 790 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 791 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 792 scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc)); 793 794 /* Setup the HW Tx Head and Tail descriptor pointers */ 795 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 796 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 797 798 /* Cache the tail address */ 799 txr->tx_rs_cidx = txr->tx_rs_pidx; 800 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1; 801 for (int k = 0; k < scctx->isc_ntxd[0]; k++) 802 txr->tx_rsq[k] = QIDX_INVALID; 803 804 /* Disable Head Writeback */ 805 /* 806 * Note: for X550 series devices, these registers are actually 807 * prefixed with TPH_ isntead of DCA_, but the addresses and 808 * fields remain the same. 809 */ 810 switch (hw->mac.type) { 811 case ixgbe_mac_82598EB: 812 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 813 break; 814 default: 815 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 816 break; 817 } 818 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 819 switch (hw->mac.type) { 820 case ixgbe_mac_82598EB: 821 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 822 break; 823 default: 824 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 825 break; 826 } 827 828 } 829 830 if (hw->mac.type != ixgbe_mac_82598EB) { 831 u32 dmatxctl, rttdcs; 832 833 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 834 dmatxctl |= IXGBE_DMATXCTL_TE; 835 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 836 /* Disable arbiter to set MTQC */ 837 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 838 rttdcs |= IXGBE_RTTDCS_ARBDIS; 839 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 840 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 841 ixgbe_get_mtqc(adapter->iov_mode)); 842 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 843 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 844 } 845 846 } /* ixgbe_initialize_transmit_units */ 847 848 /************************************************************************ 849 * ixgbe_register 850 ************************************************************************/ 851 static void * 852 ixgbe_register(device_t dev) 853 { 854 return (ixgbe_sctx); 855 } /* ixgbe_register */ 856 857 /************************************************************************ 858 * ixgbe_if_attach_pre - Device initialization routine, part 1 859 * 860 * Called when the driver is being loaded. 861 * Identifies the type of hardware, initializes the hardware, 862 * and initializes iflib structures. 863 * 864 * return 0 on success, positive on failure 865 ************************************************************************/ 866 static int 867 ixgbe_if_attach_pre(if_ctx_t ctx) 868 { 869 struct adapter *adapter; 870 device_t dev; 871 if_softc_ctx_t scctx; 872 struct ixgbe_hw *hw; 873 int error = 0; 874 u32 ctrl_ext; 875 876 INIT_DEBUGOUT("ixgbe_attach: begin"); 877 878 /* Allocate, clear, and link in our adapter structure */ 879 dev = iflib_get_dev(ctx); 880 adapter = iflib_get_softc(ctx); 881 adapter->hw.back = adapter; 882 adapter->ctx = ctx; 883 adapter->dev = dev; 884 scctx = adapter->shared = iflib_get_softc_ctx(ctx); 885 adapter->media = iflib_get_media(ctx); 886 hw = &adapter->hw; 887 888 /* Determine hardware revision */ 889 hw->vendor_id = pci_get_vendor(dev); 890 hw->device_id = pci_get_device(dev); 891 hw->revision_id = pci_get_revid(dev); 892 hw->subsystem_vendor_id = pci_get_subvendor(dev); 893 hw->subsystem_device_id = pci_get_subdevice(dev); 894 895 /* Do base PCI setup - map BAR0 */ 896 if (ixgbe_allocate_pci_resources(ctx)) { 897 device_printf(dev, "Allocation of PCI resources failed\n"); 898 return (ENXIO); 899 } 900 901 /* let hardware know driver is loaded */ 902 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 903 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 904 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 905 906 /* 907 * Initialize the shared code 908 */ 909 if (ixgbe_init_shared_code(hw) != 0) { 910 device_printf(dev, "Unable to initialize the shared code\n"); 911 error = ENXIO; 912 goto err_pci; 913 } 914 915 if (hw->mbx.ops.init_params) 916 hw->mbx.ops.init_params(hw); 917 918 hw->allow_unsupported_sfp = allow_unsupported_sfp; 919 920 if (hw->mac.type != ixgbe_mac_82598EB) 921 hw->phy.smart_speed = ixgbe_smart_speed; 922 923 ixgbe_init_device_features(adapter); 924 925 /* Enable WoL (if supported) */ 926 ixgbe_check_wol_support(adapter); 927 928 /* Verify adapter fan is still functional (if applicable) */ 929 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 930 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 931 ixgbe_check_fan_failure(adapter, esdp, FALSE); 932 } 933 934 /* Ensure SW/FW semaphore is free */ 935 ixgbe_init_swfw_semaphore(hw); 936 937 /* Set an initial default flow control value */ 938 hw->fc.requested_mode = ixgbe_flow_control; 939 940 hw->phy.reset_if_overtemp = TRUE; 941 error = ixgbe_reset_hw(hw); 942 hw->phy.reset_if_overtemp = FALSE; 943 if (error == IXGBE_ERR_SFP_NOT_PRESENT) { 944 /* 945 * No optics in this port, set up 946 * so the timer routine will probe 947 * for later insertion. 948 */ 949 adapter->sfp_probe = TRUE; 950 error = 0; 951 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 952 device_printf(dev, "Unsupported SFP+ module detected!\n"); 953 error = EIO; 954 goto err_pci; 955 } else if (error) { 956 device_printf(dev, "Hardware initialization failed\n"); 957 error = EIO; 958 goto err_pci; 959 } 960 961 /* Make sure we have a good EEPROM before we read from it */ 962 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { 963 device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); 964 error = EIO; 965 goto err_pci; 966 } 967 968 error = ixgbe_start_hw(hw); 969 switch (error) { 970 case IXGBE_ERR_EEPROM_VERSION: 971 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 972 break; 973 case IXGBE_ERR_SFP_NOT_SUPPORTED: 974 device_printf(dev, "Unsupported SFP+ Module\n"); 975 error = EIO; 976 goto err_pci; 977 case IXGBE_ERR_SFP_NOT_PRESENT: 978 device_printf(dev, "No SFP+ Module found\n"); 979 /* falls thru */ 980 default: 981 break; 982 } 983 984 /* Most of the iflib initialization... */ 985 986 iflib_set_mac(ctx, hw->mac.addr); 987 switch (adapter->hw.mac.type) { 988 case ixgbe_mac_X550: 989 case ixgbe_mac_X550EM_x: 990 case ixgbe_mac_X550EM_a: 991 scctx->isc_rss_table_size = 512; 992 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; 993 break; 994 default: 995 scctx->isc_rss_table_size = 128; 996 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16; 997 } 998 999 /* Allow legacy interrupts */ 1000 ixgbe_txrx.ift_legacy_intr = ixgbe_intr; 1001 1002 scctx->isc_txqsizes[0] = 1003 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + 1004 sizeof(u32), DBA_ALIGN), 1005 scctx->isc_rxqsizes[0] = 1006 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), 1007 DBA_ALIGN); 1008 1009 /* XXX */ 1010 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | 1011 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO; 1012 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1013 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER; 1014 scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR); 1015 } else { 1016 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP; 1017 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; 1018 scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR); 1019 } 1020 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; 1021 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; 1022 scctx->isc_tx_tso_segsize_max = PAGE_SIZE; 1023 1024 scctx->isc_txrx = &ixgbe_txrx; 1025 1026 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS; 1027 1028 return (0); 1029 1030 err_pci: 1031 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 1032 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 1033 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 1034 ixgbe_free_pci_resources(ctx); 1035 1036 return (error); 1037 } /* ixgbe_if_attach_pre */ 1038 1039 /********************************************************************* 1040 * ixgbe_if_attach_post - Device initialization routine, part 2 1041 * 1042 * Called during driver load, but after interrupts and 1043 * resources have been allocated and configured. 1044 * Sets up some data structures not relevant to iflib. 1045 * 1046 * return 0 on success, positive on failure 1047 *********************************************************************/ 1048 static int 1049 ixgbe_if_attach_post(if_ctx_t ctx) 1050 { 1051 device_t dev; 1052 struct adapter *adapter; 1053 struct ixgbe_hw *hw; 1054 int error = 0; 1055 1056 dev = iflib_get_dev(ctx); 1057 adapter = iflib_get_softc(ctx); 1058 hw = &adapter->hw; 1059 1060 1061 if (adapter->intr_type == IFLIB_INTR_LEGACY && 1062 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) { 1063 device_printf(dev, "Device does not support legacy interrupts"); 1064 error = ENXIO; 1065 goto err; 1066 } 1067 1068 /* Allocate multicast array memory. */ 1069 adapter->mta = malloc(sizeof(*adapter->mta) * 1070 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT); 1071 if (adapter->mta == NULL) { 1072 device_printf(dev, "Can not allocate multicast setup array\n"); 1073 error = ENOMEM; 1074 goto err; 1075 } 1076 1077 /* hw.ix defaults init */ 1078 ixgbe_set_advertise(adapter, ixgbe_advertise_speed); 1079 1080 /* Enable the optics for 82599 SFP+ fiber */ 1081 ixgbe_enable_tx_laser(hw); 1082 1083 /* Enable power to the phy. */ 1084 ixgbe_set_phy_power(hw, TRUE); 1085 1086 ixgbe_initialize_iov(adapter); 1087 1088 error = ixgbe_setup_interface(ctx); 1089 if (error) { 1090 device_printf(dev, "Interface setup failed: %d\n", error); 1091 goto err; 1092 } 1093 1094 ixgbe_if_update_admin_status(ctx); 1095 1096 /* Initialize statistics */ 1097 ixgbe_update_stats_counters(adapter); 1098 ixgbe_add_hw_stats(adapter); 1099 1100 /* Check PCIE slot type/speed/width */ 1101 ixgbe_get_slot_info(adapter); 1102 1103 /* 1104 * Do time init and sysctl init here, but 1105 * only on the first port of a bypass adapter. 1106 */ 1107 ixgbe_bypass_init(adapter); 1108 1109 /* Set an initial dmac value */ 1110 adapter->dmac = 0; 1111 /* Set initial advertised speeds (if applicable) */ 1112 adapter->advertise = ixgbe_get_advertise(adapter); 1113 1114 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 1115 ixgbe_define_iov_schemas(dev, &error); 1116 1117 /* Add sysctls */ 1118 ixgbe_add_device_sysctls(ctx); 1119 1120 return (0); 1121 err: 1122 return (error); 1123 } /* ixgbe_if_attach_post */ 1124 1125 /************************************************************************ 1126 * ixgbe_check_wol_support 1127 * 1128 * Checks whether the adapter's ports are capable of 1129 * Wake On LAN by reading the adapter's NVM. 1130 * 1131 * Sets each port's hw->wol_enabled value depending 1132 * on the value read here. 1133 ************************************************************************/ 1134 static void 1135 ixgbe_check_wol_support(struct adapter *adapter) 1136 { 1137 struct ixgbe_hw *hw = &adapter->hw; 1138 u16 dev_caps = 0; 1139 1140 /* Find out WoL support for port */ 1141 adapter->wol_support = hw->wol_enabled = 0; 1142 ixgbe_get_device_caps(hw, &dev_caps); 1143 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 1144 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 1145 hw->bus.func == 0)) 1146 adapter->wol_support = hw->wol_enabled = 1; 1147 1148 /* Save initial wake up filter configuration */ 1149 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 1150 1151 return; 1152 } /* ixgbe_check_wol_support */ 1153 1154 /************************************************************************ 1155 * ixgbe_setup_interface 1156 * 1157 * Setup networking device structure and register an interface. 1158 ************************************************************************/ 1159 static int 1160 ixgbe_setup_interface(if_ctx_t ctx) 1161 { 1162 struct ifnet *ifp = iflib_get_ifp(ctx); 1163 struct adapter *adapter = iflib_get_softc(ctx); 1164 1165 INIT_DEBUGOUT("ixgbe_setup_interface: begin"); 1166 1167 if_setbaudrate(ifp, IF_Gbps(10)); 1168 1169 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1170 1171 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); 1172 1173 ixgbe_add_media_types(ctx); 1174 1175 /* Autoselect media by default */ 1176 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); 1177 1178 return (0); 1179 } /* ixgbe_setup_interface */ 1180 1181 /************************************************************************ 1182 * ixgbe_if_get_counter 1183 ************************************************************************/ 1184 static uint64_t 1185 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) 1186 { 1187 struct adapter *adapter = iflib_get_softc(ctx); 1188 if_t ifp = iflib_get_ifp(ctx); 1189 1190 switch (cnt) { 1191 case IFCOUNTER_IPACKETS: 1192 return (adapter->ipackets); 1193 case IFCOUNTER_OPACKETS: 1194 return (adapter->opackets); 1195 case IFCOUNTER_IBYTES: 1196 return (adapter->ibytes); 1197 case IFCOUNTER_OBYTES: 1198 return (adapter->obytes); 1199 case IFCOUNTER_IMCASTS: 1200 return (adapter->imcasts); 1201 case IFCOUNTER_OMCASTS: 1202 return (adapter->omcasts); 1203 case IFCOUNTER_COLLISIONS: 1204 return (0); 1205 case IFCOUNTER_IQDROPS: 1206 return (adapter->iqdrops); 1207 case IFCOUNTER_OQDROPS: 1208 return (0); 1209 case IFCOUNTER_IERRORS: 1210 return (adapter->ierrors); 1211 default: 1212 return (if_get_counter_default(ifp, cnt)); 1213 } 1214 } /* ixgbe_if_get_counter */ 1215 1216 /************************************************************************ 1217 * ixgbe_if_i2c_req 1218 ************************************************************************/ 1219 static int 1220 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) 1221 { 1222 struct adapter *adapter = iflib_get_softc(ctx); 1223 struct ixgbe_hw *hw = &adapter->hw; 1224 int i; 1225 1226 1227 if (hw->phy.ops.read_i2c_byte == NULL) 1228 return (ENXIO); 1229 for (i = 0; i < req->len; i++) 1230 hw->phy.ops.read_i2c_byte(hw, req->offset + i, 1231 req->dev_addr, &req->data[i]); 1232 return (0); 1233 } /* ixgbe_if_i2c_req */ 1234 1235 /************************************************************************ 1236 * ixgbe_add_media_types 1237 ************************************************************************/ 1238 static void 1239 ixgbe_add_media_types(if_ctx_t ctx) 1240 { 1241 struct adapter *adapter = iflib_get_softc(ctx); 1242 struct ixgbe_hw *hw = &adapter->hw; 1243 device_t dev = iflib_get_dev(ctx); 1244 u64 layer; 1245 1246 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 1247 1248 /* Media types with matching FreeBSD media defines */ 1249 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) 1250 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL); 1251 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) 1252 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1253 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) 1254 ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); 1255 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 1256 ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 1257 1258 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1259 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 1260 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, 1261 NULL); 1262 1263 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 1264 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL); 1265 if (hw->phy.multispeed_fiber) 1266 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0, 1267 NULL); 1268 } 1269 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 1270 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 1271 if (hw->phy.multispeed_fiber) 1272 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, 1273 NULL); 1274 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 1275 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 1276 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 1277 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 1278 1279 #ifdef IFM_ETH_XTYPE 1280 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 1281 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL); 1282 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) 1283 ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); 1284 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 1285 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL); 1286 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) 1287 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL); 1288 #else 1289 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 1290 device_printf(dev, "Media supported: 10GbaseKR\n"); 1291 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); 1292 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); 1293 } 1294 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 1295 device_printf(dev, "Media supported: 10GbaseKX4\n"); 1296 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); 1297 ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); 1298 } 1299 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 1300 device_printf(dev, "Media supported: 1000baseKX\n"); 1301 device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); 1302 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL); 1303 } 1304 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { 1305 device_printf(dev, "Media supported: 2500baseKX\n"); 1306 device_printf(dev, "2500baseKX mapped to 2500baseSX\n"); 1307 ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL); 1308 } 1309 #endif 1310 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 1311 device_printf(dev, "Media supported: 1000baseBX\n"); 1312 1313 if (hw->device_id == IXGBE_DEV_ID_82598AT) { 1314 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 1315 0, NULL); 1316 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1317 } 1318 1319 ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1320 } /* ixgbe_add_media_types */ 1321 1322 /************************************************************************ 1323 * ixgbe_is_sfp 1324 ************************************************************************/ 1325 static inline bool 1326 ixgbe_is_sfp(struct ixgbe_hw *hw) 1327 { 1328 switch (hw->mac.type) { 1329 case ixgbe_mac_82598EB: 1330 if (hw->phy.type == ixgbe_phy_nl) 1331 return (TRUE); 1332 return (FALSE); 1333 case ixgbe_mac_82599EB: 1334 switch (hw->mac.ops.get_media_type(hw)) { 1335 case ixgbe_media_type_fiber: 1336 case ixgbe_media_type_fiber_qsfp: 1337 return (TRUE); 1338 default: 1339 return (FALSE); 1340 } 1341 case ixgbe_mac_X550EM_x: 1342 case ixgbe_mac_X550EM_a: 1343 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) 1344 return (TRUE); 1345 return (FALSE); 1346 default: 1347 return (FALSE); 1348 } 1349 } /* ixgbe_is_sfp */ 1350 1351 /************************************************************************ 1352 * ixgbe_config_link 1353 ************************************************************************/ 1354 static void 1355 ixgbe_config_link(if_ctx_t ctx) 1356 { 1357 struct adapter *adapter = iflib_get_softc(ctx); 1358 struct ixgbe_hw *hw = &adapter->hw; 1359 u32 autoneg, err = 0; 1360 bool sfp, negotiate; 1361 1362 sfp = ixgbe_is_sfp(hw); 1363 1364 if (sfp) { 1365 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; 1366 iflib_admin_intr_deferred(ctx); 1367 } else { 1368 if (hw->mac.ops.check_link) 1369 err = ixgbe_check_link(hw, &adapter->link_speed, 1370 &adapter->link_up, FALSE); 1371 if (err) 1372 return; 1373 autoneg = hw->phy.autoneg_advertised; 1374 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 1375 err = hw->mac.ops.get_link_capabilities(hw, &autoneg, 1376 &negotiate); 1377 if (err) 1378 return; 1379 if (hw->mac.ops.setup_link) 1380 err = hw->mac.ops.setup_link(hw, autoneg, 1381 adapter->link_up); 1382 } 1383 } /* ixgbe_config_link */ 1384 1385 /************************************************************************ 1386 * ixgbe_update_stats_counters - Update board statistics counters. 1387 ************************************************************************/ 1388 static void 1389 ixgbe_update_stats_counters(struct adapter *adapter) 1390 { 1391 struct ixgbe_hw *hw = &adapter->hw; 1392 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1393 u32 missed_rx = 0, bprc, lxon, lxoff, total; 1394 u64 total_missed_rx = 0; 1395 1396 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 1397 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 1398 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 1399 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 1400 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0)); 1401 1402 for (int i = 0; i < 16; i++) { 1403 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 1404 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 1405 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 1406 } 1407 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 1408 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 1409 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 1410 1411 /* Hardware workaround, gprc counts missed packets */ 1412 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 1413 stats->gprc -= missed_rx; 1414 1415 if (hw->mac.type != ixgbe_mac_82598EB) { 1416 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + 1417 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 1418 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + 1419 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 1420 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) + 1421 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 1422 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 1423 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 1424 } else { 1425 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 1426 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 1427 /* 82598 only has a counter in the high register */ 1428 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 1429 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 1430 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 1431 } 1432 1433 /* 1434 * Workaround: mprc hardware is incorrectly counting 1435 * broadcasts, so for now we subtract those. 1436 */ 1437 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 1438 stats->bprc += bprc; 1439 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 1440 if (hw->mac.type == ixgbe_mac_82598EB) 1441 stats->mprc -= bprc; 1442 1443 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 1444 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 1445 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 1446 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 1447 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 1448 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 1449 1450 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 1451 stats->lxontxc += lxon; 1452 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 1453 stats->lxofftxc += lxoff; 1454 total = lxon + lxoff; 1455 1456 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 1457 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 1458 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 1459 stats->gptc -= total; 1460 stats->mptc -= total; 1461 stats->ptc64 -= total; 1462 stats->gotc -= total * ETHER_MIN_LEN; 1463 1464 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 1465 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 1466 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 1467 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 1468 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 1469 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 1470 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 1471 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 1472 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 1473 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 1474 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 1475 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 1476 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 1477 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 1478 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 1479 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 1480 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 1481 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 1482 /* Only read FCOE on 82599 */ 1483 if (hw->mac.type != ixgbe_mac_82598EB) { 1484 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 1485 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 1486 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 1487 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 1488 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 1489 } 1490 1491 /* Fill out the OS statistics structure */ 1492 IXGBE_SET_IPACKETS(adapter, stats->gprc); 1493 IXGBE_SET_OPACKETS(adapter, stats->gptc); 1494 IXGBE_SET_IBYTES(adapter, stats->gorc); 1495 IXGBE_SET_OBYTES(adapter, stats->gotc); 1496 IXGBE_SET_IMCASTS(adapter, stats->mprc); 1497 IXGBE_SET_OMCASTS(adapter, stats->mptc); 1498 IXGBE_SET_COLLISIONS(adapter, 0); 1499 IXGBE_SET_IQDROPS(adapter, total_missed_rx); 1500 IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec); 1501 } /* ixgbe_update_stats_counters */ 1502 1503 /************************************************************************ 1504 * ixgbe_add_hw_stats 1505 * 1506 * Add sysctl variables, one per statistic, to the system. 1507 ************************************************************************/ 1508 static void 1509 ixgbe_add_hw_stats(struct adapter *adapter) 1510 { 1511 device_t dev = iflib_get_dev(adapter->ctx); 1512 struct ix_rx_queue *rx_que; 1513 struct ix_tx_queue *tx_que; 1514 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1515 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1516 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1517 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1518 struct sysctl_oid *stat_node, *queue_node; 1519 struct sysctl_oid_list *stat_list, *queue_list; 1520 int i; 1521 1522 #define QUEUE_NAME_LEN 32 1523 char namebuf[QUEUE_NAME_LEN]; 1524 1525 /* Driver Statistics */ 1526 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1527 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); 1528 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1529 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); 1530 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1531 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); 1532 1533 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { 1534 struct tx_ring *txr = &tx_que->txr; 1535 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1536 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1537 CTLFLAG_RD, NULL, "Queue Name"); 1538 queue_list = SYSCTL_CHILDREN(queue_node); 1539 1540 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1541 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1542 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1543 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1544 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1545 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1546 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1547 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1548 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1549 CTLFLAG_RD, &txr->total_packets, 1550 "Queue Packets Transmitted"); 1551 } 1552 1553 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { 1554 struct rx_ring *rxr = &rx_que->rxr; 1555 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1556 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1557 CTLFLAG_RD, NULL, "Queue Name"); 1558 queue_list = SYSCTL_CHILDREN(queue_node); 1559 1560 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1561 CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i], 1562 sizeof(&adapter->rx_queues[i]), 1563 ixgbe_sysctl_interrupt_rate_handler, "IU", 1564 "Interrupt Rate"); 1565 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1566 CTLFLAG_RD, &(adapter->rx_queues[i].irqs), 1567 "irqs on this queue"); 1568 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1569 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1570 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1571 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1572 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1573 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1574 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1575 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1576 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1577 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1578 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1579 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1580 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1581 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets"); 1582 } 1583 1584 /* MAC stats get their own sub node */ 1585 1586 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 1587 CTLFLAG_RD, NULL, "MAC Statistics"); 1588 stat_list = SYSCTL_CHILDREN(stat_node); 1589 1590 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 1591 CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); 1592 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", 1593 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); 1594 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", 1595 CTLFLAG_RD, &stats->errbc, "Byte Errors"); 1596 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", 1597 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); 1598 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", 1599 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults"); 1600 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", 1601 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults"); 1602 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", 1603 CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); 1604 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets", 1605 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count"); 1606 1607 /* Flow Control stats */ 1608 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 1609 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted"); 1610 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 1611 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received"); 1612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 1613 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted"); 1614 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 1615 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received"); 1616 1617 /* Packet Reception Stats */ 1618 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", 1619 CTLFLAG_RD, &stats->tor, "Total Octets Received"); 1620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 1621 CTLFLAG_RD, &stats->gorc, "Good Octets Received"); 1622 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", 1623 CTLFLAG_RD, &stats->tpr, "Total Packets Received"); 1624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 1625 CTLFLAG_RD, &stats->gprc, "Good Packets Received"); 1626 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 1627 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); 1628 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", 1629 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); 1630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 1631 CTLFLAG_RD, &stats->prc64, "64 byte frames received "); 1632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 1633 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); 1634 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 1635 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); 1636 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 1637 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); 1638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 1639 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); 1640 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 1641 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received"); 1642 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 1643 CTLFLAG_RD, &stats->ruc, "Receive Undersized"); 1644 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 1645 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received "); 1646 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 1647 CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); 1648 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 1649 CTLFLAG_RD, &stats->rjc, "Received Jabber"); 1650 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 1651 CTLFLAG_RD, &stats->mngprc, "Management Packets Received"); 1652 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 1653 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped"); 1654 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 1655 CTLFLAG_RD, &stats->xec, "Checksum Errors"); 1656 1657 /* Packet Transmission Stats */ 1658 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 1659 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); 1660 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 1661 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); 1662 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 1663 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); 1664 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 1665 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); 1666 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 1667 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); 1668 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 1669 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted"); 1670 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 1671 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted "); 1672 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 1673 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); 1674 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 1675 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); 1676 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 1677 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); 1678 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 1679 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); 1680 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 1681 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); 1682 } /* ixgbe_add_hw_stats */ 1683 1684 /************************************************************************ 1685 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function 1686 * 1687 * Retrieves the TDH value from the hardware 1688 ************************************************************************/ 1689 static int 1690 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 1691 { 1692 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1693 int error; 1694 unsigned int val; 1695 1696 if (!txr) 1697 return (0); 1698 1699 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); 1700 error = sysctl_handle_int(oidp, &val, 0, req); 1701 if (error || !req->newptr) 1702 return error; 1703 1704 return (0); 1705 } /* ixgbe_sysctl_tdh_handler */ 1706 1707 /************************************************************************ 1708 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1709 * 1710 * Retrieves the TDT value from the hardware 1711 ************************************************************************/ 1712 static int 1713 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1714 { 1715 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1716 int error; 1717 unsigned int val; 1718 1719 if (!txr) 1720 return (0); 1721 1722 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); 1723 error = sysctl_handle_int(oidp, &val, 0, req); 1724 if (error || !req->newptr) 1725 return error; 1726 1727 return (0); 1728 } /* ixgbe_sysctl_tdt_handler */ 1729 1730 /************************************************************************ 1731 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1732 * 1733 * Retrieves the RDH value from the hardware 1734 ************************************************************************/ 1735 static int 1736 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1737 { 1738 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1739 int error; 1740 unsigned int val; 1741 1742 if (!rxr) 1743 return (0); 1744 1745 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); 1746 error = sysctl_handle_int(oidp, &val, 0, req); 1747 if (error || !req->newptr) 1748 return error; 1749 1750 return (0); 1751 } /* ixgbe_sysctl_rdh_handler */ 1752 1753 /************************************************************************ 1754 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1755 * 1756 * Retrieves the RDT value from the hardware 1757 ************************************************************************/ 1758 static int 1759 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1760 { 1761 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1762 int error; 1763 unsigned int val; 1764 1765 if (!rxr) 1766 return (0); 1767 1768 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); 1769 error = sysctl_handle_int(oidp, &val, 0, req); 1770 if (error || !req->newptr) 1771 return error; 1772 1773 return (0); 1774 } /* ixgbe_sysctl_rdt_handler */ 1775 1776 /************************************************************************ 1777 * ixgbe_if_vlan_register 1778 * 1779 * Run via vlan config EVENT, it enables us to use the 1780 * HW Filter table since we can get the vlan id. This 1781 * just creates the entry in the soft version of the 1782 * VFTA, init will repopulate the real table. 1783 ************************************************************************/ 1784 static void 1785 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1786 { 1787 struct adapter *adapter = iflib_get_softc(ctx); 1788 u16 index, bit; 1789 1790 index = (vtag >> 5) & 0x7F; 1791 bit = vtag & 0x1F; 1792 adapter->shadow_vfta[index] |= (1 << bit); 1793 ++adapter->num_vlans; 1794 ixgbe_setup_vlan_hw_support(ctx); 1795 } /* ixgbe_if_vlan_register */ 1796 1797 /************************************************************************ 1798 * ixgbe_if_vlan_unregister 1799 * 1800 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 1801 ************************************************************************/ 1802 static void 1803 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) 1804 { 1805 struct adapter *adapter = iflib_get_softc(ctx); 1806 u16 index, bit; 1807 1808 index = (vtag >> 5) & 0x7F; 1809 bit = vtag & 0x1F; 1810 adapter->shadow_vfta[index] &= ~(1 << bit); 1811 --adapter->num_vlans; 1812 /* Re-init to load the changes */ 1813 ixgbe_setup_vlan_hw_support(ctx); 1814 } /* ixgbe_if_vlan_unregister */ 1815 1816 /************************************************************************ 1817 * ixgbe_setup_vlan_hw_support 1818 ************************************************************************/ 1819 static void 1820 ixgbe_setup_vlan_hw_support(if_ctx_t ctx) 1821 { 1822 struct ifnet *ifp = iflib_get_ifp(ctx); 1823 struct adapter *adapter = iflib_get_softc(ctx); 1824 struct ixgbe_hw *hw = &adapter->hw; 1825 struct rx_ring *rxr; 1826 int i; 1827 u32 ctrl; 1828 1829 1830 /* 1831 * We get here thru init_locked, meaning 1832 * a soft reset, this has already cleared 1833 * the VFTA and other state, so if there 1834 * have been no vlan's registered do nothing. 1835 */ 1836 if (adapter->num_vlans == 0) 1837 return; 1838 1839 /* Setup the queues for vlans */ 1840 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1841 for (i = 0; i < adapter->num_rx_queues; i++) { 1842 rxr = &adapter->rx_queues[i].rxr; 1843 /* On 82599 the VLAN enable is per/queue in RXDCTL */ 1844 if (hw->mac.type != ixgbe_mac_82598EB) { 1845 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 1846 ctrl |= IXGBE_RXDCTL_VME; 1847 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 1848 } 1849 rxr->vtag_strip = TRUE; 1850 } 1851 } 1852 1853 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 1854 return; 1855 /* 1856 * A soft reset zero's out the VFTA, so 1857 * we need to repopulate it now. 1858 */ 1859 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1860 if (adapter->shadow_vfta[i] != 0) 1861 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 1862 adapter->shadow_vfta[i]); 1863 1864 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1865 /* Enable the Filter Table if enabled */ 1866 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 1867 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1868 ctrl |= IXGBE_VLNCTRL_VFE; 1869 } 1870 if (hw->mac.type == ixgbe_mac_82598EB) 1871 ctrl |= IXGBE_VLNCTRL_VME; 1872 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1873 } /* ixgbe_setup_vlan_hw_support */ 1874 1875 /************************************************************************ 1876 * ixgbe_get_slot_info 1877 * 1878 * Get the width and transaction speed of 1879 * the slot this adapter is plugged into. 1880 ************************************************************************/ 1881 static void 1882 ixgbe_get_slot_info(struct adapter *adapter) 1883 { 1884 device_t dev = iflib_get_dev(adapter->ctx); 1885 struct ixgbe_hw *hw = &adapter->hw; 1886 int bus_info_valid = TRUE; 1887 u32 offset; 1888 u16 link; 1889 1890 /* Some devices are behind an internal bridge */ 1891 switch (hw->device_id) { 1892 case IXGBE_DEV_ID_82599_SFP_SF_QP: 1893 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 1894 goto get_parent_info; 1895 default: 1896 break; 1897 } 1898 1899 ixgbe_get_bus_info(hw); 1900 1901 /* 1902 * Some devices don't use PCI-E, but there is no need 1903 * to display "Unknown" for bus speed and width. 1904 */ 1905 switch (hw->mac.type) { 1906 case ixgbe_mac_X550EM_x: 1907 case ixgbe_mac_X550EM_a: 1908 return; 1909 default: 1910 goto display; 1911 } 1912 1913 get_parent_info: 1914 /* 1915 * For the Quad port adapter we need to parse back 1916 * up the PCI tree to find the speed of the expansion 1917 * slot into which this adapter is plugged. A bit more work. 1918 */ 1919 dev = device_get_parent(device_get_parent(dev)); 1920 #ifdef IXGBE_DEBUG 1921 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 1922 pci_get_slot(dev), pci_get_function(dev)); 1923 #endif 1924 dev = device_get_parent(device_get_parent(dev)); 1925 #ifdef IXGBE_DEBUG 1926 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 1927 pci_get_slot(dev), pci_get_function(dev)); 1928 #endif 1929 /* Now get the PCI Express Capabilities offset */ 1930 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 1931 /* 1932 * Hmm...can't get PCI-Express capabilities. 1933 * Falling back to default method. 1934 */ 1935 bus_info_valid = FALSE; 1936 ixgbe_get_bus_info(hw); 1937 goto display; 1938 } 1939 /* ...and read the Link Status Register */ 1940 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 1941 ixgbe_set_pci_config_data_generic(hw, link); 1942 1943 display: 1944 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 1945 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 1946 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 1947 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 1948 "Unknown"), 1949 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 1950 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 1951 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 1952 "Unknown")); 1953 1954 if (bus_info_valid) { 1955 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 1956 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 1957 (hw->bus.speed == ixgbe_bus_speed_2500))) { 1958 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1959 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 1960 } 1961 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 1962 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 1963 (hw->bus.speed < ixgbe_bus_speed_8000))) { 1964 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1965 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 1966 } 1967 } else 1968 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n"); 1969 1970 return; 1971 } /* ixgbe_get_slot_info */ 1972 1973 /************************************************************************ 1974 * ixgbe_if_msix_intr_assign 1975 * 1976 * Setup MSI-X Interrupt resources and handlers 1977 ************************************************************************/ 1978 static int 1979 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 1980 { 1981 struct adapter *adapter = iflib_get_softc(ctx); 1982 struct ix_rx_queue *rx_que = adapter->rx_queues; 1983 struct ix_tx_queue *tx_que; 1984 int error, rid, vector = 0; 1985 int cpu_id = 0; 1986 char buf[16]; 1987 1988 /* Admin Que is vector 0*/ 1989 rid = vector + 1; 1990 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) { 1991 rid = vector + 1; 1992 1993 snprintf(buf, sizeof(buf), "rxq%d", i); 1994 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 1995 IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 1996 1997 if (error) { 1998 device_printf(iflib_get_dev(ctx), 1999 "Failed to allocate que int %d err: %d", i, error); 2000 adapter->num_rx_queues = i + 1; 2001 goto fail; 2002 } 2003 2004 rx_que->msix = vector; 2005 adapter->active_queues |= (u64)(1 << rx_que->msix); 2006 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 2007 /* 2008 * The queue ID is used as the RSS layer bucket ID. 2009 * We look up the queue ID -> RSS CPU ID and select 2010 * that. 2011 */ 2012 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2013 } else { 2014 /* 2015 * Bind the MSI-X vector, and thus the 2016 * rings to the corresponding cpu. 2017 * 2018 * This just happens to match the default RSS 2019 * round-robin bucket -> queue -> CPU allocation. 2020 */ 2021 if (adapter->num_rx_queues > 1) 2022 cpu_id = i; 2023 } 2024 2025 } 2026 for (int i = 0; i < adapter->num_tx_queues; i++) { 2027 snprintf(buf, sizeof(buf), "txq%d", i); 2028 tx_que = &adapter->tx_queues[i]; 2029 tx_que->msix = i % adapter->num_rx_queues; 2030 iflib_softirq_alloc_generic(ctx, 2031 &adapter->rx_queues[tx_que->msix].que_irq, 2032 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2033 } 2034 rid = vector + 1; 2035 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, 2036 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq"); 2037 if (error) { 2038 device_printf(iflib_get_dev(ctx), 2039 "Failed to register admin handler"); 2040 return (error); 2041 } 2042 2043 adapter->vector = vector; 2044 2045 return (0); 2046 fail: 2047 iflib_irq_free(ctx, &adapter->irq); 2048 rx_que = adapter->rx_queues; 2049 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) 2050 iflib_irq_free(ctx, &rx_que->que_irq); 2051 2052 return (error); 2053 } /* ixgbe_if_msix_intr_assign */ 2054 2055 /********************************************************************* 2056 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2057 **********************************************************************/ 2058 static int 2059 ixgbe_msix_que(void *arg) 2060 { 2061 struct ix_rx_queue *que = arg; 2062 struct adapter *adapter = que->adapter; 2063 struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx); 2064 2065 /* Protect against spurious interrupts */ 2066 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2067 return (FILTER_HANDLED); 2068 2069 ixgbe_disable_queue(adapter, que->msix); 2070 ++que->irqs; 2071 2072 return (FILTER_SCHEDULE_THREAD); 2073 } /* ixgbe_msix_que */ 2074 2075 /************************************************************************ 2076 * ixgbe_media_status - Media Ioctl callback 2077 * 2078 * Called whenever the user queries the status of 2079 * the interface using ifconfig. 2080 ************************************************************************/ 2081 static void 2082 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2083 { 2084 struct adapter *adapter = iflib_get_softc(ctx); 2085 struct ixgbe_hw *hw = &adapter->hw; 2086 int layer; 2087 2088 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2089 2090 ifmr->ifm_status = IFM_AVALID; 2091 ifmr->ifm_active = IFM_ETHER; 2092 2093 if (!adapter->link_active) 2094 return; 2095 2096 ifmr->ifm_status |= IFM_ACTIVE; 2097 layer = adapter->phy_layer; 2098 2099 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2100 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2101 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2102 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2103 switch (adapter->link_speed) { 2104 case IXGBE_LINK_SPEED_10GB_FULL: 2105 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2106 break; 2107 case IXGBE_LINK_SPEED_1GB_FULL: 2108 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2109 break; 2110 case IXGBE_LINK_SPEED_100_FULL: 2111 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2112 break; 2113 case IXGBE_LINK_SPEED_10_FULL: 2114 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2115 break; 2116 } 2117 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2118 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2119 switch (adapter->link_speed) { 2120 case IXGBE_LINK_SPEED_10GB_FULL: 2121 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2122 break; 2123 } 2124 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2125 switch (adapter->link_speed) { 2126 case IXGBE_LINK_SPEED_10GB_FULL: 2127 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2128 break; 2129 case IXGBE_LINK_SPEED_1GB_FULL: 2130 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2131 break; 2132 } 2133 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2134 switch (adapter->link_speed) { 2135 case IXGBE_LINK_SPEED_10GB_FULL: 2136 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2137 break; 2138 case IXGBE_LINK_SPEED_1GB_FULL: 2139 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2140 break; 2141 } 2142 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2143 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2144 switch (adapter->link_speed) { 2145 case IXGBE_LINK_SPEED_10GB_FULL: 2146 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2147 break; 2148 case IXGBE_LINK_SPEED_1GB_FULL: 2149 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2150 break; 2151 } 2152 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2153 switch (adapter->link_speed) { 2154 case IXGBE_LINK_SPEED_10GB_FULL: 2155 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2156 break; 2157 } 2158 /* 2159 * XXX: These need to use the proper media types once 2160 * they're added. 2161 */ 2162 #ifndef IFM_ETH_XTYPE 2163 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2164 switch (adapter->link_speed) { 2165 case IXGBE_LINK_SPEED_10GB_FULL: 2166 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2167 break; 2168 case IXGBE_LINK_SPEED_2_5GB_FULL: 2169 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2170 break; 2171 case IXGBE_LINK_SPEED_1GB_FULL: 2172 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2173 break; 2174 } 2175 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2176 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2177 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2178 switch (adapter->link_speed) { 2179 case IXGBE_LINK_SPEED_10GB_FULL: 2180 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2181 break; 2182 case IXGBE_LINK_SPEED_2_5GB_FULL: 2183 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2184 break; 2185 case IXGBE_LINK_SPEED_1GB_FULL: 2186 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2187 break; 2188 } 2189 #else 2190 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2191 switch (adapter->link_speed) { 2192 case IXGBE_LINK_SPEED_10GB_FULL: 2193 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 2194 break; 2195 case IXGBE_LINK_SPEED_2_5GB_FULL: 2196 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2197 break; 2198 case IXGBE_LINK_SPEED_1GB_FULL: 2199 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2200 break; 2201 } 2202 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2203 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2204 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2205 switch (adapter->link_speed) { 2206 case IXGBE_LINK_SPEED_10GB_FULL: 2207 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 2208 break; 2209 case IXGBE_LINK_SPEED_2_5GB_FULL: 2210 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2211 break; 2212 case IXGBE_LINK_SPEED_1GB_FULL: 2213 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2214 break; 2215 } 2216 #endif 2217 2218 /* If nothing is recognized... */ 2219 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 2220 ifmr->ifm_active |= IFM_UNKNOWN; 2221 2222 /* Display current flow control setting used on link */ 2223 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 2224 hw->fc.current_mode == ixgbe_fc_full) 2225 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2226 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 2227 hw->fc.current_mode == ixgbe_fc_full) 2228 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2229 } /* ixgbe_media_status */ 2230 2231 /************************************************************************ 2232 * ixgbe_media_change - Media Ioctl callback 2233 * 2234 * Called when the user changes speed/duplex using 2235 * media/mediopt option with ifconfig. 2236 ************************************************************************/ 2237 static int 2238 ixgbe_if_media_change(if_ctx_t ctx) 2239 { 2240 struct adapter *adapter = iflib_get_softc(ctx); 2241 struct ifmedia *ifm = iflib_get_media(ctx); 2242 struct ixgbe_hw *hw = &adapter->hw; 2243 ixgbe_link_speed speed = 0; 2244 2245 INIT_DEBUGOUT("ixgbe_if_media_change: begin"); 2246 2247 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2248 return (EINVAL); 2249 2250 if (hw->phy.media_type == ixgbe_media_type_backplane) 2251 return (EPERM); 2252 2253 /* 2254 * We don't actually need to check against the supported 2255 * media types of the adapter; ifmedia will take care of 2256 * that for us. 2257 */ 2258 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2259 case IFM_AUTO: 2260 case IFM_10G_T: 2261 speed |= IXGBE_LINK_SPEED_100_FULL; 2262 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2263 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2264 break; 2265 case IFM_10G_LRM: 2266 case IFM_10G_LR: 2267 #ifndef IFM_ETH_XTYPE 2268 case IFM_10G_SR: /* KR, too */ 2269 case IFM_10G_CX4: /* KX4 */ 2270 #else 2271 case IFM_10G_KR: 2272 case IFM_10G_KX4: 2273 #endif 2274 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2275 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2276 break; 2277 #ifndef IFM_ETH_XTYPE 2278 case IFM_1000_CX: /* KX */ 2279 #else 2280 case IFM_1000_KX: 2281 #endif 2282 case IFM_1000_LX: 2283 case IFM_1000_SX: 2284 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2285 break; 2286 case IFM_1000_T: 2287 speed |= IXGBE_LINK_SPEED_100_FULL; 2288 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2289 break; 2290 case IFM_10G_TWINAX: 2291 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2292 break; 2293 case IFM_100_TX: 2294 speed |= IXGBE_LINK_SPEED_100_FULL; 2295 break; 2296 case IFM_10_T: 2297 speed |= IXGBE_LINK_SPEED_10_FULL; 2298 break; 2299 default: 2300 goto invalid; 2301 } 2302 2303 hw->mac.autotry_restart = TRUE; 2304 hw->mac.ops.setup_link(hw, speed, TRUE); 2305 adapter->advertise = 2306 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 2307 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 2308 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 2309 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0); 2310 2311 return (0); 2312 2313 invalid: 2314 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2315 2316 return (EINVAL); 2317 } /* ixgbe_if_media_change */ 2318 2319 /************************************************************************ 2320 * ixgbe_set_promisc 2321 ************************************************************************/ 2322 static int 2323 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2324 { 2325 struct adapter *adapter = iflib_get_softc(ctx); 2326 struct ifnet *ifp = iflib_get_ifp(ctx); 2327 u32 rctl; 2328 int mcnt = 0; 2329 2330 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2331 rctl &= (~IXGBE_FCTRL_UPE); 2332 if (ifp->if_flags & IFF_ALLMULTI) 2333 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2334 else { 2335 mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES); 2336 } 2337 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2338 rctl &= (~IXGBE_FCTRL_MPE); 2339 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2340 2341 if (ifp->if_flags & IFF_PROMISC) { 2342 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2343 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2344 } else if (ifp->if_flags & IFF_ALLMULTI) { 2345 rctl |= IXGBE_FCTRL_MPE; 2346 rctl &= ~IXGBE_FCTRL_UPE; 2347 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2348 } 2349 return (0); 2350 } /* ixgbe_if_promisc_set */ 2351 2352 /************************************************************************ 2353 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2354 ************************************************************************/ 2355 static int 2356 ixgbe_msix_link(void *arg) 2357 { 2358 struct adapter *adapter = arg; 2359 struct ixgbe_hw *hw = &adapter->hw; 2360 u32 eicr, eicr_mask; 2361 s32 retval; 2362 2363 ++adapter->link_irq; 2364 2365 /* Pause other interrupts */ 2366 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2367 2368 /* First get the cause */ 2369 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2370 /* Be sure the queue bits are not cleared */ 2371 eicr &= ~IXGBE_EICR_RTX_QUEUE; 2372 /* Clear interrupt with write */ 2373 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2374 2375 /* Link status change */ 2376 if (eicr & IXGBE_EICR_LSC) { 2377 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2378 adapter->task_requests |= IXGBE_REQUEST_TASK_LSC; 2379 } 2380 2381 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 2382 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && 2383 (eicr & IXGBE_EICR_FLOW_DIR)) { 2384 /* This is probably overkill :) */ 2385 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1)) 2386 return (FILTER_HANDLED); 2387 /* Disable the interrupt */ 2388 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); 2389 adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR; 2390 } else 2391 if (eicr & IXGBE_EICR_ECC) { 2392 device_printf(iflib_get_dev(adapter->ctx), 2393 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n"); 2394 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2395 } 2396 2397 /* Check for over temp condition */ 2398 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 2399 switch (adapter->hw.mac.type) { 2400 case ixgbe_mac_X550EM_a: 2401 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 2402 break; 2403 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 2404 IXGBE_EICR_GPI_SDP0_X550EM_a); 2405 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2406 IXGBE_EICR_GPI_SDP0_X550EM_a); 2407 retval = hw->phy.ops.check_overtemp(hw); 2408 if (retval != IXGBE_ERR_OVERTEMP) 2409 break; 2410 device_printf(iflib_get_dev(adapter->ctx), 2411 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2412 device_printf(iflib_get_dev(adapter->ctx), 2413 "System shutdown required!\n"); 2414 break; 2415 default: 2416 if (!(eicr & IXGBE_EICR_TS)) 2417 break; 2418 retval = hw->phy.ops.check_overtemp(hw); 2419 if (retval != IXGBE_ERR_OVERTEMP) 2420 break; 2421 device_printf(iflib_get_dev(adapter->ctx), 2422 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2423 device_printf(iflib_get_dev(adapter->ctx), 2424 "System shutdown required!\n"); 2425 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 2426 break; 2427 } 2428 } 2429 2430 /* Check for VF message */ 2431 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) && 2432 (eicr & IXGBE_EICR_MAILBOX)) 2433 adapter->task_requests |= IXGBE_REQUEST_TASK_MBX; 2434 } 2435 2436 if (ixgbe_is_sfp(hw)) { 2437 /* Pluggable optics-related interrupt */ 2438 if (hw->mac.type >= ixgbe_mac_X540) 2439 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 2440 else 2441 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 2442 2443 if (eicr & eicr_mask) { 2444 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2445 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; 2446 } 2447 2448 if ((hw->mac.type == ixgbe_mac_82599EB) && 2449 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 2450 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2451 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2452 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 2453 } 2454 } 2455 2456 /* Check for fan failure */ 2457 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 2458 ixgbe_check_fan_failure(adapter, eicr, TRUE); 2459 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2460 } 2461 2462 /* External PHY interrupt */ 2463 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 2464 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2465 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 2466 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY; 2467 } 2468 2469 return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED; 2470 } /* ixgbe_msix_link */ 2471 2472 /************************************************************************ 2473 * ixgbe_sysctl_interrupt_rate_handler 2474 ************************************************************************/ 2475 static int 2476 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2477 { 2478 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2479 int error; 2480 unsigned int reg, usec, rate; 2481 2482 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); 2483 usec = ((reg & 0x0FF8) >> 3); 2484 if (usec > 0) 2485 rate = 500000 / usec; 2486 else 2487 rate = 0; 2488 error = sysctl_handle_int(oidp, &rate, 0, req); 2489 if (error || !req->newptr) 2490 return error; 2491 reg &= ~0xfff; /* default, no limitation */ 2492 ixgbe_max_interrupt_rate = 0; 2493 if (rate > 0 && rate < 500000) { 2494 if (rate < 1000) 2495 rate = 1000; 2496 ixgbe_max_interrupt_rate = rate; 2497 reg |= ((4000000/rate) & 0xff8); 2498 } 2499 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); 2500 2501 return (0); 2502 } /* ixgbe_sysctl_interrupt_rate_handler */ 2503 2504 /************************************************************************ 2505 * ixgbe_add_device_sysctls 2506 ************************************************************************/ 2507 static void 2508 ixgbe_add_device_sysctls(if_ctx_t ctx) 2509 { 2510 struct adapter *adapter = iflib_get_softc(ctx); 2511 device_t dev = iflib_get_dev(ctx); 2512 struct ixgbe_hw *hw = &adapter->hw; 2513 struct sysctl_oid_list *child; 2514 struct sysctl_ctx_list *ctx_list; 2515 2516 ctx_list = device_get_sysctl_ctx(dev); 2517 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2518 2519 /* Sysctls for all devices */ 2520 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2521 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I", 2522 IXGBE_SYSCTL_DESC_SET_FC); 2523 2524 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2525 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I", 2526 IXGBE_SYSCTL_DESC_ADV_SPEED); 2527 2528 #ifdef IXGBE_DEBUG 2529 /* testing sysctls (for all devices) */ 2530 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2531 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state, 2532 "I", "PCI Power State"); 2533 2534 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2535 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0, 2536 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2537 #endif 2538 /* for X550 series devices */ 2539 if (hw->mac.type >= ixgbe_mac_X550) 2540 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2541 CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac, 2542 "I", "DMA Coalesce"); 2543 2544 /* for WoL-capable devices */ 2545 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2546 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2547 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 2548 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2549 2550 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 2551 CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc, 2552 "I", "Enable/Disable Wake Up Filters"); 2553 } 2554 2555 /* for X552/X557-AT devices */ 2556 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2557 struct sysctl_oid *phy_node; 2558 struct sysctl_oid_list *phy_list; 2559 2560 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2561 CTLFLAG_RD, NULL, "External PHY sysctls"); 2562 phy_list = SYSCTL_CHILDREN(phy_node); 2563 2564 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2565 CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp, 2566 "I", "Current External PHY Temperature (Celsius)"); 2567 2568 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2569 "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, 2570 ixgbe_sysctl_phy_overtemp_occurred, "I", 2571 "External PHY High Temperature Event Occurred"); 2572 } 2573 2574 if (adapter->feat_cap & IXGBE_FEATURE_EEE) { 2575 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2576 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 2577 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2578 } 2579 } /* ixgbe_add_device_sysctls */ 2580 2581 /************************************************************************ 2582 * ixgbe_allocate_pci_resources 2583 ************************************************************************/ 2584 static int 2585 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2586 { 2587 struct adapter *adapter = iflib_get_softc(ctx); 2588 device_t dev = iflib_get_dev(ctx); 2589 int rid; 2590 2591 rid = PCIR_BAR(0); 2592 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2593 RF_ACTIVE); 2594 2595 if (!(adapter->pci_mem)) { 2596 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2597 return (ENXIO); 2598 } 2599 2600 /* Save bus_space values for READ/WRITE_REG macros */ 2601 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); 2602 adapter->osdep.mem_bus_space_handle = 2603 rman_get_bushandle(adapter->pci_mem); 2604 /* Set hw values for shared code */ 2605 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; 2606 2607 return (0); 2608 } /* ixgbe_allocate_pci_resources */ 2609 2610 /************************************************************************ 2611 * ixgbe_detach - Device removal routine 2612 * 2613 * Called when the driver is being removed. 2614 * Stops the adapter and deallocates all the resources 2615 * that were allocated for driver operation. 2616 * 2617 * return 0 on success, positive on failure 2618 ************************************************************************/ 2619 static int 2620 ixgbe_if_detach(if_ctx_t ctx) 2621 { 2622 struct adapter *adapter = iflib_get_softc(ctx); 2623 device_t dev = iflib_get_dev(ctx); 2624 u32 ctrl_ext; 2625 2626 INIT_DEBUGOUT("ixgbe_detach: begin"); 2627 2628 if (ixgbe_pci_iov_detach(dev) != 0) { 2629 device_printf(dev, "SR-IOV in use; detach first.\n"); 2630 return (EBUSY); 2631 } 2632 2633 ixgbe_setup_low_power_mode(ctx); 2634 2635 /* let hardware know driver is unloading */ 2636 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 2637 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2638 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 2639 2640 ixgbe_free_pci_resources(ctx); 2641 free(adapter->mta, M_IXGBE); 2642 2643 return (0); 2644 } /* ixgbe_if_detach */ 2645 2646 /************************************************************************ 2647 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2648 * 2649 * Prepare the adapter/port for LPLU and/or WoL 2650 ************************************************************************/ 2651 static int 2652 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2653 { 2654 struct adapter *adapter = iflib_get_softc(ctx); 2655 struct ixgbe_hw *hw = &adapter->hw; 2656 device_t dev = iflib_get_dev(ctx); 2657 s32 error = 0; 2658 2659 if (!hw->wol_enabled) 2660 ixgbe_set_phy_power(hw, FALSE); 2661 2662 /* Limit power management flow to X550EM baseT */ 2663 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2664 hw->phy.ops.enter_lplu) { 2665 /* Turn off support for APM wakeup. (Using ACPI instead) */ 2666 IXGBE_WRITE_REG(hw, IXGBE_GRC, 2667 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 2668 2669 /* 2670 * Clear Wake Up Status register to prevent any previous wakeup 2671 * events from waking us up immediately after we suspend. 2672 */ 2673 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2674 2675 /* 2676 * Program the Wakeup Filter Control register with user filter 2677 * settings 2678 */ 2679 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 2680 2681 /* Enable wakeups and power management in Wakeup Control */ 2682 IXGBE_WRITE_REG(hw, IXGBE_WUC, 2683 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 2684 2685 /* X550EM baseT adapters need a special LPLU flow */ 2686 hw->phy.reset_disable = TRUE; 2687 ixgbe_if_stop(ctx); 2688 error = hw->phy.ops.enter_lplu(hw); 2689 if (error) 2690 device_printf(dev, "Error entering LPLU: %d\n", error); 2691 hw->phy.reset_disable = FALSE; 2692 } else { 2693 /* Just stop for other adapters */ 2694 ixgbe_if_stop(ctx); 2695 } 2696 2697 return error; 2698 } /* ixgbe_setup_low_power_mode */ 2699 2700 /************************************************************************ 2701 * ixgbe_shutdown - Shutdown entry point 2702 ************************************************************************/ 2703 static int 2704 ixgbe_if_shutdown(if_ctx_t ctx) 2705 { 2706 int error = 0; 2707 2708 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 2709 2710 error = ixgbe_setup_low_power_mode(ctx); 2711 2712 return (error); 2713 } /* ixgbe_if_shutdown */ 2714 2715 /************************************************************************ 2716 * ixgbe_suspend 2717 * 2718 * From D0 to D3 2719 ************************************************************************/ 2720 static int 2721 ixgbe_if_suspend(if_ctx_t ctx) 2722 { 2723 int error = 0; 2724 2725 INIT_DEBUGOUT("ixgbe_suspend: begin"); 2726 2727 error = ixgbe_setup_low_power_mode(ctx); 2728 2729 return (error); 2730 } /* ixgbe_if_suspend */ 2731 2732 /************************************************************************ 2733 * ixgbe_resume 2734 * 2735 * From D3 to D0 2736 ************************************************************************/ 2737 static int 2738 ixgbe_if_resume(if_ctx_t ctx) 2739 { 2740 struct adapter *adapter = iflib_get_softc(ctx); 2741 device_t dev = iflib_get_dev(ctx); 2742 struct ifnet *ifp = iflib_get_ifp(ctx); 2743 struct ixgbe_hw *hw = &adapter->hw; 2744 u32 wus; 2745 2746 INIT_DEBUGOUT("ixgbe_resume: begin"); 2747 2748 /* Read & clear WUS register */ 2749 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2750 if (wus) 2751 device_printf(dev, "Woken up by (WUS): %#010x\n", 2752 IXGBE_READ_REG(hw, IXGBE_WUS)); 2753 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2754 /* And clear WUFC until next low-power transition */ 2755 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2756 2757 /* 2758 * Required after D3->D0 transition; 2759 * will re-advertise all previous advertised speeds 2760 */ 2761 if (ifp->if_flags & IFF_UP) 2762 ixgbe_if_init(ctx); 2763 2764 return (0); 2765 } /* ixgbe_if_resume */ 2766 2767 /************************************************************************ 2768 * ixgbe_if_mtu_set - Ioctl mtu entry point 2769 * 2770 * Return 0 on success, EINVAL on failure 2771 ************************************************************************/ 2772 static int 2773 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 2774 { 2775 struct adapter *adapter = iflib_get_softc(ctx); 2776 int error = 0; 2777 2778 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); 2779 2780 if (mtu > IXGBE_MAX_MTU) { 2781 error = EINVAL; 2782 } else { 2783 adapter->max_frame_size = mtu + IXGBE_MTU_HDR; 2784 } 2785 2786 return error; 2787 } /* ixgbe_if_mtu_set */ 2788 2789 /************************************************************************ 2790 * ixgbe_if_crcstrip_set 2791 ************************************************************************/ 2792 static void 2793 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip) 2794 { 2795 struct adapter *sc = iflib_get_softc(ctx); 2796 struct ixgbe_hw *hw = &sc->hw; 2797 /* crc stripping is set in two places: 2798 * IXGBE_HLREG0 (modified on init_locked and hw reset) 2799 * IXGBE_RDRXCTL (set by the original driver in 2800 * ixgbe_setup_hw_rsc() called in init_locked. 2801 * We disable the setting when netmap is compiled in). 2802 * We update the values here, but also in ixgbe.c because 2803 * init_locked sometimes is called outside our control. 2804 */ 2805 uint32_t hl, rxc; 2806 2807 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2808 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2809 #ifdef NETMAP 2810 if (netmap_verbose) 2811 D("%s read HLREG 0x%x rxc 0x%x", 2812 onoff ? "enter" : "exit", hl, rxc); 2813 #endif 2814 /* hw requirements ... */ 2815 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2816 rxc |= IXGBE_RDRXCTL_RSCACKC; 2817 if (onoff && !crcstrip) { 2818 /* keep the crc. Fast rx */ 2819 hl &= ~IXGBE_HLREG0_RXCRCSTRP; 2820 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; 2821 } else { 2822 /* reset default mode */ 2823 hl |= IXGBE_HLREG0_RXCRCSTRP; 2824 rxc |= IXGBE_RDRXCTL_CRCSTRIP; 2825 } 2826 #ifdef NETMAP 2827 if (netmap_verbose) 2828 D("%s write HLREG 0x%x rxc 0x%x", 2829 onoff ? "enter" : "exit", hl, rxc); 2830 #endif 2831 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); 2832 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); 2833 } /* ixgbe_if_crcstrip_set */ 2834 2835 /********************************************************************* 2836 * ixgbe_if_init - Init entry point 2837 * 2838 * Used in two ways: It is used by the stack as an init 2839 * entry point in network interface structure. It is also 2840 * used by the driver as a hw/sw initialization routine to 2841 * get to a consistent state. 2842 * 2843 * Return 0 on success, positive on failure 2844 **********************************************************************/ 2845 void 2846 ixgbe_if_init(if_ctx_t ctx) 2847 { 2848 struct adapter *adapter = iflib_get_softc(ctx); 2849 struct ifnet *ifp = iflib_get_ifp(ctx); 2850 device_t dev = iflib_get_dev(ctx); 2851 struct ixgbe_hw *hw = &adapter->hw; 2852 struct ix_rx_queue *rx_que; 2853 struct ix_tx_queue *tx_que; 2854 u32 txdctl, mhadd; 2855 u32 rxdctl, rxctrl; 2856 u32 ctrl_ext; 2857 2858 int i, j, err; 2859 2860 INIT_DEBUGOUT("ixgbe_if_init: begin"); 2861 2862 /* Queue indices may change with IOV mode */ 2863 ixgbe_align_all_queue_indices(adapter); 2864 2865 /* reprogram the RAR[0] in case user changed it. */ 2866 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); 2867 2868 /* Get the latest mac address, User can use a LAA */ 2869 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 2870 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); 2871 hw->addr_ctrl.rar_used_count = 1; 2872 2873 ixgbe_init_hw(hw); 2874 2875 ixgbe_initialize_iov(adapter); 2876 2877 ixgbe_initialize_transmit_units(ctx); 2878 2879 /* Setup Multicast table */ 2880 ixgbe_if_multi_set(ctx); 2881 2882 /* Determine the correct mbuf pool, based on frame size */ 2883 adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 2884 2885 /* Configure RX settings */ 2886 ixgbe_initialize_receive_units(ctx); 2887 2888 /* 2889 * Initialize variable holding task enqueue requests 2890 * from MSI-X interrupts 2891 */ 2892 adapter->task_requests = 0; 2893 2894 /* Enable SDP & MSI-X interrupts based on adapter */ 2895 ixgbe_config_gpie(adapter); 2896 2897 /* Set MTU size */ 2898 if (ifp->if_mtu > ETHERMTU) { 2899 /* aka IXGBE_MAXFRS on 82599 and newer */ 2900 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 2901 mhadd &= ~IXGBE_MHADD_MFS_MASK; 2902 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 2903 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 2904 } 2905 2906 /* Now enable all the queues */ 2907 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { 2908 struct tx_ring *txr = &tx_que->txr; 2909 2910 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 2911 txdctl |= IXGBE_TXDCTL_ENABLE; 2912 /* Set WTHRESH to 8, burst writeback */ 2913 txdctl |= (8 << 16); 2914 /* 2915 * When the internal queue falls below PTHRESH (32), 2916 * start prefetching as long as there are at least 2917 * HTHRESH (1) buffers ready. The values are taken 2918 * from the Intel linux driver 3.8.21. 2919 * Prefetching enables tx line rate even with 1 queue. 2920 */ 2921 txdctl |= (32 << 0) | (1 << 8); 2922 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 2923 } 2924 2925 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { 2926 struct rx_ring *rxr = &rx_que->rxr; 2927 2928 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 2929 if (hw->mac.type == ixgbe_mac_82598EB) { 2930 /* 2931 * PTHRESH = 21 2932 * HTHRESH = 4 2933 * WTHRESH = 8 2934 */ 2935 rxdctl &= ~0x3FFFFF; 2936 rxdctl |= 0x080420; 2937 } 2938 rxdctl |= IXGBE_RXDCTL_ENABLE; 2939 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 2940 for (j = 0; j < 10; j++) { 2941 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 2942 IXGBE_RXDCTL_ENABLE) 2943 break; 2944 else 2945 msec_delay(1); 2946 } 2947 wmb(); 2948 } 2949 2950 /* Enable Receive engine */ 2951 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2952 if (hw->mac.type == ixgbe_mac_82598EB) 2953 rxctrl |= IXGBE_RXCTRL_DMBYPS; 2954 rxctrl |= IXGBE_RXCTRL_RXEN; 2955 ixgbe_enable_rx_dma(hw, rxctrl); 2956 2957 /* Set up MSI/MSI-X routing */ 2958 if (ixgbe_enable_msix) { 2959 ixgbe_configure_ivars(adapter); 2960 /* Set up auto-mask */ 2961 if (hw->mac.type == ixgbe_mac_82598EB) 2962 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2963 else { 2964 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 2965 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 2966 } 2967 } else { /* Simple settings for Legacy/MSI */ 2968 ixgbe_set_ivar(adapter, 0, 0, 0); 2969 ixgbe_set_ivar(adapter, 0, 0, 1); 2970 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2971 } 2972 2973 ixgbe_init_fdir(adapter); 2974 2975 /* 2976 * Check on any SFP devices that 2977 * need to be kick-started 2978 */ 2979 if (hw->phy.type == ixgbe_phy_none) { 2980 err = hw->phy.ops.identify(hw); 2981 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 2982 device_printf(dev, 2983 "Unsupported SFP+ module type was detected.\n"); 2984 return; 2985 } 2986 } 2987 2988 /* Set moderation on the Link interrupt */ 2989 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR); 2990 2991 /* Enable power to the phy. */ 2992 ixgbe_set_phy_power(hw, TRUE); 2993 2994 /* Config/Enable Link */ 2995 ixgbe_config_link(ctx); 2996 2997 /* Hardware Packet Buffer & Flow Control setup */ 2998 ixgbe_config_delay_values(adapter); 2999 3000 /* Initialize the FC settings */ 3001 ixgbe_start_hw(hw); 3002 3003 /* Set up VLAN support and filter */ 3004 ixgbe_setup_vlan_hw_support(ctx); 3005 3006 /* Setup DMA Coalescing */ 3007 ixgbe_config_dmac(adapter); 3008 3009 /* And now turn on interrupts */ 3010 ixgbe_if_enable_intr(ctx); 3011 3012 /* Enable the use of the MBX by the VF's */ 3013 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { 3014 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3015 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 3016 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3017 } 3018 3019 } /* ixgbe_init_locked */ 3020 3021 /************************************************************************ 3022 * ixgbe_set_ivar 3023 * 3024 * Setup the correct IVAR register for a particular MSI-X interrupt 3025 * (yes this is all very magic and confusing :) 3026 * - entry is the register array entry 3027 * - vector is the MSI-X vector for this queue 3028 * - type is RX/TX/MISC 3029 ************************************************************************/ 3030 static void 3031 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 3032 { 3033 struct ixgbe_hw *hw = &adapter->hw; 3034 u32 ivar, index; 3035 3036 vector |= IXGBE_IVAR_ALLOC_VAL; 3037 3038 switch (hw->mac.type) { 3039 case ixgbe_mac_82598EB: 3040 if (type == -1) 3041 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3042 else 3043 entry += (type * 64); 3044 index = (entry >> 2) & 0x1F; 3045 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3046 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3047 ivar |= (vector << (8 * (entry & 0x3))); 3048 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 3049 break; 3050 case ixgbe_mac_82599EB: 3051 case ixgbe_mac_X540: 3052 case ixgbe_mac_X550: 3053 case ixgbe_mac_X550EM_x: 3054 case ixgbe_mac_X550EM_a: 3055 if (type == -1) { /* MISC IVAR */ 3056 index = (entry & 1) * 8; 3057 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3058 ivar &= ~(0xFF << index); 3059 ivar |= (vector << index); 3060 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3061 } else { /* RX/TX IVARS */ 3062 index = (16 * (entry & 1)) + (8 * type); 3063 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3064 ivar &= ~(0xFF << index); 3065 ivar |= (vector << index); 3066 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3067 } 3068 default: 3069 break; 3070 } 3071 } /* ixgbe_set_ivar */ 3072 3073 /************************************************************************ 3074 * ixgbe_configure_ivars 3075 ************************************************************************/ 3076 static void 3077 ixgbe_configure_ivars(struct adapter *adapter) 3078 { 3079 struct ix_rx_queue *rx_que = adapter->rx_queues; 3080 struct ix_tx_queue *tx_que = adapter->tx_queues; 3081 u32 newitr; 3082 3083 if (ixgbe_max_interrupt_rate > 0) 3084 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3085 else { 3086 /* 3087 * Disable DMA coalescing if interrupt moderation is 3088 * disabled. 3089 */ 3090 adapter->dmac = 0; 3091 newitr = 0; 3092 } 3093 3094 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) { 3095 struct rx_ring *rxr = &rx_que->rxr; 3096 3097 /* First the RX queue entry */ 3098 ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0); 3099 3100 /* Set an Initial EITR value */ 3101 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr); 3102 } 3103 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) { 3104 struct tx_ring *txr = &tx_que->txr; 3105 3106 /* ... and the TX */ 3107 ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1); 3108 } 3109 /* For the Link interrupt */ 3110 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 3111 } /* ixgbe_configure_ivars */ 3112 3113 /************************************************************************ 3114 * ixgbe_config_gpie 3115 ************************************************************************/ 3116 static void 3117 ixgbe_config_gpie(struct adapter *adapter) 3118 { 3119 struct ixgbe_hw *hw = &adapter->hw; 3120 u32 gpie; 3121 3122 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3123 3124 if (adapter->intr_type == IFLIB_INTR_MSIX) { 3125 /* Enable Enhanced MSI-X mode */ 3126 gpie |= IXGBE_GPIE_MSIX_MODE 3127 | IXGBE_GPIE_EIAME 3128 | IXGBE_GPIE_PBA_SUPPORT 3129 | IXGBE_GPIE_OCD; 3130 } 3131 3132 /* Fan Failure Interrupt */ 3133 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 3134 gpie |= IXGBE_SDP1_GPIEN; 3135 3136 /* Thermal Sensor Interrupt */ 3137 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 3138 gpie |= IXGBE_SDP0_GPIEN_X540; 3139 3140 /* Link detection */ 3141 switch (hw->mac.type) { 3142 case ixgbe_mac_82599EB: 3143 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 3144 break; 3145 case ixgbe_mac_X550EM_x: 3146 case ixgbe_mac_X550EM_a: 3147 gpie |= IXGBE_SDP0_GPIEN_X540; 3148 break; 3149 default: 3150 break; 3151 } 3152 3153 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3154 3155 } /* ixgbe_config_gpie */ 3156 3157 /************************************************************************ 3158 * ixgbe_config_delay_values 3159 * 3160 * Requires adapter->max_frame_size to be set. 3161 ************************************************************************/ 3162 static void 3163 ixgbe_config_delay_values(struct adapter *adapter) 3164 { 3165 struct ixgbe_hw *hw = &adapter->hw; 3166 u32 rxpb, frame, size, tmp; 3167 3168 frame = adapter->max_frame_size; 3169 3170 /* Calculate High Water */ 3171 switch (hw->mac.type) { 3172 case ixgbe_mac_X540: 3173 case ixgbe_mac_X550: 3174 case ixgbe_mac_X550EM_x: 3175 case ixgbe_mac_X550EM_a: 3176 tmp = IXGBE_DV_X540(frame, frame); 3177 break; 3178 default: 3179 tmp = IXGBE_DV(frame, frame); 3180 break; 3181 } 3182 size = IXGBE_BT2KB(tmp); 3183 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 3184 hw->fc.high_water[0] = rxpb - size; 3185 3186 /* Now calculate Low Water */ 3187 switch (hw->mac.type) { 3188 case ixgbe_mac_X540: 3189 case ixgbe_mac_X550: 3190 case ixgbe_mac_X550EM_x: 3191 case ixgbe_mac_X550EM_a: 3192 tmp = IXGBE_LOW_DV_X540(frame); 3193 break; 3194 default: 3195 tmp = IXGBE_LOW_DV(frame); 3196 break; 3197 } 3198 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 3199 3200 hw->fc.pause_time = IXGBE_FC_PAUSE; 3201 hw->fc.send_xon = TRUE; 3202 } /* ixgbe_config_delay_values */ 3203 3204 /************************************************************************ 3205 * ixgbe_set_multi - Multicast Update 3206 * 3207 * Called whenever multicast address list is updated. 3208 ************************************************************************/ 3209 static int 3210 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count) 3211 { 3212 struct adapter *adapter = arg; 3213 struct ixgbe_mc_addr *mta = adapter->mta; 3214 3215 if (ifma->ifma_addr->sa_family != AF_LINK) 3216 return (0); 3217 if (count == MAX_NUM_MULTICAST_ADDRESSES) 3218 return (0); 3219 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 3220 mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3221 mta[count].vmdq = adapter->pool; 3222 3223 return (1); 3224 } /* ixgbe_mc_filter_apply */ 3225 3226 static void 3227 ixgbe_if_multi_set(if_ctx_t ctx) 3228 { 3229 struct adapter *adapter = iflib_get_softc(ctx); 3230 struct ixgbe_mc_addr *mta; 3231 struct ifnet *ifp = iflib_get_ifp(ctx); 3232 u8 *update_ptr; 3233 int mcnt = 0; 3234 u32 fctrl; 3235 3236 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin"); 3237 3238 mta = adapter->mta; 3239 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 3240 3241 mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter); 3242 3243 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3244 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3245 if (ifp->if_flags & IFF_PROMISC) 3246 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3247 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 3248 ifp->if_flags & IFF_ALLMULTI) { 3249 fctrl |= IXGBE_FCTRL_MPE; 3250 fctrl &= ~IXGBE_FCTRL_UPE; 3251 } else 3252 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3253 3254 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 3255 3256 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { 3257 update_ptr = (u8 *)mta; 3258 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 3259 ixgbe_mc_array_itr, TRUE); 3260 } 3261 3262 } /* ixgbe_if_multi_set */ 3263 3264 /************************************************************************ 3265 * ixgbe_mc_array_itr 3266 * 3267 * An iterator function needed by the multicast shared code. 3268 * It feeds the shared code routine the addresses in the 3269 * array of ixgbe_set_multi() one by one. 3270 ************************************************************************/ 3271 static u8 * 3272 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 3273 { 3274 struct ixgbe_mc_addr *mta; 3275 3276 mta = (struct ixgbe_mc_addr *)*update_ptr; 3277 *vmdq = mta->vmdq; 3278 3279 *update_ptr = (u8*)(mta + 1); 3280 3281 return (mta->addr); 3282 } /* ixgbe_mc_array_itr */ 3283 3284 /************************************************************************ 3285 * ixgbe_local_timer - Timer routine 3286 * 3287 * Checks for link status, updates statistics, 3288 * and runs the watchdog check. 3289 ************************************************************************/ 3290 static void 3291 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid) 3292 { 3293 struct adapter *adapter = iflib_get_softc(ctx); 3294 3295 if (qid != 0) 3296 return; 3297 3298 /* Check for pluggable optics */ 3299 if (adapter->sfp_probe) 3300 if (!ixgbe_sfp_probe(ctx)) 3301 return; /* Nothing to do */ 3302 3303 ixgbe_check_link(&adapter->hw, &adapter->link_speed, 3304 &adapter->link_up, 0); 3305 3306 /* Fire off the adminq task */ 3307 iflib_admin_intr_deferred(ctx); 3308 3309 } /* ixgbe_if_timer */ 3310 3311 /************************************************************************ 3312 * ixgbe_sfp_probe 3313 * 3314 * Determine if a port had optics inserted. 3315 ************************************************************************/ 3316 static bool 3317 ixgbe_sfp_probe(if_ctx_t ctx) 3318 { 3319 struct adapter *adapter = iflib_get_softc(ctx); 3320 struct ixgbe_hw *hw = &adapter->hw; 3321 device_t dev = iflib_get_dev(ctx); 3322 bool result = FALSE; 3323 3324 if ((hw->phy.type == ixgbe_phy_nl) && 3325 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3326 s32 ret = hw->phy.ops.identify_sfp(hw); 3327 if (ret) 3328 goto out; 3329 ret = hw->phy.ops.reset(hw); 3330 adapter->sfp_probe = FALSE; 3331 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3332 device_printf(dev, "Unsupported SFP+ module detected!"); 3333 device_printf(dev, 3334 "Reload driver with supported module.\n"); 3335 goto out; 3336 } else 3337 device_printf(dev, "SFP+ module detected!\n"); 3338 /* We now have supported optics */ 3339 result = TRUE; 3340 } 3341 out: 3342 3343 return (result); 3344 } /* ixgbe_sfp_probe */ 3345 3346 /************************************************************************ 3347 * ixgbe_handle_mod - Tasklet for SFP module interrupts 3348 ************************************************************************/ 3349 static void 3350 ixgbe_handle_mod(void *context) 3351 { 3352 if_ctx_t ctx = context; 3353 struct adapter *adapter = iflib_get_softc(ctx); 3354 struct ixgbe_hw *hw = &adapter->hw; 3355 device_t dev = iflib_get_dev(ctx); 3356 u32 err, cage_full = 0; 3357 3358 if (adapter->hw.need_crosstalk_fix) { 3359 switch (hw->mac.type) { 3360 case ixgbe_mac_82599EB: 3361 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3362 IXGBE_ESDP_SDP2; 3363 break; 3364 case ixgbe_mac_X550EM_x: 3365 case ixgbe_mac_X550EM_a: 3366 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3367 IXGBE_ESDP_SDP0; 3368 break; 3369 default: 3370 break; 3371 } 3372 3373 if (!cage_full) 3374 goto handle_mod_out; 3375 } 3376 3377 err = hw->phy.ops.identify_sfp(hw); 3378 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3379 device_printf(dev, 3380 "Unsupported SFP+ module type was detected.\n"); 3381 goto handle_mod_out; 3382 } 3383 3384 if (hw->mac.type == ixgbe_mac_82598EB) 3385 err = hw->phy.ops.reset(hw); 3386 else 3387 err = hw->mac.ops.setup_sfp(hw); 3388 3389 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3390 device_printf(dev, 3391 "Setup failure - unsupported SFP+ module type.\n"); 3392 goto handle_mod_out; 3393 } 3394 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 3395 return; 3396 3397 handle_mod_out: 3398 adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF); 3399 } /* ixgbe_handle_mod */ 3400 3401 3402 /************************************************************************ 3403 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts 3404 ************************************************************************/ 3405 static void 3406 ixgbe_handle_msf(void *context) 3407 { 3408 if_ctx_t ctx = context; 3409 struct adapter *adapter = iflib_get_softc(ctx); 3410 struct ixgbe_hw *hw = &adapter->hw; 3411 u32 autoneg; 3412 bool negotiate; 3413 3414 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 3415 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 3416 3417 autoneg = hw->phy.autoneg_advertised; 3418 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3419 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3420 if (hw->mac.ops.setup_link) 3421 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3422 3423 /* Adjust media types shown in ifconfig */ 3424 ifmedia_removeall(adapter->media); 3425 ixgbe_add_media_types(adapter->ctx); 3426 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); 3427 } /* ixgbe_handle_msf */ 3428 3429 /************************************************************************ 3430 * ixgbe_handle_phy - Tasklet for external PHY interrupts 3431 ************************************************************************/ 3432 static void 3433 ixgbe_handle_phy(void *context) 3434 { 3435 if_ctx_t ctx = context; 3436 struct adapter *adapter = iflib_get_softc(ctx); 3437 struct ixgbe_hw *hw = &adapter->hw; 3438 int error; 3439 3440 error = hw->phy.ops.handle_lasi(hw); 3441 if (error == IXGBE_ERR_OVERTEMP) 3442 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n"); 3443 else if (error) 3444 device_printf(adapter->dev, 3445 "Error handling LASI interrupt: %d\n", error); 3446 } /* ixgbe_handle_phy */ 3447 3448 /************************************************************************ 3449 * ixgbe_if_stop - Stop the hardware 3450 * 3451 * Disables all traffic on the adapter by issuing a 3452 * global reset on the MAC and deallocates TX/RX buffers. 3453 ************************************************************************/ 3454 static void 3455 ixgbe_if_stop(if_ctx_t ctx) 3456 { 3457 struct adapter *adapter = iflib_get_softc(ctx); 3458 struct ixgbe_hw *hw = &adapter->hw; 3459 3460 INIT_DEBUGOUT("ixgbe_if_stop: begin\n"); 3461 3462 ixgbe_reset_hw(hw); 3463 hw->adapter_stopped = FALSE; 3464 ixgbe_stop_adapter(hw); 3465 if (hw->mac.type == ixgbe_mac_82599EB) 3466 ixgbe_stop_mac_link_on_d3_82599(hw); 3467 /* Turn off the laser - noop with no optics */ 3468 ixgbe_disable_tx_laser(hw); 3469 3470 /* Update the stack */ 3471 adapter->link_up = FALSE; 3472 ixgbe_if_update_admin_status(ctx); 3473 3474 /* reprogram the RAR[0] in case user changed it. */ 3475 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 3476 3477 return; 3478 } /* ixgbe_if_stop */ 3479 3480 /************************************************************************ 3481 * ixgbe_update_link_status - Update OS on link state 3482 * 3483 * Note: Only updates the OS on the cached link state. 3484 * The real check of the hardware only happens with 3485 * a link interrupt. 3486 ************************************************************************/ 3487 static void 3488 ixgbe_if_update_admin_status(if_ctx_t ctx) 3489 { 3490 struct adapter *adapter = iflib_get_softc(ctx); 3491 device_t dev = iflib_get_dev(ctx); 3492 3493 if (adapter->link_up) { 3494 if (adapter->link_active == FALSE) { 3495 if (bootverbose) 3496 device_printf(dev, "Link is up %d Gbps %s \n", 3497 ((adapter->link_speed == 128) ? 10 : 1), 3498 "Full Duplex"); 3499 adapter->link_active = TRUE; 3500 /* Update any Flow Control changes */ 3501 ixgbe_fc_enable(&adapter->hw); 3502 /* Update DMA coalescing config */ 3503 ixgbe_config_dmac(adapter); 3504 /* should actually be negotiated value */ 3505 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); 3506 3507 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3508 ixgbe_ping_all_vfs(adapter); 3509 } 3510 } else { /* Link down */ 3511 if (adapter->link_active == TRUE) { 3512 if (bootverbose) 3513 device_printf(dev, "Link is Down\n"); 3514 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 3515 adapter->link_active = FALSE; 3516 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3517 ixgbe_ping_all_vfs(adapter); 3518 } 3519 } 3520 3521 /* Handle task requests from msix_link() */ 3522 if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD) 3523 ixgbe_handle_mod(ctx); 3524 if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF) 3525 ixgbe_handle_msf(ctx); 3526 if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX) 3527 ixgbe_handle_mbx(ctx); 3528 if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR) 3529 ixgbe_reinit_fdir(ctx); 3530 if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY) 3531 ixgbe_handle_phy(ctx); 3532 adapter->task_requests = 0; 3533 3534 ixgbe_update_stats_counters(adapter); 3535 } /* ixgbe_if_update_admin_status */ 3536 3537 /************************************************************************ 3538 * ixgbe_config_dmac - Configure DMA Coalescing 3539 ************************************************************************/ 3540 static void 3541 ixgbe_config_dmac(struct adapter *adapter) 3542 { 3543 struct ixgbe_hw *hw = &adapter->hw; 3544 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 3545 3546 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) 3547 return; 3548 3549 if (dcfg->watchdog_timer ^ adapter->dmac || 3550 dcfg->link_speed ^ adapter->link_speed) { 3551 dcfg->watchdog_timer = adapter->dmac; 3552 dcfg->fcoe_en = FALSE; 3553 dcfg->link_speed = adapter->link_speed; 3554 dcfg->num_tcs = 1; 3555 3556 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 3557 dcfg->watchdog_timer, dcfg->link_speed); 3558 3559 hw->mac.ops.dmac_config(hw); 3560 } 3561 } /* ixgbe_config_dmac */ 3562 3563 /************************************************************************ 3564 * ixgbe_if_enable_intr 3565 ************************************************************************/ 3566 void 3567 ixgbe_if_enable_intr(if_ctx_t ctx) 3568 { 3569 struct adapter *adapter = iflib_get_softc(ctx); 3570 struct ixgbe_hw *hw = &adapter->hw; 3571 struct ix_rx_queue *que = adapter->rx_queues; 3572 u32 mask, fwsm; 3573 3574 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 3575 3576 switch (adapter->hw.mac.type) { 3577 case ixgbe_mac_82599EB: 3578 mask |= IXGBE_EIMS_ECC; 3579 /* Temperature sensor on some adapters */ 3580 mask |= IXGBE_EIMS_GPI_SDP0; 3581 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 3582 mask |= IXGBE_EIMS_GPI_SDP1; 3583 mask |= IXGBE_EIMS_GPI_SDP2; 3584 break; 3585 case ixgbe_mac_X540: 3586 /* Detect if Thermal Sensor is enabled */ 3587 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 3588 if (fwsm & IXGBE_FWSM_TS_ENABLED) 3589 mask |= IXGBE_EIMS_TS; 3590 mask |= IXGBE_EIMS_ECC; 3591 break; 3592 case ixgbe_mac_X550: 3593 /* MAC thermal sensor is automatically enabled */ 3594 mask |= IXGBE_EIMS_TS; 3595 mask |= IXGBE_EIMS_ECC; 3596 break; 3597 case ixgbe_mac_X550EM_x: 3598 case ixgbe_mac_X550EM_a: 3599 /* Some devices use SDP0 for important information */ 3600 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3601 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 3602 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 3603 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) 3604 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 3605 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 3606 mask |= IXGBE_EICR_GPI_SDP0_X540; 3607 mask |= IXGBE_EIMS_ECC; 3608 break; 3609 default: 3610 break; 3611 } 3612 3613 /* Enable Fan Failure detection */ 3614 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 3615 mask |= IXGBE_EIMS_GPI_SDP1; 3616 /* Enable SR-IOV */ 3617 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3618 mask |= IXGBE_EIMS_MAILBOX; 3619 /* Enable Flow Director */ 3620 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 3621 mask |= IXGBE_EIMS_FLOW_DIR; 3622 3623 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3624 3625 /* With MSI-X we use auto clear */ 3626 if (adapter->intr_type == IFLIB_INTR_MSIX) { 3627 mask = IXGBE_EIMS_ENABLE_MASK; 3628 /* Don't autoclear Link */ 3629 mask &= ~IXGBE_EIMS_OTHER; 3630 mask &= ~IXGBE_EIMS_LSC; 3631 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 3632 mask &= ~IXGBE_EIMS_MAILBOX; 3633 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 3634 } 3635 3636 /* 3637 * Now enable all queues, this is done separately to 3638 * allow for handling the extended (beyond 32) MSI-X 3639 * vectors that can be used by 82599 3640 */ 3641 for (int i = 0; i < adapter->num_rx_queues; i++, que++) 3642 ixgbe_enable_queue(adapter, que->msix); 3643 3644 IXGBE_WRITE_FLUSH(hw); 3645 3646 } /* ixgbe_if_enable_intr */ 3647 3648 /************************************************************************ 3649 * ixgbe_disable_intr 3650 ************************************************************************/ 3651 static void 3652 ixgbe_if_disable_intr(if_ctx_t ctx) 3653 { 3654 struct adapter *adapter = iflib_get_softc(ctx); 3655 3656 if (adapter->intr_type == IFLIB_INTR_MSIX) 3657 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); 3658 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3659 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3660 } else { 3661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3662 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3663 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3664 } 3665 IXGBE_WRITE_FLUSH(&adapter->hw); 3666 3667 } /* ixgbe_if_disable_intr */ 3668 3669 /************************************************************************ 3670 * ixgbe_link_intr_enable 3671 ************************************************************************/ 3672 static void 3673 ixgbe_link_intr_enable(if_ctx_t ctx) 3674 { 3675 struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw; 3676 3677 /* Re-enable other interrupts */ 3678 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 3679 } /* ixgbe_link_intr_enable */ 3680 3681 /************************************************************************ 3682 * ixgbe_if_rx_queue_intr_enable 3683 ************************************************************************/ 3684 static int 3685 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 3686 { 3687 struct adapter *adapter = iflib_get_softc(ctx); 3688 struct ix_rx_queue *que = &adapter->rx_queues[rxqid]; 3689 3690 ixgbe_enable_queue(adapter, que->rxr.me); 3691 3692 return (0); 3693 } /* ixgbe_if_rx_queue_intr_enable */ 3694 3695 /************************************************************************ 3696 * ixgbe_enable_queue 3697 ************************************************************************/ 3698 static void 3699 ixgbe_enable_queue(struct adapter *adapter, u32 vector) 3700 { 3701 struct ixgbe_hw *hw = &adapter->hw; 3702 u64 queue = (u64)(1 << vector); 3703 u32 mask; 3704 3705 if (hw->mac.type == ixgbe_mac_82598EB) { 3706 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3707 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3708 } else { 3709 mask = (queue & 0xFFFFFFFF); 3710 if (mask) 3711 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 3712 mask = (queue >> 32); 3713 if (mask) 3714 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 3715 } 3716 } /* ixgbe_enable_queue */ 3717 3718 /************************************************************************ 3719 * ixgbe_disable_queue 3720 ************************************************************************/ 3721 static void 3722 ixgbe_disable_queue(struct adapter *adapter, u32 vector) 3723 { 3724 struct ixgbe_hw *hw = &adapter->hw; 3725 u64 queue = (u64)(1 << vector); 3726 u32 mask; 3727 3728 if (hw->mac.type == ixgbe_mac_82598EB) { 3729 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3730 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 3731 } else { 3732 mask = (queue & 0xFFFFFFFF); 3733 if (mask) 3734 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 3735 mask = (queue >> 32); 3736 if (mask) 3737 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 3738 } 3739 } /* ixgbe_disable_queue */ 3740 3741 /************************************************************************ 3742 * ixgbe_intr - Legacy Interrupt Service Routine 3743 ************************************************************************/ 3744 int 3745 ixgbe_intr(void *arg) 3746 { 3747 struct adapter *adapter = arg; 3748 struct ix_rx_queue *que = adapter->rx_queues; 3749 struct ixgbe_hw *hw = &adapter->hw; 3750 if_ctx_t ctx = adapter->ctx; 3751 u32 eicr, eicr_mask; 3752 3753 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3754 3755 ++que->irqs; 3756 if (eicr == 0) { 3757 ixgbe_if_enable_intr(ctx); 3758 return (FILTER_HANDLED); 3759 } 3760 3761 /* Check for fan failure */ 3762 if ((hw->device_id == IXGBE_DEV_ID_82598AT) && 3763 (eicr & IXGBE_EICR_GPI_SDP1)) { 3764 device_printf(adapter->dev, 3765 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 3766 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3767 } 3768 3769 /* Link status change */ 3770 if (eicr & IXGBE_EICR_LSC) { 3771 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 3772 iflib_admin_intr_deferred(ctx); 3773 } 3774 3775 if (ixgbe_is_sfp(hw)) { 3776 /* Pluggable optics-related interrupt */ 3777 if (hw->mac.type >= ixgbe_mac_X540) 3778 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 3779 else 3780 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 3781 3782 if (eicr & eicr_mask) { 3783 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 3784 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; 3785 } 3786 3787 if ((hw->mac.type == ixgbe_mac_82599EB) && 3788 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 3789 IXGBE_WRITE_REG(hw, IXGBE_EICR, 3790 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3791 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 3792 } 3793 } 3794 3795 /* External PHY interrupt */ 3796 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 3797 (eicr & IXGBE_EICR_GPI_SDP0_X540)) 3798 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY; 3799 3800 return (FILTER_SCHEDULE_THREAD); 3801 } /* ixgbe_intr */ 3802 3803 /************************************************************************ 3804 * ixgbe_free_pci_resources 3805 ************************************************************************/ 3806 static void 3807 ixgbe_free_pci_resources(if_ctx_t ctx) 3808 { 3809 struct adapter *adapter = iflib_get_softc(ctx); 3810 struct ix_rx_queue *que = adapter->rx_queues; 3811 device_t dev = iflib_get_dev(ctx); 3812 3813 /* Release all MSI-X queue resources */ 3814 if (adapter->intr_type == IFLIB_INTR_MSIX) 3815 iflib_irq_free(ctx, &adapter->irq); 3816 3817 if (que != NULL) { 3818 for (int i = 0; i < adapter->num_rx_queues; i++, que++) { 3819 iflib_irq_free(ctx, &que->que_irq); 3820 } 3821 } 3822 3823 if (adapter->pci_mem != NULL) 3824 bus_release_resource(dev, SYS_RES_MEMORY, 3825 rman_get_rid(adapter->pci_mem), adapter->pci_mem); 3826 } /* ixgbe_free_pci_resources */ 3827 3828 /************************************************************************ 3829 * ixgbe_sysctl_flowcntl 3830 * 3831 * SYSCTL wrapper around setting Flow Control 3832 ************************************************************************/ 3833 static int 3834 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS) 3835 { 3836 struct adapter *adapter; 3837 int error, fc; 3838 3839 adapter = (struct adapter *)arg1; 3840 fc = adapter->hw.fc.current_mode; 3841 3842 error = sysctl_handle_int(oidp, &fc, 0, req); 3843 if ((error) || (req->newptr == NULL)) 3844 return (error); 3845 3846 /* Don't bother if it's not changed */ 3847 if (fc == adapter->hw.fc.current_mode) 3848 return (0); 3849 3850 return ixgbe_set_flowcntl(adapter, fc); 3851 } /* ixgbe_sysctl_flowcntl */ 3852 3853 /************************************************************************ 3854 * ixgbe_set_flowcntl - Set flow control 3855 * 3856 * Flow control values: 3857 * 0 - off 3858 * 1 - rx pause 3859 * 2 - tx pause 3860 * 3 - full 3861 ************************************************************************/ 3862 static int 3863 ixgbe_set_flowcntl(struct adapter *adapter, int fc) 3864 { 3865 switch (fc) { 3866 case ixgbe_fc_rx_pause: 3867 case ixgbe_fc_tx_pause: 3868 case ixgbe_fc_full: 3869 adapter->hw.fc.requested_mode = fc; 3870 if (adapter->num_rx_queues > 1) 3871 ixgbe_disable_rx_drop(adapter); 3872 break; 3873 case ixgbe_fc_none: 3874 adapter->hw.fc.requested_mode = ixgbe_fc_none; 3875 if (adapter->num_rx_queues > 1) 3876 ixgbe_enable_rx_drop(adapter); 3877 break; 3878 default: 3879 return (EINVAL); 3880 } 3881 3882 /* Don't autoneg if forcing a value */ 3883 adapter->hw.fc.disable_fc_autoneg = TRUE; 3884 ixgbe_fc_enable(&adapter->hw); 3885 3886 return (0); 3887 } /* ixgbe_set_flowcntl */ 3888 3889 /************************************************************************ 3890 * ixgbe_enable_rx_drop 3891 * 3892 * Enable the hardware to drop packets when the buffer is 3893 * full. This is useful with multiqueue, so that no single 3894 * queue being full stalls the entire RX engine. We only 3895 * enable this when Multiqueue is enabled AND Flow Control 3896 * is disabled. 3897 ************************************************************************/ 3898 static void 3899 ixgbe_enable_rx_drop(struct adapter *adapter) 3900 { 3901 struct ixgbe_hw *hw = &adapter->hw; 3902 struct rx_ring *rxr; 3903 u32 srrctl; 3904 3905 for (int i = 0; i < adapter->num_rx_queues; i++) { 3906 rxr = &adapter->rx_queues[i].rxr; 3907 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 3908 srrctl |= IXGBE_SRRCTL_DROP_EN; 3909 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 3910 } 3911 3912 /* enable drop for each vf */ 3913 for (int i = 0; i < adapter->num_vfs; i++) { 3914 IXGBE_WRITE_REG(hw, IXGBE_QDE, 3915 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | 3916 IXGBE_QDE_ENABLE)); 3917 } 3918 } /* ixgbe_enable_rx_drop */ 3919 3920 /************************************************************************ 3921 * ixgbe_disable_rx_drop 3922 ************************************************************************/ 3923 static void 3924 ixgbe_disable_rx_drop(struct adapter *adapter) 3925 { 3926 struct ixgbe_hw *hw = &adapter->hw; 3927 struct rx_ring *rxr; 3928 u32 srrctl; 3929 3930 for (int i = 0; i < adapter->num_rx_queues; i++) { 3931 rxr = &adapter->rx_queues[i].rxr; 3932 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 3933 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 3934 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 3935 } 3936 3937 /* disable drop for each vf */ 3938 for (int i = 0; i < adapter->num_vfs; i++) { 3939 IXGBE_WRITE_REG(hw, IXGBE_QDE, 3940 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); 3941 } 3942 } /* ixgbe_disable_rx_drop */ 3943 3944 /************************************************************************ 3945 * ixgbe_sysctl_advertise 3946 * 3947 * SYSCTL wrapper around setting advertised speed 3948 ************************************************************************/ 3949 static int 3950 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS) 3951 { 3952 struct adapter *adapter; 3953 int error, advertise; 3954 3955 adapter = (struct adapter *)arg1; 3956 advertise = adapter->advertise; 3957 3958 error = sysctl_handle_int(oidp, &advertise, 0, req); 3959 if ((error) || (req->newptr == NULL)) 3960 return (error); 3961 3962 return ixgbe_set_advertise(adapter, advertise); 3963 } /* ixgbe_sysctl_advertise */ 3964 3965 /************************************************************************ 3966 * ixgbe_set_advertise - Control advertised link speed 3967 * 3968 * Flags: 3969 * 0x1 - advertise 100 Mb 3970 * 0x2 - advertise 1G 3971 * 0x4 - advertise 10G 3972 * 0x8 - advertise 10 Mb (yes, Mb) 3973 ************************************************************************/ 3974 static int 3975 ixgbe_set_advertise(struct adapter *adapter, int advertise) 3976 { 3977 device_t dev = iflib_get_dev(adapter->ctx); 3978 struct ixgbe_hw *hw; 3979 ixgbe_link_speed speed = 0; 3980 ixgbe_link_speed link_caps = 0; 3981 s32 err = IXGBE_NOT_IMPLEMENTED; 3982 bool negotiate = FALSE; 3983 3984 /* Checks to validate new value */ 3985 if (adapter->advertise == advertise) /* no change */ 3986 return (0); 3987 3988 hw = &adapter->hw; 3989 3990 /* No speed changes for backplane media */ 3991 if (hw->phy.media_type == ixgbe_media_type_backplane) 3992 return (ENODEV); 3993 3994 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 3995 (hw->phy.multispeed_fiber))) { 3996 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n"); 3997 return (EINVAL); 3998 } 3999 4000 if (advertise < 0x1 || advertise > 0xF) { 4001 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n"); 4002 return (EINVAL); 4003 } 4004 4005 if (hw->mac.ops.get_link_capabilities) { 4006 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 4007 &negotiate); 4008 if (err != IXGBE_SUCCESS) { 4009 device_printf(dev, "Unable to determine supported advertise speeds\n"); 4010 return (ENODEV); 4011 } 4012 } 4013 4014 /* Set new value and report new advertised mode */ 4015 if (advertise & 0x1) { 4016 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) { 4017 device_printf(dev, "Interface does not support 100Mb advertised speed\n"); 4018 return (EINVAL); 4019 } 4020 speed |= IXGBE_LINK_SPEED_100_FULL; 4021 } 4022 if (advertise & 0x2) { 4023 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { 4024 device_printf(dev, "Interface does not support 1Gb advertised speed\n"); 4025 return (EINVAL); 4026 } 4027 speed |= IXGBE_LINK_SPEED_1GB_FULL; 4028 } 4029 if (advertise & 0x4) { 4030 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { 4031 device_printf(dev, "Interface does not support 10Gb advertised speed\n"); 4032 return (EINVAL); 4033 } 4034 speed |= IXGBE_LINK_SPEED_10GB_FULL; 4035 } 4036 if (advertise & 0x8) { 4037 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { 4038 device_printf(dev, "Interface does not support 10Mb advertised speed\n"); 4039 return (EINVAL); 4040 } 4041 speed |= IXGBE_LINK_SPEED_10_FULL; 4042 } 4043 4044 hw->mac.autotry_restart = TRUE; 4045 hw->mac.ops.setup_link(hw, speed, TRUE); 4046 adapter->advertise = advertise; 4047 4048 return (0); 4049 } /* ixgbe_set_advertise */ 4050 4051 /************************************************************************ 4052 * ixgbe_get_advertise - Get current advertised speed settings 4053 * 4054 * Formatted for sysctl usage. 4055 * Flags: 4056 * 0x1 - advertise 100 Mb 4057 * 0x2 - advertise 1G 4058 * 0x4 - advertise 10G 4059 * 0x8 - advertise 10 Mb (yes, Mb) 4060 ************************************************************************/ 4061 static int 4062 ixgbe_get_advertise(struct adapter *adapter) 4063 { 4064 struct ixgbe_hw *hw = &adapter->hw; 4065 int speed; 4066 ixgbe_link_speed link_caps = 0; 4067 s32 err; 4068 bool negotiate = FALSE; 4069 4070 /* 4071 * Advertised speed means nothing unless it's copper or 4072 * multi-speed fiber 4073 */ 4074 if (!(hw->phy.media_type == ixgbe_media_type_copper) && 4075 !(hw->phy.multispeed_fiber)) 4076 return (0); 4077 4078 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 4079 if (err != IXGBE_SUCCESS) 4080 return (0); 4081 4082 speed = 4083 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 4084 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 4085 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 4086 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0); 4087 4088 return speed; 4089 } /* ixgbe_get_advertise */ 4090 4091 /************************************************************************ 4092 * ixgbe_sysctl_dmac - Manage DMA Coalescing 4093 * 4094 * Control values: 4095 * 0/1 - off / on (use default value of 1000) 4096 * 4097 * Legal timer values are: 4098 * 50,100,250,500,1000,2000,5000,10000 4099 * 4100 * Turning off interrupt moderation will also turn this off. 4101 ************************************************************************/ 4102 static int 4103 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4104 { 4105 struct adapter *adapter = (struct adapter *)arg1; 4106 struct ifnet *ifp = iflib_get_ifp(adapter->ctx); 4107 int error; 4108 u16 newval; 4109 4110 newval = adapter->dmac; 4111 error = sysctl_handle_16(oidp, &newval, 0, req); 4112 if ((error) || (req->newptr == NULL)) 4113 return (error); 4114 4115 switch (newval) { 4116 case 0: 4117 /* Disabled */ 4118 adapter->dmac = 0; 4119 break; 4120 case 1: 4121 /* Enable and use default */ 4122 adapter->dmac = 1000; 4123 break; 4124 case 50: 4125 case 100: 4126 case 250: 4127 case 500: 4128 case 1000: 4129 case 2000: 4130 case 5000: 4131 case 10000: 4132 /* Legal values - allow */ 4133 adapter->dmac = newval; 4134 break; 4135 default: 4136 /* Do nothing, illegal value */ 4137 return (EINVAL); 4138 } 4139 4140 /* Re-initialize hardware if it's already running */ 4141 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4142 ifp->if_init(ifp); 4143 4144 return (0); 4145 } /* ixgbe_sysctl_dmac */ 4146 4147 #ifdef IXGBE_DEBUG 4148 /************************************************************************ 4149 * ixgbe_sysctl_power_state 4150 * 4151 * Sysctl to test power states 4152 * Values: 4153 * 0 - set device to D0 4154 * 3 - set device to D3 4155 * (none) - get current device power state 4156 ************************************************************************/ 4157 static int 4158 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS) 4159 { 4160 struct adapter *adapter = (struct adapter *)arg1; 4161 device_t dev = adapter->dev; 4162 int curr_ps, new_ps, error = 0; 4163 4164 curr_ps = new_ps = pci_get_powerstate(dev); 4165 4166 error = sysctl_handle_int(oidp, &new_ps, 0, req); 4167 if ((error) || (req->newptr == NULL)) 4168 return (error); 4169 4170 if (new_ps == curr_ps) 4171 return (0); 4172 4173 if (new_ps == 3 && curr_ps == 0) 4174 error = DEVICE_SUSPEND(dev); 4175 else if (new_ps == 0 && curr_ps == 3) 4176 error = DEVICE_RESUME(dev); 4177 else 4178 return (EINVAL); 4179 4180 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 4181 4182 return (error); 4183 } /* ixgbe_sysctl_power_state */ 4184 #endif 4185 4186 /************************************************************************ 4187 * ixgbe_sysctl_wol_enable 4188 * 4189 * Sysctl to enable/disable the WoL capability, 4190 * if supported by the adapter. 4191 * 4192 * Values: 4193 * 0 - disabled 4194 * 1 - enabled 4195 ************************************************************************/ 4196 static int 4197 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) 4198 { 4199 struct adapter *adapter = (struct adapter *)arg1; 4200 struct ixgbe_hw *hw = &adapter->hw; 4201 int new_wol_enabled; 4202 int error = 0; 4203 4204 new_wol_enabled = hw->wol_enabled; 4205 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); 4206 if ((error) || (req->newptr == NULL)) 4207 return (error); 4208 new_wol_enabled = !!(new_wol_enabled); 4209 if (new_wol_enabled == hw->wol_enabled) 4210 return (0); 4211 4212 if (new_wol_enabled > 0 && !adapter->wol_support) 4213 return (ENODEV); 4214 else 4215 hw->wol_enabled = new_wol_enabled; 4216 4217 return (0); 4218 } /* ixgbe_sysctl_wol_enable */ 4219 4220 /************************************************************************ 4221 * ixgbe_sysctl_wufc - Wake Up Filter Control 4222 * 4223 * Sysctl to enable/disable the types of packets that the 4224 * adapter will wake up on upon receipt. 4225 * Flags: 4226 * 0x1 - Link Status Change 4227 * 0x2 - Magic Packet 4228 * 0x4 - Direct Exact 4229 * 0x8 - Directed Multicast 4230 * 0x10 - Broadcast 4231 * 0x20 - ARP/IPv4 Request Packet 4232 * 0x40 - Direct IPv4 Packet 4233 * 0x80 - Direct IPv6 Packet 4234 * 4235 * Settings not listed above will cause the sysctl to return an error. 4236 ************************************************************************/ 4237 static int 4238 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS) 4239 { 4240 struct adapter *adapter = (struct adapter *)arg1; 4241 int error = 0; 4242 u32 new_wufc; 4243 4244 new_wufc = adapter->wufc; 4245 4246 error = sysctl_handle_32(oidp, &new_wufc, 0, req); 4247 if ((error) || (req->newptr == NULL)) 4248 return (error); 4249 if (new_wufc == adapter->wufc) 4250 return (0); 4251 4252 if (new_wufc & 0xffffff00) 4253 return (EINVAL); 4254 4255 new_wufc &= 0xff; 4256 new_wufc |= (0xffffff & adapter->wufc); 4257 adapter->wufc = new_wufc; 4258 4259 return (0); 4260 } /* ixgbe_sysctl_wufc */ 4261 4262 #ifdef IXGBE_DEBUG 4263 /************************************************************************ 4264 * ixgbe_sysctl_print_rss_config 4265 ************************************************************************/ 4266 static int 4267 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS) 4268 { 4269 struct adapter *adapter = (struct adapter *)arg1; 4270 struct ixgbe_hw *hw = &adapter->hw; 4271 device_t dev = adapter->dev; 4272 struct sbuf *buf; 4273 int error = 0, reta_size; 4274 u32 reg; 4275 4276 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4277 if (!buf) { 4278 device_printf(dev, "Could not allocate sbuf for output.\n"); 4279 return (ENOMEM); 4280 } 4281 4282 // TODO: use sbufs to make a string to print out 4283 /* Set multiplier for RETA setup and table size based on MAC */ 4284 switch (adapter->hw.mac.type) { 4285 case ixgbe_mac_X550: 4286 case ixgbe_mac_X550EM_x: 4287 case ixgbe_mac_X550EM_a: 4288 reta_size = 128; 4289 break; 4290 default: 4291 reta_size = 32; 4292 break; 4293 } 4294 4295 /* Print out the redirection table */ 4296 sbuf_cat(buf, "\n"); 4297 for (int i = 0; i < reta_size; i++) { 4298 if (i < 32) { 4299 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); 4300 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); 4301 } else { 4302 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); 4303 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); 4304 } 4305 } 4306 4307 // TODO: print more config 4308 4309 error = sbuf_finish(buf); 4310 if (error) 4311 device_printf(dev, "Error finishing sbuf: %d\n", error); 4312 4313 sbuf_delete(buf); 4314 4315 return (0); 4316 } /* ixgbe_sysctl_print_rss_config */ 4317 #endif /* IXGBE_DEBUG */ 4318 4319 /************************************************************************ 4320 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY 4321 * 4322 * For X552/X557-AT devices using an external PHY 4323 ************************************************************************/ 4324 static int 4325 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) 4326 { 4327 struct adapter *adapter = (struct adapter *)arg1; 4328 struct ixgbe_hw *hw = &adapter->hw; 4329 u16 reg; 4330 4331 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4332 device_printf(iflib_get_dev(adapter->ctx), 4333 "Device has no supported external thermal sensor.\n"); 4334 return (ENODEV); 4335 } 4336 4337 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 4338 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4339 device_printf(iflib_get_dev(adapter->ctx), 4340 "Error reading from PHY's current temperature register\n"); 4341 return (EAGAIN); 4342 } 4343 4344 /* Shift temp for output */ 4345 reg = reg >> 8; 4346 4347 return (sysctl_handle_16(oidp, NULL, reg, req)); 4348 } /* ixgbe_sysctl_phy_temp */ 4349 4350 /************************************************************************ 4351 * ixgbe_sysctl_phy_overtemp_occurred 4352 * 4353 * Reports (directly from the PHY) whether the current PHY 4354 * temperature is over the overtemp threshold. 4355 ************************************************************************/ 4356 static int 4357 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) 4358 { 4359 struct adapter *adapter = (struct adapter *)arg1; 4360 struct ixgbe_hw *hw = &adapter->hw; 4361 u16 reg; 4362 4363 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4364 device_printf(iflib_get_dev(adapter->ctx), 4365 "Device has no supported external thermal sensor.\n"); 4366 return (ENODEV); 4367 } 4368 4369 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 4370 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4371 device_printf(iflib_get_dev(adapter->ctx), 4372 "Error reading from PHY's temperature status register\n"); 4373 return (EAGAIN); 4374 } 4375 4376 /* Get occurrence bit */ 4377 reg = !!(reg & 0x4000); 4378 4379 return (sysctl_handle_16(oidp, 0, reg, req)); 4380 } /* ixgbe_sysctl_phy_overtemp_occurred */ 4381 4382 /************************************************************************ 4383 * ixgbe_sysctl_eee_state 4384 * 4385 * Sysctl to set EEE power saving feature 4386 * Values: 4387 * 0 - disable EEE 4388 * 1 - enable EEE 4389 * (none) - get current device EEE state 4390 ************************************************************************/ 4391 static int 4392 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS) 4393 { 4394 struct adapter *adapter = (struct adapter *)arg1; 4395 device_t dev = adapter->dev; 4396 struct ifnet *ifp = iflib_get_ifp(adapter->ctx); 4397 int curr_eee, new_eee, error = 0; 4398 s32 retval; 4399 4400 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE); 4401 4402 error = sysctl_handle_int(oidp, &new_eee, 0, req); 4403 if ((error) || (req->newptr == NULL)) 4404 return (error); 4405 4406 /* Nothing to do */ 4407 if (new_eee == curr_eee) 4408 return (0); 4409 4410 /* Not supported */ 4411 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE)) 4412 return (EINVAL); 4413 4414 /* Bounds checking */ 4415 if ((new_eee < 0) || (new_eee > 1)) 4416 return (EINVAL); 4417 4418 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee); 4419 if (retval) { 4420 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval); 4421 return (EINVAL); 4422 } 4423 4424 /* Restart auto-neg */ 4425 ifp->if_init(ifp); 4426 4427 device_printf(dev, "New EEE state: %d\n", new_eee); 4428 4429 /* Cache new value */ 4430 if (new_eee) 4431 adapter->feat_en |= IXGBE_FEATURE_EEE; 4432 else 4433 adapter->feat_en &= ~IXGBE_FEATURE_EEE; 4434 4435 return (error); 4436 } /* ixgbe_sysctl_eee_state */ 4437 4438 /************************************************************************ 4439 * ixgbe_init_device_features 4440 ************************************************************************/ 4441 static void 4442 ixgbe_init_device_features(struct adapter *adapter) 4443 { 4444 adapter->feat_cap = IXGBE_FEATURE_NETMAP 4445 | IXGBE_FEATURE_RSS 4446 | IXGBE_FEATURE_MSI 4447 | IXGBE_FEATURE_MSIX 4448 | IXGBE_FEATURE_LEGACY_IRQ; 4449 4450 /* Set capabilities first... */ 4451 switch (adapter->hw.mac.type) { 4452 case ixgbe_mac_82598EB: 4453 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT) 4454 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL; 4455 break; 4456 case ixgbe_mac_X540: 4457 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4458 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4459 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && 4460 (adapter->hw.bus.func == 0)) 4461 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 4462 break; 4463 case ixgbe_mac_X550: 4464 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4465 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4466 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4467 break; 4468 case ixgbe_mac_X550EM_x: 4469 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4470 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4471 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR) 4472 adapter->feat_cap |= IXGBE_FEATURE_EEE; 4473 break; 4474 case ixgbe_mac_X550EM_a: 4475 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4476 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4477 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4478 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || 4479 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { 4480 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4481 adapter->feat_cap |= IXGBE_FEATURE_EEE; 4482 } 4483 break; 4484 case ixgbe_mac_82599EB: 4485 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4486 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4487 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && 4488 (adapter->hw.bus.func == 0)) 4489 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 4490 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) 4491 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4492 break; 4493 default: 4494 break; 4495 } 4496 4497 /* Enabled by default... */ 4498 /* Fan failure detection */ 4499 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL) 4500 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL; 4501 /* Netmap */ 4502 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 4503 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 4504 /* EEE */ 4505 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 4506 adapter->feat_en |= IXGBE_FEATURE_EEE; 4507 /* Thermal Sensor */ 4508 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) 4509 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; 4510 4511 /* Enabled via global sysctl... */ 4512 /* Flow Director */ 4513 if (ixgbe_enable_fdir) { 4514 if (adapter->feat_cap & IXGBE_FEATURE_FDIR) 4515 adapter->feat_en |= IXGBE_FEATURE_FDIR; 4516 else 4517 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled."); 4518 } 4519 /* 4520 * Message Signal Interrupts - Extended (MSI-X) 4521 * Normal MSI is only enabled if MSI-X calls fail. 4522 */ 4523 if (!ixgbe_enable_msix) 4524 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX; 4525 /* Receive-Side Scaling (RSS) */ 4526 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) 4527 adapter->feat_en |= IXGBE_FEATURE_RSS; 4528 4529 /* Disable features with unmet dependencies... */ 4530 /* No MSI-X */ 4531 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) { 4532 adapter->feat_cap &= ~IXGBE_FEATURE_RSS; 4533 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV; 4534 adapter->feat_en &= ~IXGBE_FEATURE_RSS; 4535 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; 4536 } 4537 } /* ixgbe_init_device_features */ 4538 4539 /************************************************************************ 4540 * ixgbe_check_fan_failure 4541 ************************************************************************/ 4542 static void 4543 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt) 4544 { 4545 u32 mask; 4546 4547 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) : 4548 IXGBE_ESDP_SDP1; 4549 4550 if (reg & mask) 4551 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 4552 } /* ixgbe_check_fan_failure */ 4553