/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";


/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    /* required last entry */
    PVID_END
};

static void *ixgbe_register(device_t dev);
static int  ixgbe_if_attach_pre(if_ctx_t ctx);
static int  ixgbe_if_attach_post(if_ctx_t ctx);
static int  ixgbe_if_detach(if_ctx_t ctx);
static int  ixgbe_if_shutdown(if_ctx_t ctx);
static int  ixgbe_if_suspend(if_ctx_t ctx);
static int  ixgbe_if_resume(if_ctx_t ctx);

static void ixgbe_if_stop(if_ctx_t ctx);
void ixgbe_if_enable_intr(if_ctx_t ctx);
static void ixgbe_if_disable_intr(if_ctx_t ctx);
static void ixgbe_link_intr_enable(if_ctx_t ctx);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int  ixgbe_if_media_change(if_ctx_t ctx);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
static void ixgbe_if_multi_set(if_ctx_t ctx);
static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int ntxqs, int ntxqsets);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static void ixgbe_if_queues_free(if_ctx_t ctx);
static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t ctx);
static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
int ixgbe_intr(void *arg);

/************************************************************************
 * Function prototypes
 ************************************************************************/
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
#endif

static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_add_device_sysctls(if_ctx_t ctx);
static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);

static void ixgbe_config_dmac(struct adapter *adapter);
static void ixgbe_configure_ivars(struct adapter *adapter);
static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
    s8 type);
static u8  *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t ctx);

static void ixgbe_free_pci_resources(if_ctx_t ctx);

static int  ixgbe_msix_link(void *arg);
static int  ixgbe_msix_que(void *arg);
static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
static void ixgbe_initialize_receive_units(if_ctx_t ctx);
static void ixgbe_initialize_transmit_units(if_ctx_t ctx);

static int  ixgbe_setup_interface(if_ctx_t ctx);
static void ixgbe_init_device_features(struct adapter *adapter);
static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void ixgbe_add_media_types(if_ctx_t ctx);
static void ixgbe_update_stats_counters(struct adapter *adapter);
static void ixgbe_config_link(if_ctx_t ctx);
static void ixgbe_get_slot_info(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *adapter);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);

static void ixgbe_add_hw_stats(struct adapter *adapter);
static int  ixgbe_set_flowcntl(struct adapter *, int);
static int  ixgbe_set_advertise(struct adapter *, int);
static int  ixgbe_get_advertise(struct adapter *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
static void ixgbe_config_gpie(struct adapter *adapter);
static void ixgbe_config_delay_values(struct adapter *adapter);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 *  FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, ixgbe_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD(device_suspend, iflib_device_suspend),
    DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
    DEVMETHOD(pci_iov_init, iflib_device_iov_init),
    DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
    DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

static driver_t ix_driver = {
    "ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
    DEVMETHOD(ifdi_detach, ixgbe_if_detach),
    DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
    DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
    DEVMETHOD(ifdi_resume, ixgbe_if_resume),
    DEVMETHOD(ifdi_init, ixgbe_if_init),
    DEVMETHOD(ifdi_stop, ixgbe_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
    DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
    DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
    DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
    DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
    DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
    DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
    DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
    DEVMETHOD(ifdi_timer, ixgbe_if_timer),
    DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
    DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
    DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
    DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
#ifdef PCI_IOV
    DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
    DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
    DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
    "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.
 * This only works as a compile option right now as it's
 * applied during attach; set this to 'ixgbe_smart_speed_off'
 * to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Turning this on allows the use of unsupported SFP+ modules;
 * note that if you do so, you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");
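
/*
 * The CTLFLAG_RDTUN knobs above are loader tunables.  A typical, purely
 * illustrative way to set them is from /boot/loader.conf before the module
 * loads, e.g.:
 *
 *   hw.ix.enable_msix=0        # force MSI/legacy interrupts for testing
 *   hw.ix.flow_control=0       # ixgbe_fc_none; the compiled-in default is
 *                              # ixgbe_fc_full
 *   hw.ix.max_interrupt_rate=8000
 *
 * The numeric flow_control values follow the ixgbe_fc_* enum in the shared
 * code; check that enum for the mapping on your tree before relying on the
 * example numbers above.
 */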

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool, this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
    .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = PAGE_SIZE,
    .isc_rx_maxsize = PAGE_SIZE*4,
    .isc_rx_nsegments = 1,
    .isc_rx_maxsegsize = PAGE_SIZE*4,
    .isc_nfl = 1,
    .isc_ntxqs = 1,
    .isc_nrxqs = 1,

    .isc_admin_intrcnt = 1,
    .isc_vendor_info = ixgbe_vendor_info_array,
    .isc_driver_version = ixgbe_driver_version,
    .isc_driver = &ixgbe_if_driver,
    .isc_flags = IFLIB_TSO_INIT_IP,

    .isc_nrxd_min = {MIN_RXD},
    .isc_ntxd_min = {MIN_TXD},
    .isc_nrxd_max = {MAX_RXD},
    .isc_ntxd_max = {MAX_TXD},
    .isc_nrxd_default = {DEFAULT_RXD},
    .isc_ntxd_default = {DEFAULT_TXD},
};

if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
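
/*
 * Note: under iflib the descriptor rings themselves are DMA-allocated by
 * the framework.  The *_queues_alloc callbacks below only allocate the
 * driver's per-queue bookkeeping structures and record the ring addresses
 * that iflib hands in through vaddrs/paddrs.
 */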

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
    struct adapter     *adapter = iflib_get_softc(ctx);
    if_softc_ctx_t     scctx = adapter->shared;
    struct ix_tx_queue *que;
    int                i, j, error;

    MPASS(adapter->num_tx_queues > 0);
    MPASS(adapter->num_tx_queues == ntxqsets);
    MPASS(ntxqs == 1);

    /* Allocate queue structure memory */
    adapter->tx_queues =
        (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!adapter->tx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate TX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
        struct tx_ring *txr = &que->txr;

        /* In case SR-IOV is enabled, align the index properly */
        txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);

        txr->adapter = que->adapter = adapter;

        /* Allocate report status array */
        txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0],
            M_IXGBE, M_NOWAIT | M_ZERO);
        if (txr->tx_rsq == NULL) {
            error = ENOMEM;
            goto fail;
        }
        for (j = 0; j < scctx->isc_ntxd[0]; j++)
            txr->tx_rsq[j] = QIDX_INVALID;
        /* get the virtual and physical address of the hardware queues */
        txr->tail = IXGBE_TDT(txr->me);
        txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
        txr->tx_paddr = paddrs[i];

        txr->bytes = 0;
        txr->total_packets = 0;

        /* Set the rate at which we sample packets */
        if (adapter->feat_en & IXGBE_FEATURE_FDIR)
            txr->atr_sample = atr_sample_rate;

    }

    device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
        adapter->num_tx_queues);

    return (0);

fail:
    ixgbe_if_queues_free(ctx);

    return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
    struct adapter     *adapter = iflib_get_softc(ctx);
    struct ix_rx_queue *que;
    int                i;

    MPASS(adapter->num_rx_queues > 0);
    MPASS(adapter->num_rx_queues == nrxqsets);
    MPASS(nrxqs == 1);

    /* Allocate queue structure memory */
    adapter->rx_queues =
        (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!adapter->rx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate RX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
        struct rx_ring *rxr = &que->rxr;

        /* In case SR-IOV is enabled, align the index properly */
        rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);

        rxr->adapter = que->adapter = adapter;

        /* get the virtual and physical address of the hw queues */
        rxr->tail = IXGBE_RDT(rxr->me);
        rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
        rxr->rx_paddr = paddrs[i];
        rxr->bytes = 0;
        rxr->que = que;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
        adapter->num_rx_queues);

    return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
    struct adapter     *adapter = iflib_get_softc(ctx);
    struct ix_tx_queue *tx_que = adapter->tx_queues;
    struct ix_rx_queue *rx_que = adapter->rx_queues;
    int                i;

    if (tx_que != NULL) {
        for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
            struct tx_ring *txr = &tx_que->txr;
            if (txr->tx_rsq == NULL)
                break;

            free(txr->tx_rsq, M_IXGBE);
            txr->tx_rsq = NULL;
        }

        free(adapter->tx_queues, M_IXGBE);
        adapter->tx_queues = NULL;
    }
    if (rx_que != NULL) {
        free(adapter->rx_queues, M_IXGBE);
        adapter->rx_queues = NULL;
    }
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32             reta = 0, mrqc, rss_key[10];
    int             queue_id, table_size, index_mult;
    int             i, j;
    u32             rss_hash_config;

    if (adapter->feat_en & IXGBE_FEATURE_RSS) {
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *)&rss_key);
    } else {
        /* set up random bits */
        arc4rand(&rss_key, sizeof(rss_key), 0);
    }

    /* Set multiplier for RETA setup and table size based on MAC */
    index_mult = 0x1;
    table_size = 128;
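    /*
     * The 82598 spaces its redirection entries differently, hence the 0x11
     * index multiplier below; the X550 family grows the indirection table
     * to 512 entries, with entries beyond the first 128 programmed through
     * the ERETA registers in the loop that follows.
     */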
    switch (adapter->hw.mac.type) {
    case ixgbe_mac_82598EB:
        index_mult = 0x11;
        break;
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        table_size = 512;
        break;
    default:
        break;
    }

    /* Set up the redirection table */
    for (i = 0, j = 0; i < table_size; i++, j++) {
        if (j == adapter->num_rx_queues)
            j = 0;

        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
            /*
             * Fetch the RSS bucket id for the given indirection
             * entry. Cap it at the number of configured buckets
             * (which is num_rx_queues.)
             */
            queue_id = rss_get_indirection_to_bucket(i);
            queue_id = queue_id % adapter->num_rx_queues;
        } else
            queue_id = (j * index_mult);

        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
        reta = reta >> 8;
        reta = reta | (((uint32_t)queue_id) << 24);
        if ((i & 3) == 3) {
            if (i < 128)
                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
            else
                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
            reta = 0;
        }
    }

    /* Now fill our hash function seeds */
    for (i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

    /* Perform hash on these packet types */
    if (adapter->feat_en & IXGBE_FEATURE_RSS)
        rss_hash_config = rss_gethashconfig();
    else {
        /*
         * Disable UDP - IP fragments aren't currently being handled
         * and so we end up with a mix of 2-tuple and 4-tuple
         * traffic.
         */
        rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                        | RSS_HASHTYPE_RSS_TCP_IPV4
                        | RSS_HASHTYPE_RSS_IPV6
                        | RSS_HASHTYPE_RSS_TCP_IPV6
                        | RSS_HASHTYPE_RSS_IPV6_EX
                        | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    }

    mrqc = IXGBE_MRQC_RSSEN;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
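/*
 * BSIZEPKT_ROUNDUP rounds the receive buffer size up to the next
 * SRRCTL.BSIZEPKT unit before the shift converts bytes into register units
 * (1 KB granularity assuming the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10),
 * so a buffer size that is not already a multiple of that unit is rounded
 * up rather than silently truncated when bufsz is computed below.
 */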
static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
    struct adapter     *adapter = iflib_get_softc(ctx);
    if_softc_ctx_t     scctx = adapter->shared;
    struct ixgbe_hw    *hw = &adapter->hw;
    struct ifnet       *ifp = iflib_get_ifp(ctx);
    struct ix_rx_queue *que;
    int                i, j;
    u32                bufsz, fctrl, srrctl, rxcsum;
    u32                hlreg;

    /*
     * Make sure receives are disabled while
     * setting up the descriptor ring
     */
    ixgbe_disable_rx(hw);

    /* Enable broadcasts */
    fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    fctrl |= IXGBE_FCTRL_BAM;
    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        fctrl |= IXGBE_FCTRL_DPF;
        fctrl |= IXGBE_FCTRL_PMCF;
    }
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    /* Set for Jumbo Frames? */
    hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    if (ifp->if_mtu > ETHERMTU)
        hlreg |= IXGBE_HLREG0_JUMBOEN;
    else
        hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

    bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
        IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    /* Setup the Base and Length of the Rx Descriptor Ring */
    for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        u64            rdba = rxr->rx_paddr;

        j = rxr->me;

        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
            scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

        /* Set up the SRRCTL register */
        srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        srrctl |= bufsz;
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /*
         * Set DROP_EN iff we have no flow control and >1 queue.
         * Note that srrctl was cleared shortly before during reset,
         * so we do not need to clear the bit, but do it just in case
         * this code is moved elsewhere.
         */
        if (adapter->num_rx_queues > 1 &&
            adapter->hw.fc.requested_mode == ixgbe_fc_none) {
            srrctl |= IXGBE_SRRCTL_DROP_EN;
        } else {
            srrctl &= ~IXGBE_SRRCTL_DROP_EN;
        }

        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

        /* Set the driver rx tail address */
        rxr->tail = IXGBE_RDT(rxr->me);
    }

    if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR
                    | IXGBE_PSRTYPE_UDPHDR
                    | IXGBE_PSRTYPE_IPV4HDR
                    | IXGBE_PSRTYPE_IPV6HDR;
        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    }

    rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

    ixgbe_initialize_rss_mapping(adapter);

    if (adapter->num_rx_queues > 1) {
        /* RSS and RX IPP Checksum are mutually exclusive */
        rxcsum |= IXGBE_RXCSUM_PCSD;
    }

    if (ifp->if_capenable & IFCAP_RXCSUM)
        rxcsum |= IXGBE_RXCSUM_PCSD;

    /* This is useful for calculating UDP/IP fragment checksums */
    if (!(rxcsum & IXGBE_RXCSUM_PCSD))
        rxcsum |= IXGBE_RXCSUM_IPPCSE;

    IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
    struct adapter     *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw    *hw = &adapter->hw;
    if_softc_ctx_t     scctx = adapter->shared;
    struct ix_tx_queue *que;
    int                i;

    /* Setup the Base and Length of the Tx Descriptor Ring */
    for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
        i++, que++) {
        struct tx_ring *txr = &que->txr;
        u64            tdba = txr->tx_paddr;
        u32            txctrl = 0;
        int            j = txr->me;

        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
            scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

        /* Setup the HW Tx Head and Tail descriptor pointers */
        IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

        /* Cache the tail address */
        txr->tail = IXGBE_TDT(txr->me);

        txr->tx_rs_cidx = txr->tx_rs_pidx;
        txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
        for (int k = 0; k < scctx->isc_ntxd[0]; k++)
            txr->tx_rsq[k] = QIDX_INVALID;

        /* Disable Head Writeback */
        /*
         * Note: for X550 series devices, these registers are actually
         * prefixed with TPH_ instead of DCA_, but the addresses and
         * fields remain the same.
         */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
            break;
        default:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
            break;
        }
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
            break;
        default:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
            break;
        }

    }

    if (hw->mac.type != ixgbe_mac_82598EB) {
        u32 dmatxctl, rttdcs;

        dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
        dmatxctl |= IXGBE_DMATXCTL_TE;
        IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        /* Disable arbiter to set MTQC */
        rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        rttdcs |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        IXGBE_WRITE_REG(hw, IXGBE_MTQC,
            ixgbe_get_mtqc(adapter->iov_mode));
        rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    }

} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
    return (ixgbe_sctx);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
    struct adapter  *adapter;
    device_t        dev;
    if_softc_ctx_t  scctx;
    struct ixgbe_hw *hw;
    int             error = 0;
    u32             ctrl_ext;

    INIT_DEBUGOUT("ixgbe_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    dev = iflib_get_dev(ctx);
    adapter = iflib_get_softc(ctx);
    adapter->hw.back = adapter;
    adapter->ctx = ctx;
    adapter->dev = dev;
    scctx = adapter->shared = iflib_get_softc_ctx(ctx);
    adapter->media = iflib_get_media(ctx);
    hw = &adapter->hw;

    /* Determine hardware revision */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_get_revid(dev);
    hw->subsystem_vendor_id = pci_get_subvendor(dev);
    hw->subsystem_device_id = pci_get_subdevice(dev);

    /* Do base PCI setup - map BAR0 */
    if (ixgbe_allocate_pci_resources(ctx)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        return (ENXIO);
    }

    /* let hardware know driver is loaded */
    ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

    /*
     * Initialize the shared code
     */
    if (ixgbe_init_shared_code(hw) != 0) {
        device_printf(dev, "Unable to initialize the shared code\n");
        error = ENXIO;
        goto err_pci;
    }

    if (hw->mbx.ops.init_params)
        hw->mbx.ops.init_params(hw);

    hw->allow_unsupported_sfp = allow_unsupported_sfp;

    if (hw->mac.type != ixgbe_mac_82598EB)
        hw->phy.smart_speed = ixgbe_smart_speed;

    ixgbe_init_device_features(adapter);

    /* Enable WoL (if supported) */
    ixgbe_check_wol_support(adapter);

    /* Verify adapter fan is still functional (if applicable) */
    if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
        u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
        ixgbe_check_fan_failure(adapter, esdp, FALSE);
    }

    /* Ensure SW/FW semaphore is free */
    ixgbe_init_swfw_semaphore(hw);

    /* Set an initial default flow control value */
    hw->fc.requested_mode = ixgbe_flow_control;

    hw->phy.reset_if_overtemp = TRUE;
    error = ixgbe_reset_hw(hw);
    hw->phy.reset_if_overtemp = FALSE;
    if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
        /*
         * No optics in this port, set up
         * so the timer routine will probe
         * for later insertion.
         */
        adapter->sfp_probe = TRUE;
        error = 0;
    } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
        device_printf(dev, "Unsupported SFP+ module detected!\n");
        error = EIO;
        goto err_pci;
    } else if (error) {
        device_printf(dev, "Hardware initialization failed\n");
        error = EIO;
        goto err_pci;
    }

    /* Make sure we have a good EEPROM before we read from it */
    if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
        device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
        error = EIO;
        goto err_pci;
    }

    error = ixgbe_start_hw(hw);
    switch (error) {
    case IXGBE_ERR_EEPROM_VERSION:
        device_printf(dev,
            "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
        break;
    case IXGBE_ERR_SFP_NOT_SUPPORTED:
        device_printf(dev, "Unsupported SFP+ Module\n");
        error = EIO;
        goto err_pci;
    case IXGBE_ERR_SFP_NOT_PRESENT:
        device_printf(dev, "No SFP+ Module found\n");
        /* falls thru */
    default:
        break;
    }

    /* Most of the iflib initialization... */
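    /*
     * The isc_* fields filled in below are how this driver describes its
     * descriptor ring sizes, checksum offload capabilities and
     * scatter/gather limits to iflib; iflib sizes its DMA allocations and
     * the ifnet capability bits from these values.
     */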
    iflib_set_mac(ctx, hw->mac.addr);
    switch (adapter->hw.mac.type) {
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        scctx->isc_rss_table_size = 512;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
        break;
    default:
        scctx->isc_rss_table_size = 128;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
    }

    /* Allow legacy interrupts */
    ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

    scctx->isc_txqsizes[0] =
        roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
        sizeof(u32), DBA_ALIGN);
    scctx->isc_rxqsizes[0] =
        roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
        DBA_ALIGN);

    /* XXX */
    scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
        CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
    } else {
        scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
        scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
    }

    scctx->isc_msix_bar = pci_msix_table_bar(dev);

    scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
    scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

    scctx->isc_txrx = &ixgbe_txrx;

    scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

    return (0);

err_pci:
    ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
    ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
    ixgbe_free_pci_resources(ctx);

    return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
    device_t        dev;
    struct adapter  *adapter;
    struct ixgbe_hw *hw;
    int             error = 0;

    dev = iflib_get_dev(ctx);
    adapter = iflib_get_softc(ctx);
    hw = &adapter->hw;


    if (adapter->intr_type == IFLIB_INTR_LEGACY &&
        (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
        device_printf(dev, "Device does not support legacy interrupts");
        error = ENXIO;
        goto err;
    }

    /* Allocate multicast array memory. */
    adapter->mta = malloc(sizeof(*adapter->mta) *
        MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
    if (adapter->mta == NULL) {
        device_printf(dev, "Can not allocate multicast setup array\n");
        error = ENOMEM;
        goto err;
    }

    /* hw.ix defaults init */
    ixgbe_set_advertise(adapter, ixgbe_advertise_speed);

    /* Enable the optics for 82599 SFP+ fiber */
    ixgbe_enable_tx_laser(hw);

    /* Enable power to the phy. */
    ixgbe_set_phy_power(hw, TRUE);

    ixgbe_initialize_iov(adapter);

    error = ixgbe_setup_interface(ctx);
    if (error) {
        device_printf(dev, "Interface setup failed: %d\n", error);
        goto err;
    }

    ixgbe_if_update_admin_status(ctx);

    /* Initialize statistics */
    ixgbe_update_stats_counters(adapter);
    ixgbe_add_hw_stats(adapter);

    /* Check PCIE slot type/speed/width */
    ixgbe_get_slot_info(adapter);

    /*
     * Do time init and sysctl init here, but
     * only on the first port of a bypass adapter.
     */
    ixgbe_bypass_init(adapter);

    /* Set an initial dmac value */
    adapter->dmac = 0;
    /* Set initial advertised speeds (if applicable) */
    adapter->advertise = ixgbe_get_advertise(adapter);

    if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
        ixgbe_define_iov_schemas(dev, &error);

    /* Add sysctls */
    ixgbe_add_device_sysctls(ctx);

    return (0);
err:
    return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u16             dev_caps = 0;

    /* Find out WoL support for port */
    adapter->wol_support = hw->wol_enabled = 0;
    ixgbe_get_device_caps(hw, &dev_caps);
    if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
        ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
        hw->bus.func == 0))
        adapter->wol_support = hw->wol_enabled = 1;

    /* Save initial wake up filter configuration */
    adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

    return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
    struct ifnet   *ifp = iflib_get_ifp(ctx);
    struct adapter *adapter = iflib_get_softc(ctx);

    INIT_DEBUGOUT("ixgbe_setup_interface: begin");

    if_setbaudrate(ifp, IF_Gbps(10));

    adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);

    ixgbe_add_media_types(ctx);

    /* Autoselect media by default */
    ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);

    return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    if_t           ifp = iflib_get_ifp(ctx);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (adapter->ipackets);
    case IFCOUNTER_OPACKETS:
        return (adapter->opackets);
    case IFCOUNTER_IBYTES:
        return (adapter->ibytes);
    case IFCOUNTER_OBYTES:
        return (adapter->obytes);
    case IFCOUNTER_IMCASTS:
        return (adapter->imcasts);
    case IFCOUNTER_OMCASTS:
        return (adapter->omcasts);
    case IFCOUNTER_COLLISIONS:
        return (0);
    case IFCOUNTER_IQDROPS:
        return (adapter->iqdrops);
    case IFCOUNTER_OQDROPS:
        return (0);
    case IFCOUNTER_IERRORS:
        return (adapter->ierrors);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
    struct adapter  *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    int             i;


    if (hw->phy.ops.read_i2c_byte == NULL)
        return (ENXIO);
    for (i = 0; i < req->len; i++)
        hw->phy.ops.read_i2c_byte(hw, req->offset + i,
            req->dev_addr, &req->data[i]);
    return (0);
} /* ixgbe_if_i2c_req */

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
    struct adapter  *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    device_t        dev = iflib_get_dev(ctx);
    u64             layer;

    layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

    /* Media types with matching FreeBSD media defines */
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
        layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
            NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
                NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
                NULL);
    } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
        device_printf(dev, "Media supported: 10GbaseKR\n");
        device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
        device_printf(dev, "Media supported: 10GbaseKX4\n");
        device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
        device_printf(dev, "Media supported: 1000baseKX\n");
        device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
        device_printf(dev, "Media supported: 2500baseKX\n");
        device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
        ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
    }
#endif
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
        device_printf(dev, "Media supported: 1000baseBX\n");

    if (hw->device_id == IXGBE_DEV_ID_82598AT) {
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
            0, NULL);
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    }

    ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        if (hw->phy.type == ixgbe_phy_nl)
            return (TRUE);
        return (FALSE);
    case ixgbe_mac_82599EB:
        switch (hw->mac.ops.get_media_type(hw)) {
        case ixgbe_media_type_fiber:
        case ixgbe_media_type_fiber_qsfp:
            return (TRUE);
        default:
            return (FALSE);
        }
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
            return (TRUE);
        return (FALSE);
    default:
        return (FALSE);
    }
} /* ixgbe_is_sfp */

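/*
 * For SFP-based ports, module identification and link bring-up are deferred
 * to the admin task (the IXGBE_REQUEST_TASK_MOD request queued below);
 * non-SFP ports are configured synchronously through setup_link().
 */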
/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
    struct adapter  *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    u32             autoneg, err = 0;
    bool            sfp, negotiate;

    sfp = ixgbe_is_sfp(hw);

    if (sfp) {
        adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
        iflib_admin_intr_deferred(ctx);
    } else {
        if (hw->mac.ops.check_link)
            err = ixgbe_check_link(hw, &adapter->link_speed,
                &adapter->link_up, FALSE);
        if (err)
            return;
        autoneg = hw->phy.autoneg_advertised;
        if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
            err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                &negotiate);
        if (err)
            return;
        if (hw->mac.ops.setup_link)
            err = hw->mac.ops.setup_link(hw, autoneg,
                adapter->link_up);
    }
} /* ixgbe_config_link */

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
    struct ixgbe_hw       *hw = &adapter->hw;
    struct ixgbe_hw_stats *stats = &adapter->stats.pf;
    u32                   missed_rx = 0, bprc, lxon, lxoff, total;
    u64                   total_missed_rx = 0;

    stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
    stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
    stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
    stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
    stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

    for (int i = 0; i < 16; i++) {
        stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
        stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
        stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
    }
    stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
    stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
    stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

    /* Hardware workaround, gprc counts missed packets */
    stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
    stats->gprc -= missed_rx;

    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
        stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
    } else {
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
        stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
        /* 82598 only has a counter in the high register */
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
    }

    /*
     * Workaround: mprc hardware is incorrectly counting
     * broadcasts, so for now we subtract those.
     */
    bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
    stats->bprc += bprc;
    stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
    if (hw->mac.type == ixgbe_mac_82598EB)
        stats->mprc -= bprc;

    stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
    stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
    stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
    stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
    stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
    stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

    lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
    stats->lxontxc += lxon;
    lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
    stats->lxofftxc += lxoff;
    total = lxon + lxoff;

    stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
    stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
    stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
    stats->gptc -= total;
    stats->mptc -= total;
    stats->ptc64 -= total;
    stats->gotc -= total * ETHER_MIN_LEN;

    stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
    stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
    stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
    stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
    stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
    stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
    stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
    stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
    stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
    stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
    stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
    stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
    stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
    stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
    stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
    stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
    stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
    stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    /* Only read FCOE on 82599 */
    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
        stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
        stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
        stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
        stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
    }

    /* Fill out the OS statistics structure */
    IXGBE_SET_IPACKETS(adapter, stats->gprc);
    IXGBE_SET_OPACKETS(adapter, stats->gptc);
    IXGBE_SET_IBYTES(adapter, stats->gorc);
    IXGBE_SET_OBYTES(adapter, stats->gotc);
    IXGBE_SET_IMCASTS(adapter, stats->mprc);
    IXGBE_SET_OMCASTS(adapter, stats->mptc);
    IXGBE_SET_COLLISIONS(adapter, 0);
    IXGBE_SET_IQDROPS(adapter, total_missed_rx);
    IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
} /* ixgbe_update_stats_counters */

/************************************************************************
 * ixgbe_add_hw_stats
 *
 *   Add sysctl variables, one per statistic, to the system.
1509 ************************************************************************/ 1510 static void 1511 ixgbe_add_hw_stats(struct adapter *adapter) 1512 { 1513 device_t dev = iflib_get_dev(adapter->ctx); 1514 struct ix_rx_queue *rx_que; 1515 struct ix_tx_queue *tx_que; 1516 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1517 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1518 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1519 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1520 struct sysctl_oid *stat_node, *queue_node; 1521 struct sysctl_oid_list *stat_list, *queue_list; 1522 int i; 1523 1524 #define QUEUE_NAME_LEN 32 1525 char namebuf[QUEUE_NAME_LEN]; 1526 1527 /* Driver Statistics */ 1528 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1529 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); 1530 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1531 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); 1532 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1533 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); 1534 1535 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { 1536 struct tx_ring *txr = &tx_que->txr; 1537 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1538 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1539 CTLFLAG_RD, NULL, "Queue Name"); 1540 queue_list = SYSCTL_CHILDREN(queue_node); 1541 1542 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1543 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1544 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1545 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1546 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1547 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1548 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1549 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1550 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1551 CTLFLAG_RD, &txr->total_packets, 1552 "Queue Packets Transmitted"); 1553 } 1554 1555 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { 1556 struct rx_ring *rxr = &rx_que->rxr; 1557 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1558 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1559 CTLFLAG_RD, NULL, "Queue Name"); 1560 queue_list = SYSCTL_CHILDREN(queue_node); 1561 1562 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1563 CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i], 1564 sizeof(&adapter->rx_queues[i]), 1565 ixgbe_sysctl_interrupt_rate_handler, "IU", 1566 "Interrupt Rate"); 1567 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1568 CTLFLAG_RD, &(adapter->rx_queues[i].irqs), 1569 "irqs on this queue"); 1570 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1571 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1572 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1573 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1574 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1575 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1576 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1577 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1578 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1579 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1580 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1581 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1582 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1583 CTLFLAG_RD, 
	    &rxr->rx_discarded, "Discarded RX packets");
	}

	/* MAC stats get their own sub node */

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD, NULL, "MAC Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");

	/* Flow Control stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
	    CTLFLAG_RD, &stats->xec, "Checksum Errors");

	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 * Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if (!txr)
		return (0);

	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;

	return (0);
} /* ixgbe_sysctl_tdh_handler */

/************************************************************************
 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
 *
 * Retrieves the TDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
{
	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	int error;
	unsigned int val;

	if
(!txr) 1722 return (0); 1723 1724 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); 1725 error = sysctl_handle_int(oidp, &val, 0, req); 1726 if (error || !req->newptr) 1727 return error; 1728 1729 return (0); 1730 } /* ixgbe_sysctl_tdt_handler */ 1731 1732 /************************************************************************ 1733 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1734 * 1735 * Retrieves the RDH value from the hardware 1736 ************************************************************************/ 1737 static int 1738 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1739 { 1740 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1741 int error; 1742 unsigned int val; 1743 1744 if (!rxr) 1745 return (0); 1746 1747 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); 1748 error = sysctl_handle_int(oidp, &val, 0, req); 1749 if (error || !req->newptr) 1750 return error; 1751 1752 return (0); 1753 } /* ixgbe_sysctl_rdh_handler */ 1754 1755 /************************************************************************ 1756 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1757 * 1758 * Retrieves the RDT value from the hardware 1759 ************************************************************************/ 1760 static int 1761 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1762 { 1763 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1764 int error; 1765 unsigned int val; 1766 1767 if (!rxr) 1768 return (0); 1769 1770 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); 1771 error = sysctl_handle_int(oidp, &val, 0, req); 1772 if (error || !req->newptr) 1773 return error; 1774 1775 return (0); 1776 } /* ixgbe_sysctl_rdt_handler */ 1777 1778 /************************************************************************ 1779 * ixgbe_if_vlan_register 1780 * 1781 * Run via vlan config EVENT, it enables us to use the 1782 * HW Filter table since we can get the vlan id. This 1783 * just creates the entry in the soft version of the 1784 * VFTA, init will repopulate the real table. 1785 ************************************************************************/ 1786 static void 1787 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1788 { 1789 struct adapter *adapter = iflib_get_softc(ctx); 1790 u16 index, bit; 1791 1792 index = (vtag >> 5) & 0x7F; 1793 bit = vtag & 0x1F; 1794 adapter->shadow_vfta[index] |= (1 << bit); 1795 ++adapter->num_vlans; 1796 ixgbe_setup_vlan_hw_support(ctx); 1797 } /* ixgbe_if_vlan_register */ 1798 1799 /************************************************************************ 1800 * ixgbe_if_vlan_unregister 1801 * 1802 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 
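 * The hardware VFTA is rebuilt from the soft copy by
 * ixgbe_setup_vlan_hw_support(); as in ixgbe_if_vlan_register(), the
 * entry lives at word (vtag >> 5), bit (vtag & 0x1F), so VLAN 100,
 * for example, maps to word 3, bit 4.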
1803 ************************************************************************/ 1804 static void 1805 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) 1806 { 1807 struct adapter *adapter = iflib_get_softc(ctx); 1808 u16 index, bit; 1809 1810 index = (vtag >> 5) & 0x7F; 1811 bit = vtag & 0x1F; 1812 adapter->shadow_vfta[index] &= ~(1 << bit); 1813 --adapter->num_vlans; 1814 /* Re-init to load the changes */ 1815 ixgbe_setup_vlan_hw_support(ctx); 1816 } /* ixgbe_if_vlan_unregister */ 1817 1818 /************************************************************************ 1819 * ixgbe_setup_vlan_hw_support 1820 ************************************************************************/ 1821 static void 1822 ixgbe_setup_vlan_hw_support(if_ctx_t ctx) 1823 { 1824 struct ifnet *ifp = iflib_get_ifp(ctx); 1825 struct adapter *adapter = iflib_get_softc(ctx); 1826 struct ixgbe_hw *hw = &adapter->hw; 1827 struct rx_ring *rxr; 1828 int i; 1829 u32 ctrl; 1830 1831 1832 /* 1833 * We get here thru init_locked, meaning 1834 * a soft reset, this has already cleared 1835 * the VFTA and other state, so if there 1836 * have been no vlan's registered do nothing. 1837 */ 1838 if (adapter->num_vlans == 0) 1839 return; 1840 1841 /* Setup the queues for vlans */ 1842 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1843 for (i = 0; i < adapter->num_rx_queues; i++) { 1844 rxr = &adapter->rx_queues[i].rxr; 1845 /* On 82599 the VLAN enable is per/queue in RXDCTL */ 1846 if (hw->mac.type != ixgbe_mac_82598EB) { 1847 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 1848 ctrl |= IXGBE_RXDCTL_VME; 1849 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 1850 } 1851 rxr->vtag_strip = TRUE; 1852 } 1853 } 1854 1855 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 1856 return; 1857 /* 1858 * A soft reset zero's out the VFTA, so 1859 * we need to repopulate it now. 1860 */ 1861 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1862 if (adapter->shadow_vfta[i] != 0) 1863 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 1864 adapter->shadow_vfta[i]); 1865 1866 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1867 /* Enable the Filter Table if enabled */ 1868 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 1869 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1870 ctrl |= IXGBE_VLNCTRL_VFE; 1871 } 1872 if (hw->mac.type == ixgbe_mac_82598EB) 1873 ctrl |= IXGBE_VLNCTRL_VME; 1874 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1875 } /* ixgbe_setup_vlan_hw_support */ 1876 1877 /************************************************************************ 1878 * ixgbe_get_slot_info 1879 * 1880 * Get the width and transaction speed of 1881 * the slot this adapter is plugged into. 1882 ************************************************************************/ 1883 static void 1884 ixgbe_get_slot_info(struct adapter *adapter) 1885 { 1886 device_t dev = iflib_get_dev(adapter->ctx); 1887 struct ixgbe_hw *hw = &adapter->hw; 1888 int bus_info_valid = TRUE; 1889 u32 offset; 1890 u16 link; 1891 1892 /* Some devices are behind an internal bridge */ 1893 switch (hw->device_id) { 1894 case IXGBE_DEV_ID_82599_SFP_SF_QP: 1895 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 1896 goto get_parent_info; 1897 default: 1898 break; 1899 } 1900 1901 ixgbe_get_bus_info(hw); 1902 1903 /* 1904 * Some devices don't use PCI-E, but there is no need 1905 * to display "Unknown" for bus speed and width. 
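	 * The X550EM_x/X550EM_a cases in the switch below simply return.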
1906 */ 1907 switch (hw->mac.type) { 1908 case ixgbe_mac_X550EM_x: 1909 case ixgbe_mac_X550EM_a: 1910 return; 1911 default: 1912 goto display; 1913 } 1914 1915 get_parent_info: 1916 /* 1917 * For the Quad port adapter we need to parse back 1918 * up the PCI tree to find the speed of the expansion 1919 * slot into which this adapter is plugged. A bit more work. 1920 */ 1921 dev = device_get_parent(device_get_parent(dev)); 1922 #ifdef IXGBE_DEBUG 1923 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 1924 pci_get_slot(dev), pci_get_function(dev)); 1925 #endif 1926 dev = device_get_parent(device_get_parent(dev)); 1927 #ifdef IXGBE_DEBUG 1928 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 1929 pci_get_slot(dev), pci_get_function(dev)); 1930 #endif 1931 /* Now get the PCI Express Capabilities offset */ 1932 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 1933 /* 1934 * Hmm...can't get PCI-Express capabilities. 1935 * Falling back to default method. 1936 */ 1937 bus_info_valid = FALSE; 1938 ixgbe_get_bus_info(hw); 1939 goto display; 1940 } 1941 /* ...and read the Link Status Register */ 1942 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 1943 ixgbe_set_pci_config_data_generic(hw, link); 1944 1945 display: 1946 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 1947 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 1948 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 1949 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 1950 "Unknown"), 1951 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 1952 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 1953 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 1954 "Unknown")); 1955 1956 if (bus_info_valid) { 1957 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 1958 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 1959 (hw->bus.speed == ixgbe_bus_speed_2500))) { 1960 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1961 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 1962 } 1963 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 1964 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 1965 (hw->bus.speed < ixgbe_bus_speed_8000))) { 1966 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1967 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 1968 } 1969 } else 1970 device_printf(dev, "Unable to determine slot speed/width. 
The speed/width reported are that of the internal switch.\n"); 1971 1972 return; 1973 } /* ixgbe_get_slot_info */ 1974 1975 /************************************************************************ 1976 * ixgbe_if_msix_intr_assign 1977 * 1978 * Setup MSI-X Interrupt resources and handlers 1979 ************************************************************************/ 1980 static int 1981 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 1982 { 1983 struct adapter *adapter = iflib_get_softc(ctx); 1984 struct ix_rx_queue *rx_que = adapter->rx_queues; 1985 struct ix_tx_queue *tx_que; 1986 int error, rid, vector = 0; 1987 int cpu_id = 0; 1988 char buf[16]; 1989 1990 /* Admin Que is vector 0*/ 1991 rid = vector + 1; 1992 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) { 1993 rid = vector + 1; 1994 1995 snprintf(buf, sizeof(buf), "rxq%d", i); 1996 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 1997 IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 1998 1999 if (error) { 2000 device_printf(iflib_get_dev(ctx), 2001 "Failed to allocate que int %d err: %d", i, error); 2002 adapter->num_rx_queues = i + 1; 2003 goto fail; 2004 } 2005 2006 rx_que->msix = vector; 2007 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 2008 /* 2009 * The queue ID is used as the RSS layer bucket ID. 2010 * We look up the queue ID -> RSS CPU ID and select 2011 * that. 2012 */ 2013 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2014 } else { 2015 /* 2016 * Bind the MSI-X vector, and thus the 2017 * rings to the corresponding cpu. 2018 * 2019 * This just happens to match the default RSS 2020 * round-robin bucket -> queue -> CPU allocation. 2021 */ 2022 if (adapter->num_rx_queues > 1) 2023 cpu_id = i; 2024 } 2025 2026 } 2027 for (int i = 0; i < adapter->num_tx_queues; i++) { 2028 snprintf(buf, sizeof(buf), "txq%d", i); 2029 tx_que = &adapter->tx_queues[i]; 2030 tx_que->msix = i % adapter->num_rx_queues; 2031 iflib_softirq_alloc_generic(ctx, 2032 &adapter->rx_queues[tx_que->msix].que_irq, 2033 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2034 } 2035 rid = vector + 1; 2036 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, 2037 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq"); 2038 if (error) { 2039 device_printf(iflib_get_dev(ctx), 2040 "Failed to register admin handler"); 2041 return (error); 2042 } 2043 2044 adapter->vector = vector; 2045 2046 return (0); 2047 fail: 2048 iflib_irq_free(ctx, &adapter->irq); 2049 rx_que = adapter->rx_queues; 2050 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) 2051 iflib_irq_free(ctx, &rx_que->que_irq); 2052 2053 return (error); 2054 } /* ixgbe_if_msix_intr_assign */ 2055 2056 /********************************************************************* 2057 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2058 **********************************************************************/ 2059 static int 2060 ixgbe_msix_que(void *arg) 2061 { 2062 struct ix_rx_queue *que = arg; 2063 struct adapter *adapter = que->adapter; 2064 struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx); 2065 2066 /* Protect against spurious interrupts */ 2067 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2068 return (FILTER_HANDLED); 2069 2070 ixgbe_disable_queue(adapter, que->msix); 2071 ++que->irqs; 2072 2073 return (FILTER_SCHEDULE_THREAD); 2074 } /* ixgbe_msix_que */ 2075 2076 /************************************************************************ 2077 * ixgbe_media_status - Media Ioctl callback 2078 * 2079 * Called whenever the user queries the status of 2080 
* the interface using ifconfig. 2081 ************************************************************************/ 2082 static void 2083 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2084 { 2085 struct adapter *adapter = iflib_get_softc(ctx); 2086 struct ixgbe_hw *hw = &adapter->hw; 2087 int layer; 2088 2089 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2090 2091 ifmr->ifm_status = IFM_AVALID; 2092 ifmr->ifm_active = IFM_ETHER; 2093 2094 if (!adapter->link_active) 2095 return; 2096 2097 ifmr->ifm_status |= IFM_ACTIVE; 2098 layer = adapter->phy_layer; 2099 2100 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2101 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2102 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2103 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2104 switch (adapter->link_speed) { 2105 case IXGBE_LINK_SPEED_10GB_FULL: 2106 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2107 break; 2108 case IXGBE_LINK_SPEED_1GB_FULL: 2109 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2110 break; 2111 case IXGBE_LINK_SPEED_100_FULL: 2112 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2113 break; 2114 case IXGBE_LINK_SPEED_10_FULL: 2115 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2116 break; 2117 } 2118 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2119 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2120 switch (adapter->link_speed) { 2121 case IXGBE_LINK_SPEED_10GB_FULL: 2122 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2123 break; 2124 } 2125 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2126 switch (adapter->link_speed) { 2127 case IXGBE_LINK_SPEED_10GB_FULL: 2128 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2129 break; 2130 case IXGBE_LINK_SPEED_1GB_FULL: 2131 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2132 break; 2133 } 2134 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2135 switch (adapter->link_speed) { 2136 case IXGBE_LINK_SPEED_10GB_FULL: 2137 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2138 break; 2139 case IXGBE_LINK_SPEED_1GB_FULL: 2140 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2141 break; 2142 } 2143 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2144 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2145 switch (adapter->link_speed) { 2146 case IXGBE_LINK_SPEED_10GB_FULL: 2147 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2148 break; 2149 case IXGBE_LINK_SPEED_1GB_FULL: 2150 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2151 break; 2152 } 2153 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2154 switch (adapter->link_speed) { 2155 case IXGBE_LINK_SPEED_10GB_FULL: 2156 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2157 break; 2158 } 2159 /* 2160 * XXX: These need to use the proper media types once 2161 * they're added. 
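	 * Without IFM_ETH_XTYPE the backplane KR/KX4/KX links are
	 * reported with the closest SR/CX4/CX subtypes instead.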
2162 */ 2163 #ifndef IFM_ETH_XTYPE 2164 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2165 switch (adapter->link_speed) { 2166 case IXGBE_LINK_SPEED_10GB_FULL: 2167 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2168 break; 2169 case IXGBE_LINK_SPEED_2_5GB_FULL: 2170 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2171 break; 2172 case IXGBE_LINK_SPEED_1GB_FULL: 2173 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2174 break; 2175 } 2176 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2177 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2178 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2179 switch (adapter->link_speed) { 2180 case IXGBE_LINK_SPEED_10GB_FULL: 2181 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2182 break; 2183 case IXGBE_LINK_SPEED_2_5GB_FULL: 2184 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2185 break; 2186 case IXGBE_LINK_SPEED_1GB_FULL: 2187 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2188 break; 2189 } 2190 #else 2191 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2192 switch (adapter->link_speed) { 2193 case IXGBE_LINK_SPEED_10GB_FULL: 2194 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 2195 break; 2196 case IXGBE_LINK_SPEED_2_5GB_FULL: 2197 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2198 break; 2199 case IXGBE_LINK_SPEED_1GB_FULL: 2200 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2201 break; 2202 } 2203 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2204 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2205 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2206 switch (adapter->link_speed) { 2207 case IXGBE_LINK_SPEED_10GB_FULL: 2208 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 2209 break; 2210 case IXGBE_LINK_SPEED_2_5GB_FULL: 2211 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2212 break; 2213 case IXGBE_LINK_SPEED_1GB_FULL: 2214 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2215 break; 2216 } 2217 #endif 2218 2219 /* If nothing is recognized... */ 2220 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 2221 ifmr->ifm_active |= IFM_UNKNOWN; 2222 2223 /* Display current flow control setting used on link */ 2224 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 2225 hw->fc.current_mode == ixgbe_fc_full) 2226 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2227 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 2228 hw->fc.current_mode == ixgbe_fc_full) 2229 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2230 } /* ixgbe_media_status */ 2231 2232 /************************************************************************ 2233 * ixgbe_media_change - Media Ioctl callback 2234 * 2235 * Called when the user changes speed/duplex using 2236 * media/mediopt option with ifconfig. 2237 ************************************************************************/ 2238 static int 2239 ixgbe_if_media_change(if_ctx_t ctx) 2240 { 2241 struct adapter *adapter = iflib_get_softc(ctx); 2242 struct ifmedia *ifm = iflib_get_media(ctx); 2243 struct ixgbe_hw *hw = &adapter->hw; 2244 ixgbe_link_speed speed = 0; 2245 2246 INIT_DEBUGOUT("ixgbe_if_media_change: begin"); 2247 2248 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2249 return (EINVAL); 2250 2251 if (hw->phy.media_type == ixgbe_media_type_backplane) 2252 return (EPERM); 2253 2254 /* 2255 * We don't actually need to check against the supported 2256 * media types of the adapter; ifmedia will take care of 2257 * that for us. 
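	 * The speeds requested here are also cached in adapter->advertise
	 * below as a bitmask: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M.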
2258 */ 2259 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2260 case IFM_AUTO: 2261 case IFM_10G_T: 2262 speed |= IXGBE_LINK_SPEED_100_FULL; 2263 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2264 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2265 break; 2266 case IFM_10G_LRM: 2267 case IFM_10G_LR: 2268 #ifndef IFM_ETH_XTYPE 2269 case IFM_10G_SR: /* KR, too */ 2270 case IFM_10G_CX4: /* KX4 */ 2271 #else 2272 case IFM_10G_KR: 2273 case IFM_10G_KX4: 2274 #endif 2275 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2276 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2277 break; 2278 #ifndef IFM_ETH_XTYPE 2279 case IFM_1000_CX: /* KX */ 2280 #else 2281 case IFM_1000_KX: 2282 #endif 2283 case IFM_1000_LX: 2284 case IFM_1000_SX: 2285 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2286 break; 2287 case IFM_1000_T: 2288 speed |= IXGBE_LINK_SPEED_100_FULL; 2289 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2290 break; 2291 case IFM_10G_TWINAX: 2292 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2293 break; 2294 case IFM_100_TX: 2295 speed |= IXGBE_LINK_SPEED_100_FULL; 2296 break; 2297 case IFM_10_T: 2298 speed |= IXGBE_LINK_SPEED_10_FULL; 2299 break; 2300 default: 2301 goto invalid; 2302 } 2303 2304 hw->mac.autotry_restart = TRUE; 2305 hw->mac.ops.setup_link(hw, speed, TRUE); 2306 adapter->advertise = 2307 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 2308 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 2309 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 2310 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0); 2311 2312 return (0); 2313 2314 invalid: 2315 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2316 2317 return (EINVAL); 2318 } /* ixgbe_if_media_change */ 2319 2320 /************************************************************************ 2321 * ixgbe_set_promisc 2322 ************************************************************************/ 2323 static int 2324 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2325 { 2326 struct adapter *adapter = iflib_get_softc(ctx); 2327 struct ifnet *ifp = iflib_get_ifp(ctx); 2328 u32 rctl; 2329 int mcnt = 0; 2330 2331 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2332 rctl &= (~IXGBE_FCTRL_UPE); 2333 if (ifp->if_flags & IFF_ALLMULTI) 2334 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2335 else { 2336 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); 2337 } 2338 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2339 rctl &= (~IXGBE_FCTRL_MPE); 2340 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2341 2342 if (ifp->if_flags & IFF_PROMISC) { 2343 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2344 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2345 } else if (ifp->if_flags & IFF_ALLMULTI) { 2346 rctl |= IXGBE_FCTRL_MPE; 2347 rctl &= ~IXGBE_FCTRL_UPE; 2348 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2349 } 2350 return (0); 2351 } /* ixgbe_if_promisc_set */ 2352 2353 /************************************************************************ 2354 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2355 ************************************************************************/ 2356 static int 2357 ixgbe_msix_link(void *arg) 2358 { 2359 struct adapter *adapter = arg; 2360 struct ixgbe_hw *hw = &adapter->hw; 2361 u32 eicr, eicr_mask; 2362 s32 retval; 2363 2364 ++adapter->link_irq; 2365 2366 /* Pause other interrupts */ 2367 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2368 2369 /* First get the cause */ 2370 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2371 /* Be sure the queue bits are not cleared */ 2372 eicr &= ~IXGBE_EICR_RTX_QUEUE; 2373 /* Clear interrupt with write */ 2374 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2375 2376 
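	/*
	 * The checks below only latch bits in adapter->task_requests;
	 * the real work is done later by the deferred admin task.
	 */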
/* Link status change */ 2377 if (eicr & IXGBE_EICR_LSC) { 2378 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2379 adapter->task_requests |= IXGBE_REQUEST_TASK_LSC; 2380 } 2381 2382 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 2383 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && 2384 (eicr & IXGBE_EICR_FLOW_DIR)) { 2385 /* This is probably overkill :) */ 2386 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1)) 2387 return (FILTER_HANDLED); 2388 /* Disable the interrupt */ 2389 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); 2390 adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR; 2391 } else 2392 if (eicr & IXGBE_EICR_ECC) { 2393 device_printf(iflib_get_dev(adapter->ctx), 2394 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n"); 2395 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2396 } 2397 2398 /* Check for over temp condition */ 2399 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 2400 switch (adapter->hw.mac.type) { 2401 case ixgbe_mac_X550EM_a: 2402 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 2403 break; 2404 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 2405 IXGBE_EICR_GPI_SDP0_X550EM_a); 2406 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2407 IXGBE_EICR_GPI_SDP0_X550EM_a); 2408 retval = hw->phy.ops.check_overtemp(hw); 2409 if (retval != IXGBE_ERR_OVERTEMP) 2410 break; 2411 device_printf(iflib_get_dev(adapter->ctx), 2412 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2413 device_printf(iflib_get_dev(adapter->ctx), 2414 "System shutdown required!\n"); 2415 break; 2416 default: 2417 if (!(eicr & IXGBE_EICR_TS)) 2418 break; 2419 retval = hw->phy.ops.check_overtemp(hw); 2420 if (retval != IXGBE_ERR_OVERTEMP) 2421 break; 2422 device_printf(iflib_get_dev(adapter->ctx), 2423 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2424 device_printf(iflib_get_dev(adapter->ctx), 2425 "System shutdown required!\n"); 2426 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 2427 break; 2428 } 2429 } 2430 2431 /* Check for VF message */ 2432 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) && 2433 (eicr & IXGBE_EICR_MAILBOX)) 2434 adapter->task_requests |= IXGBE_REQUEST_TASK_MBX; 2435 } 2436 2437 if (ixgbe_is_sfp(hw)) { 2438 /* Pluggable optics-related interrupt */ 2439 if (hw->mac.type >= ixgbe_mac_X540) 2440 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 2441 else 2442 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 2443 2444 if (eicr & eicr_mask) { 2445 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2446 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; 2447 } 2448 2449 if ((hw->mac.type == ixgbe_mac_82599EB) && 2450 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 2451 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2452 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2453 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 2454 } 2455 } 2456 2457 /* Check for fan failure */ 2458 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 2459 ixgbe_check_fan_failure(adapter, eicr, TRUE); 2460 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2461 } 2462 2463 /* External PHY interrupt */ 2464 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 2465 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2466 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 2467 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY; 2468 } 2469 2470 return (adapter->task_requests != 0) ? 
FILTER_SCHEDULE_THREAD : FILTER_HANDLED; 2471 } /* ixgbe_msix_link */ 2472 2473 /************************************************************************ 2474 * ixgbe_sysctl_interrupt_rate_handler 2475 ************************************************************************/ 2476 static int 2477 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2478 { 2479 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2480 int error; 2481 unsigned int reg, usec, rate; 2482 2483 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); 2484 usec = ((reg & 0x0FF8) >> 3); 2485 if (usec > 0) 2486 rate = 500000 / usec; 2487 else 2488 rate = 0; 2489 error = sysctl_handle_int(oidp, &rate, 0, req); 2490 if (error || !req->newptr) 2491 return error; 2492 reg &= ~0xfff; /* default, no limitation */ 2493 ixgbe_max_interrupt_rate = 0; 2494 if (rate > 0 && rate < 500000) { 2495 if (rate < 1000) 2496 rate = 1000; 2497 ixgbe_max_interrupt_rate = rate; 2498 reg |= ((4000000/rate) & 0xff8); 2499 } 2500 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); 2501 2502 return (0); 2503 } /* ixgbe_sysctl_interrupt_rate_handler */ 2504 2505 /************************************************************************ 2506 * ixgbe_add_device_sysctls 2507 ************************************************************************/ 2508 static void 2509 ixgbe_add_device_sysctls(if_ctx_t ctx) 2510 { 2511 struct adapter *adapter = iflib_get_softc(ctx); 2512 device_t dev = iflib_get_dev(ctx); 2513 struct ixgbe_hw *hw = &adapter->hw; 2514 struct sysctl_oid_list *child; 2515 struct sysctl_ctx_list *ctx_list; 2516 2517 ctx_list = device_get_sysctl_ctx(dev); 2518 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2519 2520 /* Sysctls for all devices */ 2521 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2522 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I", 2523 IXGBE_SYSCTL_DESC_SET_FC); 2524 2525 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2526 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I", 2527 IXGBE_SYSCTL_DESC_ADV_SPEED); 2528 2529 #ifdef IXGBE_DEBUG 2530 /* testing sysctls (for all devices) */ 2531 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2532 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state, 2533 "I", "PCI Power State"); 2534 2535 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2536 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0, 2537 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2538 #endif 2539 /* for X550 series devices */ 2540 if (hw->mac.type >= ixgbe_mac_X550) 2541 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2542 CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac, 2543 "I", "DMA Coalesce"); 2544 2545 /* for WoL-capable devices */ 2546 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2547 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2548 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 2549 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2550 2551 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 2552 CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc, 2553 "I", "Enable/Disable Wake Up Filters"); 2554 } 2555 2556 /* for X552/X557-AT devices */ 2557 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2558 struct sysctl_oid *phy_node; 2559 struct sysctl_oid_list *phy_list; 2560 2561 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2562 CTLFLAG_RD, NULL, "External PHY sysctls"); 2563 phy_list = SYSCTL_CHILDREN(phy_node); 2564 2565 
SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2566 CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp, 2567 "I", "Current External PHY Temperature (Celsius)"); 2568 2569 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2570 "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, 2571 ixgbe_sysctl_phy_overtemp_occurred, "I", 2572 "External PHY High Temperature Event Occurred"); 2573 } 2574 2575 if (adapter->feat_cap & IXGBE_FEATURE_EEE) { 2576 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2577 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 2578 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2579 } 2580 } /* ixgbe_add_device_sysctls */ 2581 2582 /************************************************************************ 2583 * ixgbe_allocate_pci_resources 2584 ************************************************************************/ 2585 static int 2586 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2587 { 2588 struct adapter *adapter = iflib_get_softc(ctx); 2589 device_t dev = iflib_get_dev(ctx); 2590 int rid; 2591 2592 rid = PCIR_BAR(0); 2593 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2594 RF_ACTIVE); 2595 2596 if (!(adapter->pci_mem)) { 2597 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2598 return (ENXIO); 2599 } 2600 2601 /* Save bus_space values for READ/WRITE_REG macros */ 2602 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); 2603 adapter->osdep.mem_bus_space_handle = 2604 rman_get_bushandle(adapter->pci_mem); 2605 /* Set hw values for shared code */ 2606 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; 2607 2608 return (0); 2609 } /* ixgbe_allocate_pci_resources */ 2610 2611 /************************************************************************ 2612 * ixgbe_detach - Device removal routine 2613 * 2614 * Called when the driver is being removed. 2615 * Stops the adapter and deallocates all the resources 2616 * that were allocated for driver operation. 
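 * The port is first placed into its low power/WoL state via
 * ixgbe_setup_low_power_mode().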
2617 * 2618 * return 0 on success, positive on failure 2619 ************************************************************************/ 2620 static int 2621 ixgbe_if_detach(if_ctx_t ctx) 2622 { 2623 struct adapter *adapter = iflib_get_softc(ctx); 2624 device_t dev = iflib_get_dev(ctx); 2625 u32 ctrl_ext; 2626 2627 INIT_DEBUGOUT("ixgbe_detach: begin"); 2628 2629 if (ixgbe_pci_iov_detach(dev) != 0) { 2630 device_printf(dev, "SR-IOV in use; detach first.\n"); 2631 return (EBUSY); 2632 } 2633 2634 ixgbe_setup_low_power_mode(ctx); 2635 2636 /* let hardware know driver is unloading */ 2637 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 2638 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2639 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 2640 2641 ixgbe_free_pci_resources(ctx); 2642 free(adapter->mta, M_IXGBE); 2643 2644 return (0); 2645 } /* ixgbe_if_detach */ 2646 2647 /************************************************************************ 2648 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2649 * 2650 * Prepare the adapter/port for LPLU and/or WoL 2651 ************************************************************************/ 2652 static int 2653 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2654 { 2655 struct adapter *adapter = iflib_get_softc(ctx); 2656 struct ixgbe_hw *hw = &adapter->hw; 2657 device_t dev = iflib_get_dev(ctx); 2658 s32 error = 0; 2659 2660 if (!hw->wol_enabled) 2661 ixgbe_set_phy_power(hw, FALSE); 2662 2663 /* Limit power management flow to X550EM baseT */ 2664 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2665 hw->phy.ops.enter_lplu) { 2666 /* Turn off support for APM wakeup. (Using ACPI instead) */ 2667 IXGBE_WRITE_REG(hw, IXGBE_GRC, 2668 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 2669 2670 /* 2671 * Clear Wake Up Status register to prevent any previous wakeup 2672 * events from waking us up immediately after we suspend. 
2673 */ 2674 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2675 2676 /* 2677 * Program the Wakeup Filter Control register with user filter 2678 * settings 2679 */ 2680 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 2681 2682 /* Enable wakeups and power management in Wakeup Control */ 2683 IXGBE_WRITE_REG(hw, IXGBE_WUC, 2684 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 2685 2686 /* X550EM baseT adapters need a special LPLU flow */ 2687 hw->phy.reset_disable = TRUE; 2688 ixgbe_if_stop(ctx); 2689 error = hw->phy.ops.enter_lplu(hw); 2690 if (error) 2691 device_printf(dev, "Error entering LPLU: %d\n", error); 2692 hw->phy.reset_disable = FALSE; 2693 } else { 2694 /* Just stop for other adapters */ 2695 ixgbe_if_stop(ctx); 2696 } 2697 2698 return error; 2699 } /* ixgbe_setup_low_power_mode */ 2700 2701 /************************************************************************ 2702 * ixgbe_shutdown - Shutdown entry point 2703 ************************************************************************/ 2704 static int 2705 ixgbe_if_shutdown(if_ctx_t ctx) 2706 { 2707 int error = 0; 2708 2709 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 2710 2711 error = ixgbe_setup_low_power_mode(ctx); 2712 2713 return (error); 2714 } /* ixgbe_if_shutdown */ 2715 2716 /************************************************************************ 2717 * ixgbe_suspend 2718 * 2719 * From D0 to D3 2720 ************************************************************************/ 2721 static int 2722 ixgbe_if_suspend(if_ctx_t ctx) 2723 { 2724 int error = 0; 2725 2726 INIT_DEBUGOUT("ixgbe_suspend: begin"); 2727 2728 error = ixgbe_setup_low_power_mode(ctx); 2729 2730 return (error); 2731 } /* ixgbe_if_suspend */ 2732 2733 /************************************************************************ 2734 * ixgbe_resume 2735 * 2736 * From D3 to D0 2737 ************************************************************************/ 2738 static int 2739 ixgbe_if_resume(if_ctx_t ctx) 2740 { 2741 struct adapter *adapter = iflib_get_softc(ctx); 2742 device_t dev = iflib_get_dev(ctx); 2743 struct ifnet *ifp = iflib_get_ifp(ctx); 2744 struct ixgbe_hw *hw = &adapter->hw; 2745 u32 wus; 2746 2747 INIT_DEBUGOUT("ixgbe_resume: begin"); 2748 2749 /* Read & clear WUS register */ 2750 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2751 if (wus) 2752 device_printf(dev, "Woken up by (WUS): %#010x\n", 2753 IXGBE_READ_REG(hw, IXGBE_WUS)); 2754 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2755 /* And clear WUFC until next low-power transition */ 2756 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2757 2758 /* 2759 * Required after D3->D0 transition; 2760 * will re-advertise all previous advertised speeds 2761 */ 2762 if (ifp->if_flags & IFF_UP) 2763 ixgbe_if_init(ctx); 2764 2765 return (0); 2766 } /* ixgbe_if_resume */ 2767 2768 /************************************************************************ 2769 * ixgbe_if_mtu_set - Ioctl mtu entry point 2770 * 2771 * Return 0 on success, EINVAL on failure 2772 ************************************************************************/ 2773 static int 2774 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 2775 { 2776 struct adapter *adapter = iflib_get_softc(ctx); 2777 int error = 0; 2778 2779 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); 2780 2781 if (mtu > IXGBE_MAX_MTU) { 2782 error = EINVAL; 2783 } else { 2784 adapter->max_frame_size = mtu + IXGBE_MTU_HDR; 2785 } 2786 2787 return error; 2788 } /* ixgbe_if_mtu_set */ 2789 2790 /************************************************************************ 2791 * ixgbe_if_crcstrip_set 2792 
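 *
 * Enable/disable hardware CRC stripping; used by the netmap support
 * code when the interface enters or leaves native netmap mode.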
************************************************************************/ 2793 static void 2794 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip) 2795 { 2796 struct adapter *sc = iflib_get_softc(ctx); 2797 struct ixgbe_hw *hw = &sc->hw; 2798 /* crc stripping is set in two places: 2799 * IXGBE_HLREG0 (modified on init_locked and hw reset) 2800 * IXGBE_RDRXCTL (set by the original driver in 2801 * ixgbe_setup_hw_rsc() called in init_locked. 2802 * We disable the setting when netmap is compiled in). 2803 * We update the values here, but also in ixgbe.c because 2804 * init_locked sometimes is called outside our control. 2805 */ 2806 uint32_t hl, rxc; 2807 2808 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2809 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2810 #ifdef NETMAP 2811 if (netmap_verbose) 2812 D("%s read HLREG 0x%x rxc 0x%x", 2813 onoff ? "enter" : "exit", hl, rxc); 2814 #endif 2815 /* hw requirements ... */ 2816 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2817 rxc |= IXGBE_RDRXCTL_RSCACKC; 2818 if (onoff && !crcstrip) { 2819 /* keep the crc. Fast rx */ 2820 hl &= ~IXGBE_HLREG0_RXCRCSTRP; 2821 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; 2822 } else { 2823 /* reset default mode */ 2824 hl |= IXGBE_HLREG0_RXCRCSTRP; 2825 rxc |= IXGBE_RDRXCTL_CRCSTRIP; 2826 } 2827 #ifdef NETMAP 2828 if (netmap_verbose) 2829 D("%s write HLREG 0x%x rxc 0x%x", 2830 onoff ? "enter" : "exit", hl, rxc); 2831 #endif 2832 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); 2833 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); 2834 } /* ixgbe_if_crcstrip_set */ 2835 2836 /********************************************************************* 2837 * ixgbe_if_init - Init entry point 2838 * 2839 * Used in two ways: It is used by the stack as an init 2840 * entry point in network interface structure. It is also 2841 * used by the driver as a hw/sw initialization routine to 2842 * get to a consistent state. 2843 * 2844 * Return 0 on success, positive on failure 2845 **********************************************************************/ 2846 void 2847 ixgbe_if_init(if_ctx_t ctx) 2848 { 2849 struct adapter *adapter = iflib_get_softc(ctx); 2850 struct ifnet *ifp = iflib_get_ifp(ctx); 2851 device_t dev = iflib_get_dev(ctx); 2852 struct ixgbe_hw *hw = &adapter->hw; 2853 struct ix_rx_queue *rx_que; 2854 struct ix_tx_queue *tx_que; 2855 u32 txdctl, mhadd; 2856 u32 rxdctl, rxctrl; 2857 u32 ctrl_ext; 2858 2859 int i, j, err; 2860 2861 INIT_DEBUGOUT("ixgbe_if_init: begin"); 2862 2863 /* Queue indices may change with IOV mode */ 2864 ixgbe_align_all_queue_indices(adapter); 2865 2866 /* reprogram the RAR[0] in case user changed it. 
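	 * (it is refreshed again just below from the current interface
	 * lladdr)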
*/ 2867 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); 2868 2869 /* Get the latest mac address, User can use a LAA */ 2870 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 2871 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); 2872 hw->addr_ctrl.rar_used_count = 1; 2873 2874 ixgbe_init_hw(hw); 2875 2876 ixgbe_initialize_iov(adapter); 2877 2878 ixgbe_initialize_transmit_units(ctx); 2879 2880 /* Setup Multicast table */ 2881 ixgbe_if_multi_set(ctx); 2882 2883 /* Determine the correct mbuf pool, based on frame size */ 2884 adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 2885 2886 /* Configure RX settings */ 2887 ixgbe_initialize_receive_units(ctx); 2888 2889 /* 2890 * Initialize variable holding task enqueue requests 2891 * from MSI-X interrupts 2892 */ 2893 adapter->task_requests = 0; 2894 2895 /* Enable SDP & MSI-X interrupts based on adapter */ 2896 ixgbe_config_gpie(adapter); 2897 2898 /* Set MTU size */ 2899 if (ifp->if_mtu > ETHERMTU) { 2900 /* aka IXGBE_MAXFRS on 82599 and newer */ 2901 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 2902 mhadd &= ~IXGBE_MHADD_MFS_MASK; 2903 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 2904 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 2905 } 2906 2907 /* Now enable all the queues */ 2908 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { 2909 struct tx_ring *txr = &tx_que->txr; 2910 2911 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 2912 txdctl |= IXGBE_TXDCTL_ENABLE; 2913 /* Set WTHRESH to 8, burst writeback */ 2914 txdctl |= (8 << 16); 2915 /* 2916 * When the internal queue falls below PTHRESH (32), 2917 * start prefetching as long as there are at least 2918 * HTHRESH (1) buffers ready. The values are taken 2919 * from the Intel linux driver 3.8.21. 2920 * Prefetching enables tx line rate even with 1 queue. 
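		 * PTHRESH lives in the low bits of TXDCTL, HTHRESH at
		 * bit 8 and WTHRESH at bit 16, hence the (8 << 16) above
		 * and the (32 << 0) | (1 << 8) below.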
2921 */ 2922 txdctl |= (32 << 0) | (1 << 8); 2923 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 2924 } 2925 2926 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { 2927 struct rx_ring *rxr = &rx_que->rxr; 2928 2929 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 2930 if (hw->mac.type == ixgbe_mac_82598EB) { 2931 /* 2932 * PTHRESH = 21 2933 * HTHRESH = 4 2934 * WTHRESH = 8 2935 */ 2936 rxdctl &= ~0x3FFFFF; 2937 rxdctl |= 0x080420; 2938 } 2939 rxdctl |= IXGBE_RXDCTL_ENABLE; 2940 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 2941 for (j = 0; j < 10; j++) { 2942 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 2943 IXGBE_RXDCTL_ENABLE) 2944 break; 2945 else 2946 msec_delay(1); 2947 } 2948 wmb(); 2949 } 2950 2951 /* Enable Receive engine */ 2952 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2953 if (hw->mac.type == ixgbe_mac_82598EB) 2954 rxctrl |= IXGBE_RXCTRL_DMBYPS; 2955 rxctrl |= IXGBE_RXCTRL_RXEN; 2956 ixgbe_enable_rx_dma(hw, rxctrl); 2957 2958 /* Set up MSI/MSI-X routing */ 2959 if (ixgbe_enable_msix) { 2960 ixgbe_configure_ivars(adapter); 2961 /* Set up auto-mask */ 2962 if (hw->mac.type == ixgbe_mac_82598EB) 2963 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2964 else { 2965 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 2966 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 2967 } 2968 } else { /* Simple settings for Legacy/MSI */ 2969 ixgbe_set_ivar(adapter, 0, 0, 0); 2970 ixgbe_set_ivar(adapter, 0, 0, 1); 2971 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2972 } 2973 2974 ixgbe_init_fdir(adapter); 2975 2976 /* 2977 * Check on any SFP devices that 2978 * need to be kick-started 2979 */ 2980 if (hw->phy.type == ixgbe_phy_none) { 2981 err = hw->phy.ops.identify(hw); 2982 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 2983 device_printf(dev, 2984 "Unsupported SFP+ module type was detected.\n"); 2985 return; 2986 } 2987 } 2988 2989 /* Set moderation on the Link interrupt */ 2990 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR); 2991 2992 /* Enable power to the phy. 
*/ 2993 ixgbe_set_phy_power(hw, TRUE); 2994 2995 /* Config/Enable Link */ 2996 ixgbe_config_link(ctx); 2997 2998 /* Hardware Packet Buffer & Flow Control setup */ 2999 ixgbe_config_delay_values(adapter); 3000 3001 /* Initialize the FC settings */ 3002 ixgbe_start_hw(hw); 3003 3004 /* Set up VLAN support and filter */ 3005 ixgbe_setup_vlan_hw_support(ctx); 3006 3007 /* Setup DMA Coalescing */ 3008 ixgbe_config_dmac(adapter); 3009 3010 /* And now turn on interrupts */ 3011 ixgbe_if_enable_intr(ctx); 3012 3013 /* Enable the use of the MBX by the VF's */ 3014 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { 3015 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3016 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 3017 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3018 } 3019 3020 } /* ixgbe_init_locked */ 3021 3022 /************************************************************************ 3023 * ixgbe_set_ivar 3024 * 3025 * Setup the correct IVAR register for a particular MSI-X interrupt 3026 * (yes this is all very magic and confusing :) 3027 * - entry is the register array entry 3028 * - vector is the MSI-X vector for this queue 3029 * - type is RX/TX/MISC 3030 ************************************************************************/ 3031 static void 3032 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 3033 { 3034 struct ixgbe_hw *hw = &adapter->hw; 3035 u32 ivar, index; 3036 3037 vector |= IXGBE_IVAR_ALLOC_VAL; 3038 3039 switch (hw->mac.type) { 3040 case ixgbe_mac_82598EB: 3041 if (type == -1) 3042 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3043 else 3044 entry += (type * 64); 3045 index = (entry >> 2) & 0x1F; 3046 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3047 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3048 ivar |= (vector << (8 * (entry & 0x3))); 3049 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 3050 break; 3051 case ixgbe_mac_82599EB: 3052 case ixgbe_mac_X540: 3053 case ixgbe_mac_X550: 3054 case ixgbe_mac_X550EM_x: 3055 case ixgbe_mac_X550EM_a: 3056 if (type == -1) { /* MISC IVAR */ 3057 index = (entry & 1) * 8; 3058 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3059 ivar &= ~(0xFF << index); 3060 ivar |= (vector << index); 3061 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3062 } else { /* RX/TX IVARS */ 3063 index = (16 * (entry & 1)) + (8 * type); 3064 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3065 ivar &= ~(0xFF << index); 3066 ivar |= (vector << index); 3067 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3068 } 3069 default: 3070 break; 3071 } 3072 } /* ixgbe_set_ivar */ 3073 3074 /************************************************************************ 3075 * ixgbe_configure_ivars 3076 ************************************************************************/ 3077 static void 3078 ixgbe_configure_ivars(struct adapter *adapter) 3079 { 3080 struct ix_rx_queue *rx_que = adapter->rx_queues; 3081 struct ix_tx_queue *tx_que = adapter->tx_queues; 3082 u32 newitr; 3083 3084 if (ixgbe_max_interrupt_rate > 0) 3085 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3086 else { 3087 /* 3088 * Disable DMA coalescing if interrupt moderation is 3089 * disabled. 
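		 * (ixgbe_max_interrupt_rate == 0 leaves EITR at 0, i.e.
		 * no throttling at all.)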
3090 */ 3091 adapter->dmac = 0; 3092 newitr = 0; 3093 } 3094 3095 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) { 3096 struct rx_ring *rxr = &rx_que->rxr; 3097 3098 /* First the RX queue entry */ 3099 ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0); 3100 3101 /* Set an Initial EITR value */ 3102 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr); 3103 } 3104 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) { 3105 struct tx_ring *txr = &tx_que->txr; 3106 3107 /* ... and the TX */ 3108 ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1); 3109 } 3110 /* For the Link interrupt */ 3111 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 3112 } /* ixgbe_configure_ivars */ 3113 3114 /************************************************************************ 3115 * ixgbe_config_gpie 3116 ************************************************************************/ 3117 static void 3118 ixgbe_config_gpie(struct adapter *adapter) 3119 { 3120 struct ixgbe_hw *hw = &adapter->hw; 3121 u32 gpie; 3122 3123 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3124 3125 if (adapter->intr_type == IFLIB_INTR_MSIX) { 3126 /* Enable Enhanced MSI-X mode */ 3127 gpie |= IXGBE_GPIE_MSIX_MODE 3128 | IXGBE_GPIE_EIAME 3129 | IXGBE_GPIE_PBA_SUPPORT 3130 | IXGBE_GPIE_OCD; 3131 } 3132 3133 /* Fan Failure Interrupt */ 3134 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 3135 gpie |= IXGBE_SDP1_GPIEN; 3136 3137 /* Thermal Sensor Interrupt */ 3138 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 3139 gpie |= IXGBE_SDP0_GPIEN_X540; 3140 3141 /* Link detection */ 3142 switch (hw->mac.type) { 3143 case ixgbe_mac_82599EB: 3144 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 3145 break; 3146 case ixgbe_mac_X550EM_x: 3147 case ixgbe_mac_X550EM_a: 3148 gpie |= IXGBE_SDP0_GPIEN_X540; 3149 break; 3150 default: 3151 break; 3152 } 3153 3154 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3155 3156 } /* ixgbe_config_gpie */ 3157 3158 /************************************************************************ 3159 * ixgbe_config_delay_values 3160 * 3161 * Requires adapter->max_frame_size to be set. 3162 ************************************************************************/ 3163 static void 3164 ixgbe_config_delay_values(struct adapter *adapter) 3165 { 3166 struct ixgbe_hw *hw = &adapter->hw; 3167 u32 rxpb, frame, size, tmp; 3168 3169 frame = adapter->max_frame_size; 3170 3171 /* Calculate High Water */ 3172 switch (hw->mac.type) { 3173 case ixgbe_mac_X540: 3174 case ixgbe_mac_X550: 3175 case ixgbe_mac_X550EM_x: 3176 case ixgbe_mac_X550EM_a: 3177 tmp = IXGBE_DV_X540(frame, frame); 3178 break; 3179 default: 3180 tmp = IXGBE_DV(frame, frame); 3181 break; 3182 } 3183 size = IXGBE_BT2KB(tmp); 3184 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 3185 hw->fc.high_water[0] = rxpb - size; 3186 3187 /* Now calculate Low Water */ 3188 switch (hw->mac.type) { 3189 case ixgbe_mac_X540: 3190 case ixgbe_mac_X550: 3191 case ixgbe_mac_X550EM_x: 3192 case ixgbe_mac_X550EM_a: 3193 tmp = IXGBE_LOW_DV_X540(frame); 3194 break; 3195 default: 3196 tmp = IXGBE_LOW_DV(frame); 3197 break; 3198 } 3199 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 3200 3201 hw->fc.pause_time = IXGBE_FC_PAUSE; 3202 hw->fc.send_xon = TRUE; 3203 } /* ixgbe_config_delay_values */ 3204 3205 /************************************************************************ 3206 * ixgbe_set_multi - Multicast Update 3207 * 3208 * Called whenever multicast address list is updated. 
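 * Addresses are first gathered into the soft mta[] array by
 * ixgbe_mc_filter_apply() and then pushed to the shared code from
 * ixgbe_if_multi_set().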
3209 ************************************************************************/ 3210 static u_int 3211 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count) 3212 { 3213 struct adapter *adapter = arg; 3214 struct ixgbe_mc_addr *mta = adapter->mta; 3215 3216 if (count == MAX_NUM_MULTICAST_ADDRESSES) 3217 return (0); 3218 bcopy(LLADDR(sdl), mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3219 mta[count].vmdq = adapter->pool; 3220 3221 return (1); 3222 } /* ixgbe_mc_filter_apply */ 3223 3224 static void 3225 ixgbe_if_multi_set(if_ctx_t ctx) 3226 { 3227 struct adapter *adapter = iflib_get_softc(ctx); 3228 struct ixgbe_mc_addr *mta; 3229 struct ifnet *ifp = iflib_get_ifp(ctx); 3230 u8 *update_ptr; 3231 u32 fctrl; 3232 u_int mcnt; 3233 3234 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin"); 3235 3236 mta = adapter->mta; 3237 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 3238 3239 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, 3240 adapter); 3241 3242 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3243 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3244 if (ifp->if_flags & IFF_PROMISC) 3245 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3246 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 3247 ifp->if_flags & IFF_ALLMULTI) { 3248 fctrl |= IXGBE_FCTRL_MPE; 3249 fctrl &= ~IXGBE_FCTRL_UPE; 3250 } else 3251 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3252 3253 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 3254 3255 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { 3256 update_ptr = (u8 *)mta; 3257 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 3258 ixgbe_mc_array_itr, TRUE); 3259 } 3260 3261 } /* ixgbe_if_multi_set */ 3262 3263 /************************************************************************ 3264 * ixgbe_mc_array_itr 3265 * 3266 * An iterator function needed by the multicast shared code. 3267 * It feeds the shared code routine the addresses in the 3268 * array of ixgbe_set_multi() one by one. 3269 ************************************************************************/ 3270 static u8 * 3271 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 3272 { 3273 struct ixgbe_mc_addr *mta; 3274 3275 mta = (struct ixgbe_mc_addr *)*update_ptr; 3276 *vmdq = mta->vmdq; 3277 3278 *update_ptr = (u8*)(mta + 1); 3279 3280 return (mta->addr); 3281 } /* ixgbe_mc_array_itr */ 3282 3283 /************************************************************************ 3284 * ixgbe_local_timer - Timer routine 3285 * 3286 * Checks for link status, updates statistics, 3287 * and runs the watchdog check. 3288 ************************************************************************/ 3289 static void 3290 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid) 3291 { 3292 struct adapter *adapter = iflib_get_softc(ctx); 3293 3294 if (qid != 0) 3295 return; 3296 3297 /* Check for pluggable optics */ 3298 if (adapter->sfp_probe) 3299 if (!ixgbe_sfp_probe(ctx)) 3300 return; /* Nothing to do */ 3301 3302 ixgbe_check_link(&adapter->hw, &adapter->link_speed, 3303 &adapter->link_up, 0); 3304 3305 /* Fire off the adminq task */ 3306 iflib_admin_intr_deferred(ctx); 3307 3308 } /* ixgbe_if_timer */ 3309 3310 /************************************************************************ 3311 * ixgbe_sfp_probe 3312 * 3313 * Determine if a port had optics inserted. 
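 * Returns TRUE only once a supported module has been identified and the
 * PHY reset; FALSE otherwise.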
3314 ************************************************************************/ 3315 static bool 3316 ixgbe_sfp_probe(if_ctx_t ctx) 3317 { 3318 struct adapter *adapter = iflib_get_softc(ctx); 3319 struct ixgbe_hw *hw = &adapter->hw; 3320 device_t dev = iflib_get_dev(ctx); 3321 bool result = FALSE; 3322 3323 if ((hw->phy.type == ixgbe_phy_nl) && 3324 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3325 s32 ret = hw->phy.ops.identify_sfp(hw); 3326 if (ret) 3327 goto out; 3328 ret = hw->phy.ops.reset(hw); 3329 adapter->sfp_probe = FALSE; 3330 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3331 device_printf(dev, "Unsupported SFP+ module detected!"); 3332 device_printf(dev, 3333 "Reload driver with supported module.\n"); 3334 goto out; 3335 } else 3336 device_printf(dev, "SFP+ module detected!\n"); 3337 /* We now have supported optics */ 3338 result = TRUE; 3339 } 3340 out: 3341 3342 return (result); 3343 } /* ixgbe_sfp_probe */ 3344 3345 /************************************************************************ 3346 * ixgbe_handle_mod - Tasklet for SFP module interrupts 3347 ************************************************************************/ 3348 static void 3349 ixgbe_handle_mod(void *context) 3350 { 3351 if_ctx_t ctx = context; 3352 struct adapter *adapter = iflib_get_softc(ctx); 3353 struct ixgbe_hw *hw = &adapter->hw; 3354 device_t dev = iflib_get_dev(ctx); 3355 u32 err, cage_full = 0; 3356 3357 if (adapter->hw.need_crosstalk_fix) { 3358 switch (hw->mac.type) { 3359 case ixgbe_mac_82599EB: 3360 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3361 IXGBE_ESDP_SDP2; 3362 break; 3363 case ixgbe_mac_X550EM_x: 3364 case ixgbe_mac_X550EM_a: 3365 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3366 IXGBE_ESDP_SDP0; 3367 break; 3368 default: 3369 break; 3370 } 3371 3372 if (!cage_full) 3373 goto handle_mod_out; 3374 } 3375 3376 err = hw->phy.ops.identify_sfp(hw); 3377 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3378 device_printf(dev, 3379 "Unsupported SFP+ module type was detected.\n"); 3380 goto handle_mod_out; 3381 } 3382 3383 if (hw->mac.type == ixgbe_mac_82598EB) 3384 err = hw->phy.ops.reset(hw); 3385 else 3386 err = hw->mac.ops.setup_sfp(hw); 3387 3388 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3389 device_printf(dev, 3390 "Setup failure - unsupported SFP+ module type.\n"); 3391 goto handle_mod_out; 3392 } 3393 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 3394 return; 3395 3396 handle_mod_out: 3397 adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF); 3398 } /* ixgbe_handle_mod */ 3399 3400 3401 /************************************************************************ 3402 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts 3403 ************************************************************************/ 3404 static void 3405 ixgbe_handle_msf(void *context) 3406 { 3407 if_ctx_t ctx = context; 3408 struct adapter *adapter = iflib_get_softc(ctx); 3409 struct ixgbe_hw *hw = &adapter->hw; 3410 u32 autoneg; 3411 bool negotiate; 3412 3413 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 3414 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 3415 3416 autoneg = hw->phy.autoneg_advertised; 3417 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3418 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3419 if (hw->mac.ops.setup_link) 3420 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3421 3422 /* Adjust media types shown in ifconfig */ 3423 ifmedia_removeall(adapter->media); 3424 ixgbe_add_media_types(adapter->ctx); 3425 ifmedia_set(adapter->media, 
IFM_ETHER | IFM_AUTO); 3426 } /* ixgbe_handle_msf */ 3427 3428 /************************************************************************ 3429 * ixgbe_handle_phy - Tasklet for external PHY interrupts 3430 ************************************************************************/ 3431 static void 3432 ixgbe_handle_phy(void *context) 3433 { 3434 if_ctx_t ctx = context; 3435 struct adapter *adapter = iflib_get_softc(ctx); 3436 struct ixgbe_hw *hw = &adapter->hw; 3437 int error; 3438 3439 error = hw->phy.ops.handle_lasi(hw); 3440 if (error == IXGBE_ERR_OVERTEMP) 3441 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n"); 3442 else if (error) 3443 device_printf(adapter->dev, 3444 "Error handling LASI interrupt: %d\n", error); 3445 } /* ixgbe_handle_phy */ 3446 3447 /************************************************************************ 3448 * ixgbe_if_stop - Stop the hardware 3449 * 3450 * Disables all traffic on the adapter by issuing a 3451 * global reset on the MAC and deallocates TX/RX buffers. 3452 ************************************************************************/ 3453 static void 3454 ixgbe_if_stop(if_ctx_t ctx) 3455 { 3456 struct adapter *adapter = iflib_get_softc(ctx); 3457 struct ixgbe_hw *hw = &adapter->hw; 3458 3459 INIT_DEBUGOUT("ixgbe_if_stop: begin\n"); 3460 3461 ixgbe_reset_hw(hw); 3462 hw->adapter_stopped = FALSE; 3463 ixgbe_stop_adapter(hw); 3464 if (hw->mac.type == ixgbe_mac_82599EB) 3465 ixgbe_stop_mac_link_on_d3_82599(hw); 3466 /* Turn off the laser - noop with no optics */ 3467 ixgbe_disable_tx_laser(hw); 3468 3469 /* Update the stack */ 3470 adapter->link_up = FALSE; 3471 ixgbe_if_update_admin_status(ctx); 3472 3473 /* reprogram the RAR[0] in case user changed it. */ 3474 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 3475 3476 return; 3477 } /* ixgbe_if_stop */ 3478 3479 /************************************************************************ 3480 * ixgbe_update_link_status - Update OS on link state 3481 * 3482 * Note: Only updates the OS on the cached link state. 3483 * The real check of the hardware only happens with 3484 * a link interrupt. 3485 ************************************************************************/ 3486 static void 3487 ixgbe_if_update_admin_status(if_ctx_t ctx) 3488 { 3489 struct adapter *adapter = iflib_get_softc(ctx); 3490 device_t dev = iflib_get_dev(ctx); 3491 3492 if (adapter->link_up) { 3493 if (adapter->link_active == FALSE) { 3494 if (bootverbose) 3495 device_printf(dev, "Link is up %d Gbps %s \n", 3496 ((adapter->link_speed == 128) ? 
10 : 1), 3497 "Full Duplex"); 3498 adapter->link_active = TRUE; 3499 /* Update any Flow Control changes */ 3500 ixgbe_fc_enable(&adapter->hw); 3501 /* Update DMA coalescing config */ 3502 ixgbe_config_dmac(adapter); 3503 /* should actually be negotiated value */ 3504 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); 3505 3506 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3507 ixgbe_ping_all_vfs(adapter); 3508 } 3509 } else { /* Link down */ 3510 if (adapter->link_active == TRUE) { 3511 if (bootverbose) 3512 device_printf(dev, "Link is Down\n"); 3513 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 3514 adapter->link_active = FALSE; 3515 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3516 ixgbe_ping_all_vfs(adapter); 3517 } 3518 } 3519 3520 /* Handle task requests from msix_link() */ 3521 if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD) 3522 ixgbe_handle_mod(ctx); 3523 if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF) 3524 ixgbe_handle_msf(ctx); 3525 if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX) 3526 ixgbe_handle_mbx(ctx); 3527 if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR) 3528 ixgbe_reinit_fdir(ctx); 3529 if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY) 3530 ixgbe_handle_phy(ctx); 3531 adapter->task_requests = 0; 3532 3533 ixgbe_update_stats_counters(adapter); 3534 } /* ixgbe_if_update_admin_status */ 3535 3536 /************************************************************************ 3537 * ixgbe_config_dmac - Configure DMA Coalescing 3538 ************************************************************************/ 3539 static void 3540 ixgbe_config_dmac(struct adapter *adapter) 3541 { 3542 struct ixgbe_hw *hw = &adapter->hw; 3543 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 3544 3545 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) 3546 return; 3547 3548 if (dcfg->watchdog_timer ^ adapter->dmac || 3549 dcfg->link_speed ^ adapter->link_speed) { 3550 dcfg->watchdog_timer = adapter->dmac; 3551 dcfg->fcoe_en = FALSE; 3552 dcfg->link_speed = adapter->link_speed; 3553 dcfg->num_tcs = 1; 3554 3555 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 3556 dcfg->watchdog_timer, dcfg->link_speed); 3557 3558 hw->mac.ops.dmac_config(hw); 3559 } 3560 } /* ixgbe_config_dmac */ 3561 3562 /************************************************************************ 3563 * ixgbe_if_enable_intr 3564 ************************************************************************/ 3565 void 3566 ixgbe_if_enable_intr(if_ctx_t ctx) 3567 { 3568 struct adapter *adapter = iflib_get_softc(ctx); 3569 struct ixgbe_hw *hw = &adapter->hw; 3570 struct ix_rx_queue *que = adapter->rx_queues; 3571 u32 mask, fwsm; 3572 3573 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 3574 3575 switch (adapter->hw.mac.type) { 3576 case ixgbe_mac_82599EB: 3577 mask |= IXGBE_EIMS_ECC; 3578 /* Temperature sensor on some adapters */ 3579 mask |= IXGBE_EIMS_GPI_SDP0; 3580 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 3581 mask |= IXGBE_EIMS_GPI_SDP1; 3582 mask |= IXGBE_EIMS_GPI_SDP2; 3583 break; 3584 case ixgbe_mac_X540: 3585 /* Detect if Thermal Sensor is enabled */ 3586 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 3587 if (fwsm & IXGBE_FWSM_TS_ENABLED) 3588 mask |= IXGBE_EIMS_TS; 3589 mask |= IXGBE_EIMS_ECC; 3590 break; 3591 case ixgbe_mac_X550: 3592 /* MAC thermal sensor is automatically enabled */ 3593 mask |= IXGBE_EIMS_TS; 3594 mask |= IXGBE_EIMS_ECC; 3595 break; 3596 case ixgbe_mac_X550EM_x: 3597 case ixgbe_mac_X550EM_a: 3598 /* Some devices use SDP0 for important information */ 3599 if 
(hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3600 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 3601 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 3602 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) 3603 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 3604 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 3605 mask |= IXGBE_EICR_GPI_SDP0_X540; 3606 mask |= IXGBE_EIMS_ECC; 3607 break; 3608 default: 3609 break; 3610 } 3611 3612 /* Enable Fan Failure detection */ 3613 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 3614 mask |= IXGBE_EIMS_GPI_SDP1; 3615 /* Enable SR-IOV */ 3616 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3617 mask |= IXGBE_EIMS_MAILBOX; 3618 /* Enable Flow Director */ 3619 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 3620 mask |= IXGBE_EIMS_FLOW_DIR; 3621 3622 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3623 3624 /* With MSI-X we use auto clear */ 3625 if (adapter->intr_type == IFLIB_INTR_MSIX) { 3626 mask = IXGBE_EIMS_ENABLE_MASK; 3627 /* Don't autoclear Link */ 3628 mask &= ~IXGBE_EIMS_OTHER; 3629 mask &= ~IXGBE_EIMS_LSC; 3630 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 3631 mask &= ~IXGBE_EIMS_MAILBOX; 3632 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 3633 } 3634 3635 /* 3636 * Now enable all queues, this is done separately to 3637 * allow for handling the extended (beyond 32) MSI-X 3638 * vectors that can be used by 82599 3639 */ 3640 for (int i = 0; i < adapter->num_rx_queues; i++, que++) 3641 ixgbe_enable_queue(adapter, que->msix); 3642 3643 IXGBE_WRITE_FLUSH(hw); 3644 3645 } /* ixgbe_if_enable_intr */ 3646 3647 /************************************************************************ 3648 * ixgbe_disable_intr 3649 ************************************************************************/ 3650 static void 3651 ixgbe_if_disable_intr(if_ctx_t ctx) 3652 { 3653 struct adapter *adapter = iflib_get_softc(ctx); 3654 3655 if (adapter->intr_type == IFLIB_INTR_MSIX) 3656 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); 3657 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3658 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3659 } else { 3660 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3662 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3663 } 3664 IXGBE_WRITE_FLUSH(&adapter->hw); 3665 3666 } /* ixgbe_if_disable_intr */ 3667 3668 /************************************************************************ 3669 * ixgbe_link_intr_enable 3670 ************************************************************************/ 3671 static void 3672 ixgbe_link_intr_enable(if_ctx_t ctx) 3673 { 3674 struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw; 3675 3676 /* Re-enable other interrupts */ 3677 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 3678 } /* ixgbe_link_intr_enable */ 3679 3680 /************************************************************************ 3681 * ixgbe_if_rx_queue_intr_enable 3682 ************************************************************************/ 3683 static int 3684 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 3685 { 3686 struct adapter *adapter = iflib_get_softc(ctx); 3687 struct ix_rx_queue *que = &adapter->rx_queues[rxqid]; 3688 3689 ixgbe_enable_queue(adapter, que->msix); 3690 3691 return (0); 3692 } /* ixgbe_if_rx_queue_intr_enable */ 3693 3694 /************************************************************************ 3695 * ixgbe_enable_queue 3696 ************************************************************************/ 3697 static void 3698 ixgbe_enable_queue(struct 
adapter *adapter, u32 vector) 3699 { 3700 struct ixgbe_hw *hw = &adapter->hw; 3701 u64 queue = 1ULL << vector; 3702 u32 mask; 3703 3704 if (hw->mac.type == ixgbe_mac_82598EB) { 3705 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3706 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3707 } else { 3708 mask = (queue & 0xFFFFFFFF); 3709 if (mask) 3710 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 3711 mask = (queue >> 32); 3712 if (mask) 3713 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 3714 } 3715 } /* ixgbe_enable_queue */ 3716 3717 /************************************************************************ 3718 * ixgbe_disable_queue 3719 ************************************************************************/ 3720 static void 3721 ixgbe_disable_queue(struct adapter *adapter, u32 vector) 3722 { 3723 struct ixgbe_hw *hw = &adapter->hw; 3724 u64 queue = 1ULL << vector; 3725 u32 mask; 3726 3727 if (hw->mac.type == ixgbe_mac_82598EB) { 3728 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3729 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 3730 } else { 3731 mask = (queue & 0xFFFFFFFF); 3732 if (mask) 3733 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 3734 mask = (queue >> 32); 3735 if (mask) 3736 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 3737 } 3738 } /* ixgbe_disable_queue */ 3739 3740 /************************************************************************ 3741 * ixgbe_intr - Legacy Interrupt Service Routine 3742 ************************************************************************/ 3743 int 3744 ixgbe_intr(void *arg) 3745 { 3746 struct adapter *adapter = arg; 3747 struct ix_rx_queue *que = adapter->rx_queues; 3748 struct ixgbe_hw *hw = &adapter->hw; 3749 if_ctx_t ctx = adapter->ctx; 3750 u32 eicr, eicr_mask; 3751 3752 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3753 3754 ++que->irqs; 3755 if (eicr == 0) { 3756 ixgbe_if_enable_intr(ctx); 3757 return (FILTER_HANDLED); 3758 } 3759 3760 /* Check for fan failure */ 3761 if ((hw->device_id == IXGBE_DEV_ID_82598AT) && 3762 (eicr & IXGBE_EICR_GPI_SDP1)) { 3763 device_printf(adapter->dev, 3764 "\nCRITICAL: FAN FAILURE!! 
REPLACE IMMEDIATELY!!\n"); 3765 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3766 } 3767 3768 /* Link status change */ 3769 if (eicr & IXGBE_EICR_LSC) { 3770 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 3771 iflib_admin_intr_deferred(ctx); 3772 } 3773 3774 if (ixgbe_is_sfp(hw)) { 3775 /* Pluggable optics-related interrupt */ 3776 if (hw->mac.type >= ixgbe_mac_X540) 3777 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 3778 else 3779 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 3780 3781 if (eicr & eicr_mask) { 3782 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 3783 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; 3784 } 3785 3786 if ((hw->mac.type == ixgbe_mac_82599EB) && 3787 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 3788 IXGBE_WRITE_REG(hw, IXGBE_EICR, 3789 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3790 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 3791 } 3792 } 3793 3794 /* External PHY interrupt */ 3795 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 3796 (eicr & IXGBE_EICR_GPI_SDP0_X540)) 3797 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY; 3798 3799 return (FILTER_SCHEDULE_THREAD); 3800 } /* ixgbe_intr */ 3801 3802 /************************************************************************ 3803 * ixgbe_free_pci_resources 3804 ************************************************************************/ 3805 static void 3806 ixgbe_free_pci_resources(if_ctx_t ctx) 3807 { 3808 struct adapter *adapter = iflib_get_softc(ctx); 3809 struct ix_rx_queue *que = adapter->rx_queues; 3810 device_t dev = iflib_get_dev(ctx); 3811 3812 /* Release all MSI-X queue resources */ 3813 if (adapter->intr_type == IFLIB_INTR_MSIX) 3814 iflib_irq_free(ctx, &adapter->irq); 3815 3816 if (que != NULL) { 3817 for (int i = 0; i < adapter->num_rx_queues; i++, que++) { 3818 iflib_irq_free(ctx, &que->que_irq); 3819 } 3820 } 3821 3822 if (adapter->pci_mem != NULL) 3823 bus_release_resource(dev, SYS_RES_MEMORY, 3824 rman_get_rid(adapter->pci_mem), adapter->pci_mem); 3825 } /* ixgbe_free_pci_resources */ 3826 3827 /************************************************************************ 3828 * ixgbe_sysctl_flowcntl 3829 * 3830 * SYSCTL wrapper around setting Flow Control 3831 ************************************************************************/ 3832 static int 3833 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS) 3834 { 3835 struct adapter *adapter; 3836 int error, fc; 3837 3838 adapter = (struct adapter *)arg1; 3839 fc = adapter->hw.fc.current_mode; 3840 3841 error = sysctl_handle_int(oidp, &fc, 0, req); 3842 if ((error) || (req->newptr == NULL)) 3843 return (error); 3844 3845 /* Don't bother if it's not changed */ 3846 if (fc == adapter->hw.fc.current_mode) 3847 return (0); 3848 3849 return ixgbe_set_flowcntl(adapter, fc); 3850 } /* ixgbe_sysctl_flowcntl */ 3851 3852 /************************************************************************ 3853 * ixgbe_set_flowcntl - Set flow control 3854 * 3855 * Flow control values: 3856 * 0 - off 3857 * 1 - rx pause 3858 * 2 - tx pause 3859 * 3 - full 3860 ************************************************************************/ 3861 static int 3862 ixgbe_set_flowcntl(struct adapter *adapter, int fc) 3863 { 3864 switch (fc) { 3865 case ixgbe_fc_rx_pause: 3866 case ixgbe_fc_tx_pause: 3867 case ixgbe_fc_full: 3868 adapter->hw.fc.requested_mode = fc; 3869 if (adapter->num_rx_queues > 1) 3870 ixgbe_disable_rx_drop(adapter); 3871 break; 3872 case ixgbe_fc_none: 3873 adapter->hw.fc.requested_mode = ixgbe_fc_none; 3874 if (adapter->num_rx_queues > 1) 3875 ixgbe_enable_rx_drop(adapter); 
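		/* With flow control off, let a full ring drop packets so it
		 * cannot stall the other queues; see ixgbe_enable_rx_drop()
		 * below. */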
3876 break; 3877 default: 3878 return (EINVAL); 3879 } 3880 3881 /* Don't autoneg if forcing a value */ 3882 adapter->hw.fc.disable_fc_autoneg = TRUE; 3883 ixgbe_fc_enable(&adapter->hw); 3884 3885 return (0); 3886 } /* ixgbe_set_flowcntl */ 3887 3888 /************************************************************************ 3889 * ixgbe_enable_rx_drop 3890 * 3891 * Enable the hardware to drop packets when the buffer is 3892 * full. This is useful with multiqueue, so that no single 3893 * queue being full stalls the entire RX engine. We only 3894 * enable this when Multiqueue is enabled AND Flow Control 3895 * is disabled. 3896 ************************************************************************/ 3897 static void 3898 ixgbe_enable_rx_drop(struct adapter *adapter) 3899 { 3900 struct ixgbe_hw *hw = &adapter->hw; 3901 struct rx_ring *rxr; 3902 u32 srrctl; 3903 3904 for (int i = 0; i < adapter->num_rx_queues; i++) { 3905 rxr = &adapter->rx_queues[i].rxr; 3906 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 3907 srrctl |= IXGBE_SRRCTL_DROP_EN; 3908 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 3909 } 3910 3911 /* enable drop for each vf */ 3912 for (int i = 0; i < adapter->num_vfs; i++) { 3913 IXGBE_WRITE_REG(hw, IXGBE_QDE, 3914 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | 3915 IXGBE_QDE_ENABLE)); 3916 } 3917 } /* ixgbe_enable_rx_drop */ 3918 3919 /************************************************************************ 3920 * ixgbe_disable_rx_drop 3921 ************************************************************************/ 3922 static void 3923 ixgbe_disable_rx_drop(struct adapter *adapter) 3924 { 3925 struct ixgbe_hw *hw = &adapter->hw; 3926 struct rx_ring *rxr; 3927 u32 srrctl; 3928 3929 for (int i = 0; i < adapter->num_rx_queues; i++) { 3930 rxr = &adapter->rx_queues[i].rxr; 3931 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 3932 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 3933 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 3934 } 3935 3936 /* disable drop for each vf */ 3937 for (int i = 0; i < adapter->num_vfs; i++) { 3938 IXGBE_WRITE_REG(hw, IXGBE_QDE, 3939 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); 3940 } 3941 } /* ixgbe_disable_rx_drop */ 3942 3943 /************************************************************************ 3944 * ixgbe_sysctl_advertise 3945 * 3946 * SYSCTL wrapper around setting advertised speed 3947 ************************************************************************/ 3948 static int 3949 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS) 3950 { 3951 struct adapter *adapter; 3952 int error, advertise; 3953 3954 adapter = (struct adapter *)arg1; 3955 advertise = adapter->advertise; 3956 3957 error = sysctl_handle_int(oidp, &advertise, 0, req); 3958 if ((error) || (req->newptr == NULL)) 3959 return (error); 3960 3961 return ixgbe_set_advertise(adapter, advertise); 3962 } /* ixgbe_sysctl_advertise */ 3963 3964 /************************************************************************ 3965 * ixgbe_set_advertise - Control advertised link speed 3966 * 3967 * Flags: 3968 * 0x1 - advertise 100 Mb 3969 * 0x2 - advertise 1G 3970 * 0x4 - advertise 10G 3971 * 0x8 - advertise 10 Mb (yes, Mb) 3972 ************************************************************************/ 3973 static int 3974 ixgbe_set_advertise(struct adapter *adapter, int advertise) 3975 { 3976 device_t dev = iflib_get_dev(adapter->ctx); 3977 struct ixgbe_hw *hw; 3978 ixgbe_link_speed speed = 0; 3979 ixgbe_link_speed link_caps = 0; 3980 s32 err = IXGBE_NOT_IMPLEMENTED; 3981 bool negotiate = FALSE; 
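	/*
	 * Illustrative example (not from the original source): a value of
	 * 0x6 requests that both 1G and 10G be advertised.  The sysctl
	 * exposing this handler is registered elsewhere in this file; the
	 * node name (assumed here) is typically dev.ix.<unit>.advertise_speed.
	 */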
3982 3983 /* Checks to validate new value */ 3984 if (adapter->advertise == advertise) /* no change */ 3985 return (0); 3986 3987 hw = &adapter->hw; 3988 3989 /* No speed changes for backplane media */ 3990 if (hw->phy.media_type == ixgbe_media_type_backplane) 3991 return (ENODEV); 3992 3993 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 3994 (hw->phy.multispeed_fiber))) { 3995 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n"); 3996 return (EINVAL); 3997 } 3998 3999 if (advertise < 0x1 || advertise > 0xF) { 4000 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n"); 4001 return (EINVAL); 4002 } 4003 4004 if (hw->mac.ops.get_link_capabilities) { 4005 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 4006 &negotiate); 4007 if (err != IXGBE_SUCCESS) { 4008 device_printf(dev, "Unable to determine supported advertise speeds\n"); 4009 return (ENODEV); 4010 } 4011 } 4012 4013 /* Set new value and report new advertised mode */ 4014 if (advertise & 0x1) { 4015 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) { 4016 device_printf(dev, "Interface does not support 100Mb advertised speed\n"); 4017 return (EINVAL); 4018 } 4019 speed |= IXGBE_LINK_SPEED_100_FULL; 4020 } 4021 if (advertise & 0x2) { 4022 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { 4023 device_printf(dev, "Interface does not support 1Gb advertised speed\n"); 4024 return (EINVAL); 4025 } 4026 speed |= IXGBE_LINK_SPEED_1GB_FULL; 4027 } 4028 if (advertise & 0x4) { 4029 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { 4030 device_printf(dev, "Interface does not support 10Gb advertised speed\n"); 4031 return (EINVAL); 4032 } 4033 speed |= IXGBE_LINK_SPEED_10GB_FULL; 4034 } 4035 if (advertise & 0x8) { 4036 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { 4037 device_printf(dev, "Interface does not support 10Mb advertised speed\n"); 4038 return (EINVAL); 4039 } 4040 speed |= IXGBE_LINK_SPEED_10_FULL; 4041 } 4042 4043 hw->mac.autotry_restart = TRUE; 4044 hw->mac.ops.setup_link(hw, speed, TRUE); 4045 adapter->advertise = advertise; 4046 4047 return (0); 4048 } /* ixgbe_set_advertise */ 4049 4050 /************************************************************************ 4051 * ixgbe_get_advertise - Get current advertised speed settings 4052 * 4053 * Formatted for sysctl usage. 4054 * Flags: 4055 * 0x1 - advertise 100 Mb 4056 * 0x2 - advertise 1G 4057 * 0x4 - advertise 10G 4058 * 0x8 - advertise 10 Mb (yes, Mb) 4059 ************************************************************************/ 4060 static int 4061 ixgbe_get_advertise(struct adapter *adapter) 4062 { 4063 struct ixgbe_hw *hw = &adapter->hw; 4064 int speed; 4065 ixgbe_link_speed link_caps = 0; 4066 s32 err; 4067 bool negotiate = FALSE; 4068 4069 /* 4070 * Advertised speed means nothing unless it's copper or 4071 * multi-speed fiber 4072 */ 4073 if (!(hw->phy.media_type == ixgbe_media_type_copper) && 4074 !(hw->phy.multispeed_fiber)) 4075 return (0); 4076 4077 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 4078 if (err != IXGBE_SUCCESS) 4079 return (0); 4080 4081 speed = 4082 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 4083 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 4084 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 4085 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 
8 : 0); 4086 4087 return speed; 4088 } /* ixgbe_get_advertise */ 4089 4090 /************************************************************************ 4091 * ixgbe_sysctl_dmac - Manage DMA Coalescing 4092 * 4093 * Control values: 4094 * 0/1 - off / on (use default value of 1000) 4095 * 4096 * Legal timer values are: 4097 * 50,100,250,500,1000,2000,5000,10000 4098 * 4099 * Turning off interrupt moderation will also turn this off. 4100 ************************************************************************/ 4101 static int 4102 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4103 { 4104 struct adapter *adapter = (struct adapter *)arg1; 4105 struct ifnet *ifp = iflib_get_ifp(adapter->ctx); 4106 int error; 4107 u16 newval; 4108 4109 newval = adapter->dmac; 4110 error = sysctl_handle_16(oidp, &newval, 0, req); 4111 if ((error) || (req->newptr == NULL)) 4112 return (error); 4113 4114 switch (newval) { 4115 case 0: 4116 /* Disabled */ 4117 adapter->dmac = 0; 4118 break; 4119 case 1: 4120 /* Enable and use default */ 4121 adapter->dmac = 1000; 4122 break; 4123 case 50: 4124 case 100: 4125 case 250: 4126 case 500: 4127 case 1000: 4128 case 2000: 4129 case 5000: 4130 case 10000: 4131 /* Legal values - allow */ 4132 adapter->dmac = newval; 4133 break; 4134 default: 4135 /* Do nothing, illegal value */ 4136 return (EINVAL); 4137 } 4138 4139 /* Re-initialize hardware if it's already running */ 4140 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4141 ifp->if_init(ifp); 4142 4143 return (0); 4144 } /* ixgbe_sysctl_dmac */ 4145 4146 #ifdef IXGBE_DEBUG 4147 /************************************************************************ 4148 * ixgbe_sysctl_power_state 4149 * 4150 * Sysctl to test power states 4151 * Values: 4152 * 0 - set device to D0 4153 * 3 - set device to D3 4154 * (none) - get current device power state 4155 ************************************************************************/ 4156 static int 4157 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS) 4158 { 4159 struct adapter *adapter = (struct adapter *)arg1; 4160 device_t dev = adapter->dev; 4161 int curr_ps, new_ps, error = 0; 4162 4163 curr_ps = new_ps = pci_get_powerstate(dev); 4164 4165 error = sysctl_handle_int(oidp, &new_ps, 0, req); 4166 if ((error) || (req->newptr == NULL)) 4167 return (error); 4168 4169 if (new_ps == curr_ps) 4170 return (0); 4171 4172 if (new_ps == 3 && curr_ps == 0) 4173 error = DEVICE_SUSPEND(dev); 4174 else if (new_ps == 0 && curr_ps == 3) 4175 error = DEVICE_RESUME(dev); 4176 else 4177 return (EINVAL); 4178 4179 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 4180 4181 return (error); 4182 } /* ixgbe_sysctl_power_state */ 4183 #endif 4184 4185 /************************************************************************ 4186 * ixgbe_sysctl_wol_enable 4187 * 4188 * Sysctl to enable/disable the WoL capability, 4189 * if supported by the adapter. 
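 * WoL support itself is detected at attach; this handler only toggles
 * hw->wol_enabled and returns ENODEV when the adapter lacks support.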
4190 * 4191 * Values: 4192 * 0 - disabled 4193 * 1 - enabled 4194 ************************************************************************/ 4195 static int 4196 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) 4197 { 4198 struct adapter *adapter = (struct adapter *)arg1; 4199 struct ixgbe_hw *hw = &adapter->hw; 4200 int new_wol_enabled; 4201 int error = 0; 4202 4203 new_wol_enabled = hw->wol_enabled; 4204 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); 4205 if ((error) || (req->newptr == NULL)) 4206 return (error); 4207 new_wol_enabled = !!(new_wol_enabled); 4208 if (new_wol_enabled == hw->wol_enabled) 4209 return (0); 4210 4211 if (new_wol_enabled > 0 && !adapter->wol_support) 4212 return (ENODEV); 4213 else 4214 hw->wol_enabled = new_wol_enabled; 4215 4216 return (0); 4217 } /* ixgbe_sysctl_wol_enable */ 4218 4219 /************************************************************************ 4220 * ixgbe_sysctl_wufc - Wake Up Filter Control 4221 * 4222 * Sysctl to enable/disable the types of packets that the 4223 * adapter will wake up on upon receipt. 4224 * Flags: 4225 * 0x1 - Link Status Change 4226 * 0x2 - Magic Packet 4227 * 0x4 - Direct Exact 4228 * 0x8 - Directed Multicast 4229 * 0x10 - Broadcast 4230 * 0x20 - ARP/IPv4 Request Packet 4231 * 0x40 - Direct IPv4 Packet 4232 * 0x80 - Direct IPv6 Packet 4233 * 4234 * Settings not listed above will cause the sysctl to return an error. 4235 ************************************************************************/ 4236 static int 4237 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS) 4238 { 4239 struct adapter *adapter = (struct adapter *)arg1; 4240 int error = 0; 4241 u32 new_wufc; 4242 4243 new_wufc = adapter->wufc; 4244 4245 error = sysctl_handle_32(oidp, &new_wufc, 0, req); 4246 if ((error) || (req->newptr == NULL)) 4247 return (error); 4248 if (new_wufc == adapter->wufc) 4249 return (0); 4250 4251 if (new_wufc & 0xffffff00) 4252 return (EINVAL); 4253 4254 new_wufc &= 0xff; 4255 new_wufc |= (0xffffff & adapter->wufc); 4256 adapter->wufc = new_wufc; 4257 4258 return (0); 4259 } /* ixgbe_sysctl_wufc */ 4260 4261 #ifdef IXGBE_DEBUG 4262 /************************************************************************ 4263 * ixgbe_sysctl_print_rss_config 4264 ************************************************************************/ 4265 static int 4266 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS) 4267 { 4268 struct adapter *adapter = (struct adapter *)arg1; 4269 struct ixgbe_hw *hw = &adapter->hw; 4270 device_t dev = adapter->dev; 4271 struct sbuf *buf; 4272 int error = 0, reta_size; 4273 u32 reg; 4274 4275 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4276 if (!buf) { 4277 device_printf(dev, "Could not allocate sbuf for output.\n"); 4278 return (ENOMEM); 4279 } 4280 4281 // TODO: use sbufs to make a string to print out 4282 /* Set multiplier for RETA setup and table size based on MAC */ 4283 switch (adapter->hw.mac.type) { 4284 case ixgbe_mac_X550: 4285 case ixgbe_mac_X550EM_x: 4286 case ixgbe_mac_X550EM_a: 4287 reta_size = 128; 4288 break; 4289 default: 4290 reta_size = 32; 4291 break; 4292 } 4293 4294 /* Print out the redirection table */ 4295 sbuf_cat(buf, "\n"); 4296 for (int i = 0; i < reta_size; i++) { 4297 if (i < 32) { 4298 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); 4299 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); 4300 } else { 4301 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); 4302 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); 4303 } 4304 } 4305 4306 // TODO: print more config 4307 4308 error = sbuf_finish(buf); 4309 if 
(error) 4310 device_printf(dev, "Error finishing sbuf: %d\n", error); 4311 4312 sbuf_delete(buf); 4313 4314 return (0); 4315 } /* ixgbe_sysctl_print_rss_config */ 4316 #endif /* IXGBE_DEBUG */ 4317 4318 /************************************************************************ 4319 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY 4320 * 4321 * For X552/X557-AT devices using an external PHY 4322 ************************************************************************/ 4323 static int 4324 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) 4325 { 4326 struct adapter *adapter = (struct adapter *)arg1; 4327 struct ixgbe_hw *hw = &adapter->hw; 4328 u16 reg; 4329 4330 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4331 device_printf(iflib_get_dev(adapter->ctx), 4332 "Device has no supported external thermal sensor.\n"); 4333 return (ENODEV); 4334 } 4335 4336 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 4337 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4338 device_printf(iflib_get_dev(adapter->ctx), 4339 "Error reading from PHY's current temperature register\n"); 4340 return (EAGAIN); 4341 } 4342 4343 /* Shift temp for output */ 4344 reg = reg >> 8; 4345 4346 return (sysctl_handle_16(oidp, NULL, reg, req)); 4347 } /* ixgbe_sysctl_phy_temp */ 4348 4349 /************************************************************************ 4350 * ixgbe_sysctl_phy_overtemp_occurred 4351 * 4352 * Reports (directly from the PHY) whether the current PHY 4353 * temperature is over the overtemp threshold. 4354 ************************************************************************/ 4355 static int 4356 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) 4357 { 4358 struct adapter *adapter = (struct adapter *)arg1; 4359 struct ixgbe_hw *hw = &adapter->hw; 4360 u16 reg; 4361 4362 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4363 device_printf(iflib_get_dev(adapter->ctx), 4364 "Device has no supported external thermal sensor.\n"); 4365 return (ENODEV); 4366 } 4367 4368 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 4369 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4370 device_printf(iflib_get_dev(adapter->ctx), 4371 "Error reading from PHY's temperature status register\n"); 4372 return (EAGAIN); 4373 } 4374 4375 /* Get occurrence bit */ 4376 reg = !!(reg & 0x4000); 4377 4378 return (sysctl_handle_16(oidp, 0, reg, req)); 4379 } /* ixgbe_sysctl_phy_overtemp_occurred */ 4380 4381 /************************************************************************ 4382 * ixgbe_sysctl_eee_state 4383 * 4384 * Sysctl to set EEE power saving feature 4385 * Values: 4386 * 0 - disable EEE 4387 * 1 - enable EEE 4388 * (none) - get current device EEE state 4389 ************************************************************************/ 4390 static int 4391 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS) 4392 { 4393 struct adapter *adapter = (struct adapter *)arg1; 4394 device_t dev = adapter->dev; 4395 struct ifnet *ifp = iflib_get_ifp(adapter->ctx); 4396 int curr_eee, new_eee, error = 0; 4397 s32 retval; 4398 4399 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE); 4400 4401 error = sysctl_handle_int(oidp, &new_eee, 0, req); 4402 if ((error) || (req->newptr == NULL)) 4403 return (error); 4404 4405 /* Nothing to do */ 4406 if (new_eee == curr_eee) 4407 return (0); 4408 4409 /* Not supported */ 4410 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE)) 4411 return (EINVAL); 4412 4413 /* Bounds checking */ 4414 if ((new_eee < 0) || (new_eee > 1)) 4415 return (EINVAL); 4416 4417 retval = ixgbe_setup_eee(&adapter->hw, 
new_eee); 4418 if (retval) { 4419 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval); 4420 return (EINVAL); 4421 } 4422 4423 /* Restart auto-neg */ 4424 ifp->if_init(ifp); 4425 4426 device_printf(dev, "New EEE state: %d\n", new_eee); 4427 4428 /* Cache new value */ 4429 if (new_eee) 4430 adapter->feat_en |= IXGBE_FEATURE_EEE; 4431 else 4432 adapter->feat_en &= ~IXGBE_FEATURE_EEE; 4433 4434 return (error); 4435 } /* ixgbe_sysctl_eee_state */ 4436 4437 /************************************************************************ 4438 * ixgbe_init_device_features 4439 ************************************************************************/ 4440 static void 4441 ixgbe_init_device_features(struct adapter *adapter) 4442 { 4443 adapter->feat_cap = IXGBE_FEATURE_NETMAP 4444 | IXGBE_FEATURE_RSS 4445 | IXGBE_FEATURE_MSI 4446 | IXGBE_FEATURE_MSIX 4447 | IXGBE_FEATURE_LEGACY_IRQ; 4448 4449 /* Set capabilities first... */ 4450 switch (adapter->hw.mac.type) { 4451 case ixgbe_mac_82598EB: 4452 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT) 4453 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL; 4454 break; 4455 case ixgbe_mac_X540: 4456 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4457 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4458 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && 4459 (adapter->hw.bus.func == 0)) 4460 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 4461 break; 4462 case ixgbe_mac_X550: 4463 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4464 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4465 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4466 break; 4467 case ixgbe_mac_X550EM_x: 4468 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4469 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4470 break; 4471 case ixgbe_mac_X550EM_a: 4472 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4473 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4474 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4475 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || 4476 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { 4477 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4478 adapter->feat_cap |= IXGBE_FEATURE_EEE; 4479 } 4480 break; 4481 case ixgbe_mac_82599EB: 4482 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4483 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4484 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && 4485 (adapter->hw.bus.func == 0)) 4486 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 4487 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) 4488 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4489 break; 4490 default: 4491 break; 4492 } 4493 4494 /* Enabled by default... */ 4495 /* Fan failure detection */ 4496 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL) 4497 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL; 4498 /* Netmap */ 4499 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 4500 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 4501 /* EEE */ 4502 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 4503 adapter->feat_en |= IXGBE_FEATURE_EEE; 4504 /* Thermal Sensor */ 4505 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) 4506 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; 4507 4508 /* Enabled via global sysctl... */ 4509 /* Flow Director */ 4510 if (ixgbe_enable_fdir) { 4511 if (adapter->feat_cap & IXGBE_FEATURE_FDIR) 4512 adapter->feat_en |= IXGBE_FEATURE_FDIR; 4513 else 4514 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled."); 4515 } 4516 /* 4517 * Message Signal Interrupts - Extended (MSI-X) 4518 * Normal MSI is only enabled if MSI-X calls fail. 
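	 * Clearing the MSI-X capability here also forces RSS and SR-IOV off
	 * in the dependency check at the end of this function.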
4519 */ 4520 if (!ixgbe_enable_msix) 4521 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX; 4522 /* Receive-Side Scaling (RSS) */ 4523 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) 4524 adapter->feat_en |= IXGBE_FEATURE_RSS; 4525 4526 /* Disable features with unmet dependencies... */ 4527 /* No MSI-X */ 4528 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) { 4529 adapter->feat_cap &= ~IXGBE_FEATURE_RSS; 4530 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV; 4531 adapter->feat_en &= ~IXGBE_FEATURE_RSS; 4532 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; 4533 } 4534 } /* ixgbe_init_device_features */ 4535 4536 /************************************************************************ 4537 * ixgbe_check_fan_failure 4538 ************************************************************************/ 4539 static void 4540 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt) 4541 { 4542 u32 mask; 4543 4544 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) : 4545 IXGBE_ESDP_SDP1; 4546 4547 if (reg & mask) 4548 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 4549 } /* ixgbe_check_fan_failure */ 4550
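/************************************************************************
 * Usage sketch (illustrative addition, not part of the driver logic)
 *
 * The sysctl handlers above are attached to the per-device tree at
 * attach time; the node names below are assumptions inferred from the
 * handlers and are shown only as an example for unit 0:
 *
 *   sysctl dev.ix.0.fc=3                 # ixgbe_sysctl_flowcntl: full FC
 *   sysctl dev.ix.0.advertise_speed=0x6  # ixgbe_sysctl_advertise: 1G+10G
 *   sysctl dev.ix.0.dmac=1000            # ixgbe_sysctl_dmac: default timer
 *   sysctl dev.ix.0.wol_enable=1         # ixgbe_sysctl_wol_enable
 *   sysctl dev.ix.0.wufc=0x2             # ixgbe_sysctl_wufc: magic packet
 *   sysctl dev.ix.0.eee_state=1          # ixgbe_sysctl_eee_state
 ************************************************************************/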