/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";


/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
    /* required last entry */
    PVID_END
};

static void *ixgbe_register(device_t dev);
static int  ixgbe_if_attach_pre(if_ctx_t ctx);
static int  ixgbe_if_attach_post(if_ctx_t ctx);
static int  ixgbe_if_detach(if_ctx_t ctx);
static int  ixgbe_if_shutdown(if_ctx_t ctx);
static int  ixgbe_if_suspend(if_ctx_t ctx);
static int  ixgbe_if_resume(if_ctx_t ctx);

static void ixgbe_if_stop(if_ctx_t ctx);
void ixgbe_if_enable_intr(if_ctx_t ctx);
static void ixgbe_if_disable_intr(if_ctx_t ctx);
static void ixgbe_link_intr_enable(if_ctx_t ctx);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
static int  ixgbe_if_media_change(if_ctx_t ctx);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
static void ixgbe_if_multi_set(if_ctx_t ctx);
static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static void ixgbe_if_queues_free(if_ctx_t ctx);
static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t ctx);
static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
int ixgbe_intr(void *arg);

/************************************************************************
 * Function prototypes
 ************************************************************************/
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
#endif

static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_add_device_sysctls(if_ctx_t ctx);
static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);

static void ixgbe_config_dmac(struct adapter *adapter);
static void ixgbe_configure_ivars(struct adapter *adapter);
static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
    s8 type);
static u8  *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t ctx);

static void ixgbe_free_pci_resources(if_ctx_t ctx);

static int  ixgbe_msix_link(void *arg);
static int  ixgbe_msix_que(void *arg);
static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
static void ixgbe_initialize_receive_units(if_ctx_t ctx);
static void ixgbe_initialize_transmit_units(if_ctx_t ctx);

static int  ixgbe_setup_interface(if_ctx_t ctx);
static void ixgbe_init_device_features(struct adapter *adapter);
static void
ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void ixgbe_add_media_types(if_ctx_t ctx);
static void ixgbe_update_stats_counters(struct adapter *adapter);
static void ixgbe_config_link(if_ctx_t ctx);
static void ixgbe_get_slot_info(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *adapter);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);

static void ixgbe_add_hw_stats(struct adapter *adapter);
static int  ixgbe_set_flowcntl(struct adapter *, int);
static int  ixgbe_set_advertise(struct adapter *, int);
static int  ixgbe_get_advertise(struct adapter *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
static void ixgbe_config_gpie(struct adapter *adapter);
static void ixgbe_config_delay_values(struct adapter *adapter);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 *  FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, ixgbe_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD(device_suspend, iflib_device_suspend),
    DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
    DEVMETHOD(pci_iov_init, iflib_device_iov_init),
    DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
    DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

static driver_t ix_driver = {
    "ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
    DEVMETHOD(ifdi_detach, ixgbe_if_detach),
    DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
    DEVMETHOD(ifdi_suspend,
        ixgbe_if_suspend),
    DEVMETHOD(ifdi_resume, ixgbe_if_resume),
    DEVMETHOD(ifdi_init, ixgbe_if_init),
    DEVMETHOD(ifdi_stop, ixgbe_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
    DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
    DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
    DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
    DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
    DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
    DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
    DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
    DEVMETHOD(ifdi_timer, ixgbe_if_timer),
    DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
    DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
    DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
    DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
#ifdef PCI_IOV
    DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
    DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
    DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
    DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
    "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.
 * This only works as a compile option right now, since it is
 * applied during attach; set this to 'ixgbe_smart_speed_off'
 * to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Setting this to on allows the use of unsupported
 * SFP+ modules; note that doing so means you are
 * on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
    .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = PAGE_SIZE,
    .isc_rx_maxsize = PAGE_SIZE*4,
    .isc_rx_nsegments = 1,
    .isc_rx_maxsegsize = PAGE_SIZE*4,
    .isc_nfl = 1,
    .isc_ntxqs = 1,
    .isc_nrxqs = 1,

    .isc_admin_intrcnt = 1,
    .isc_vendor_info = ixgbe_vendor_info_array,
    .isc_driver_version = ixgbe_driver_version,
    .isc_driver = &ixgbe_if_driver,
    .isc_flags = IFLIB_TSO_INIT_IP,

    .isc_nrxd_min = {MIN_RXD},
    .isc_ntxd_min = {MIN_TXD},
    .isc_nrxd_max = {MAX_RXD},
    .isc_ntxd_max = {MAX_TXD},
    .isc_nrxd_default = {DEFAULT_RXD},
    .isc_ntxd_default = {DEFAULT_TXD},
};

if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = adapter->shared;
    struct ix_tx_queue *que;
    int i, j, error;

    MPASS(adapter->num_tx_queues > 0);
    MPASS(adapter->num_tx_queues == ntxqsets);
    MPASS(ntxqs == 1);

    /* Allocate queue structure memory */
    adapter->tx_queues =
        (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!adapter->tx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate TX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
        struct tx_ring *txr = &que->txr;

        /* In case SR-IOV is enabled, align the index properly */
        txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
            i);

        txr->adapter = que->adapter = adapter;

        /* Allocate report status array */
        txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
        if (txr->tx_rsq == NULL) {
            error = ENOMEM;
            goto fail;
        }
        for (j = 0; j < scctx->isc_ntxd[0]; j++)
            txr->tx_rsq[j] = QIDX_INVALID;
        /* get the virtual and physical address of the hardware queues */
        txr->tail = IXGBE_TDT(txr->me);
        txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
        txr->tx_paddr = paddrs[i];

        txr->bytes = 0;
        txr->total_packets = 0;

        /* Set the rate at which we sample packets */
        if (adapter->feat_en & IXGBE_FEATURE_FDIR)
            txr->atr_sample = atr_sample_rate;

    }

    device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
        adapter->num_tx_queues);

    return (0);

fail:
    ixgbe_if_queues_free(ctx);

    return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ix_rx_queue *que;
    int i;

    MPASS(adapter->num_rx_queues > 0);
    MPASS(adapter->num_rx_queues == nrxqsets);
    MPASS(nrxqs == 1);

    /* Allocate queue structure memory */
    adapter->rx_queues =
        (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
        M_IXGBE, M_NOWAIT | M_ZERO);
    if (!adapter->rx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate RX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
        struct rx_ring *rxr = &que->rxr;

        /* In case SR-IOV is enabled, align the index properly */
        rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
            i);

        rxr->adapter = que->adapter = adapter;

        /* get the virtual and physical address of the hw queues */
        rxr->tail = IXGBE_RDT(rxr->me);
        rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
        rxr->rx_paddr = paddrs[i];
        rxr->bytes = 0;
        rxr->que = que;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
        adapter->num_rx_queues);

    return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ix_tx_queue *tx_que = adapter->tx_queues;
    struct ix_rx_queue *rx_que = adapter->rx_queues;
    int i;

    if (tx_que != NULL) {
        for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
            struct tx_ring *txr = &tx_que->txr;
            if (txr->tx_rsq == NULL)
                break;

            free(txr->tx_rsq, M_IXGBE);
            txr->tx_rsq = NULL;
        }

        free(adapter->tx_queues, M_IXGBE);
        adapter->tx_queues = NULL;
    }
    if (rx_que != NULL) {
        free(adapter->rx_queues, M_IXGBE);
        adapter->rx_queues = NULL;
    }
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 reta = 0, mrqc, rss_key[10];
    int queue_id, table_size, index_mult;
    int i, j;
    u32 rss_hash_config;

    if (adapter->feat_en & IXGBE_FEATURE_RSS) {
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *)&rss_key);
    } else {
        /* set up random bits */
        arc4rand(&rss_key, sizeof(rss_key), 0);
    }

    /* Set multiplier for RETA setup and table size based on MAC */
    index_mult = 0x1;
    table_size = 128;
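
    /*
     * Worked example of the RETA packing done in the loop below
     * (illustrative values, not taken from any particular board): each
     * 32-bit RETA register holds four 8-bit queue indices, low byte first.
     * With four RX queues, entries 0..3 yield queue ids 0, 1, 2, 3; each
     * id is OR'd into the top byte and the word is shifted right by 8
     * between entries, so after entry 3 the register holds 0x03020100,
     * which is written to IXGBE_RETA(0).
     */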
    switch (adapter->hw.mac.type) {
    case ixgbe_mac_82598EB:
        index_mult = 0x11;
        break;
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        table_size = 512;
        break;
    default:
        break;
    }

    /* Set up the redirection table */
    for (i = 0, j = 0; i < table_size; i++, j++) {
        if (j == adapter->num_rx_queues)
            j = 0;

        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
            /*
             * Fetch the RSS bucket id for the given indirection
             * entry. Cap it at the number of configured buckets
             * (which is num_rx_queues.)
             */
            queue_id = rss_get_indirection_to_bucket(i);
            queue_id = queue_id % adapter->num_rx_queues;
        } else
            queue_id = (j * index_mult);

        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
        reta = reta >> 8;
        reta = reta | (((uint32_t)queue_id) << 24);
        if ((i & 3) == 3) {
            if (i < 128)
                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
            else
                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                    reta);
            reta = 0;
        }
    }

    /* Now fill our hash function seeds */
    for (i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

    /* Perform hash on these packet types */
    if (adapter->feat_en & IXGBE_FEATURE_RSS)
        rss_hash_config = rss_gethashconfig();
    else {
        /*
         * Disable UDP - IP fragments aren't currently being handled
         * and so we end up with a mix of 2-tuple and 4-tuple
         * traffic.
         */
        rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                        | RSS_HASHTYPE_RSS_TCP_IPV4
                        | RSS_HASHTYPE_RSS_IPV6
                        | RSS_HASHTYPE_RSS_TCP_IPV6
                        | RSS_HASHTYPE_RSS_IPV6_EX
                        | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    }

    mrqc = IXGBE_MRQC_RSSEN;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = adapter->shared;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct ix_rx_queue *que;
    int i, j;
    u32 bufsz, fctrl, srrctl, rxcsum;
    u32 hlreg;

    /*
     * Make sure receives are disabled while
     * setting up the descriptor ring
     */
    ixgbe_disable_rx(hw);

    /* Enable broadcasts */
    fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    fctrl |= IXGBE_FCTRL_BAM;
    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        fctrl |= IXGBE_FCTRL_DPF;
        fctrl |= IXGBE_FCTRL_PMCF;
    }
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    /* Set for Jumbo Frames? */
    hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    if (ifp->if_mtu > ETHERMTU)
        hlreg |= IXGBE_HLREG0_JUMBOEN;
    else
        hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

    bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
        IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    /* Setup the Base and Length of the Rx Descriptor Ring */
    for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        u64 rdba = rxr->rx_paddr;

        j = rxr->me;

        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
            scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

        /* Set up the SRRCTL register */
        srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        srrctl |= bufsz;
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /*
         * Set DROP_EN iff we have no flow control and >1 queue.
         * Note that srrctl was cleared shortly before during reset,
         * so we do not need to clear the bit, but do it just in case
         * this code is moved elsewhere.
         */
        if (adapter->num_rx_queues > 1 &&
            adapter->hw.fc.requested_mode == ixgbe_fc_none) {
            srrctl |= IXGBE_SRRCTL_DROP_EN;
        } else {
            srrctl &= ~IXGBE_SRRCTL_DROP_EN;
        }

        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

        /* Set the driver rx tail address */
        rxr->tail = IXGBE_RDT(rxr->me);
    }

    if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR
                    | IXGBE_PSRTYPE_UDPHDR
                    | IXGBE_PSRTYPE_IPV4HDR
                    | IXGBE_PSRTYPE_IPV6HDR;
        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    }

    rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

    ixgbe_initialize_rss_mapping(adapter);

    if (adapter->num_rx_queues > 1) {
        /* RSS and RX IPP Checksum are mutually exclusive */
        rxcsum |= IXGBE_RXCSUM_PCSD;
    }

    if (ifp->if_capenable & IFCAP_RXCSUM)
        rxcsum |= IXGBE_RXCSUM_PCSD;

    /* This is useful for calculating UDP/IP fragment checksums */
    if (!(rxcsum & IXGBE_RXCSUM_PCSD))
        rxcsum |= IXGBE_RXCSUM_IPPCSE;

    IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    if_softc_ctx_t scctx = adapter->shared;
    struct ix_tx_queue *que;
    int i;

    /* Setup the Base and Length of the Tx Descriptor Ring */
    for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
        i++, que++) {
        struct tx_ring *txr = &que->txr;
        u64 tdba = txr->tx_paddr;
        u32 txctrl = 0;
        int j = txr->me;

        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
            scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

        /* Setup the HW Tx Head and Tail descriptor pointers */
        IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

        /* Cache the tail address */
        txr->tail = IXGBE_TDT(txr->me);

        txr->tx_rs_cidx = txr->tx_rs_pidx;
        txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
        for (int k = 0; k < scctx->isc_ntxd[0]; k++)
            txr->tx_rsq[k] = QIDX_INVALID;

        /* Disable Head Writeback */
        /*
         * Note: for X550 series devices, these registers are actually
         * prefixed with TPH_ instead of DCA_, but the addresses and
         * fields remain the same.
         */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
            break;
        default:
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
            break;
        }
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
            break;
        default:
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
            break;
        }

    }

    if (hw->mac.type != ixgbe_mac_82598EB) {
        u32 dmatxctl, rttdcs;

        dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
        dmatxctl |= IXGBE_DMATXCTL_TE;
        IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        /* Disable arbiter to set MTQC */
        rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        rttdcs |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        IXGBE_WRITE_REG(hw, IXGBE_MTQC,
            ixgbe_get_mtqc(adapter->iov_mode));
        rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    }

} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
    return (ixgbe_sctx);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
    struct adapter *adapter;
    device_t dev;
    if_softc_ctx_t scctx;
    struct ixgbe_hw *hw;
    int error = 0;
    u32 ctrl_ext;

    INIT_DEBUGOUT("ixgbe_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    dev = iflib_get_dev(ctx);
    adapter = iflib_get_softc(ctx);
    adapter->hw.back = adapter;
    adapter->ctx = ctx;
    adapter->dev = dev;
    scctx = adapter->shared = iflib_get_softc_ctx(ctx);
    adapter->media = iflib_get_media(ctx);
    hw = &adapter->hw;

    /* Determine hardware revision */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_get_revid(dev);
    hw->subsystem_vendor_id = pci_get_subvendor(dev);
    hw->subsystem_device_id = pci_get_subdevice(dev);

    /* Do base PCI setup - map BAR0 */
    if (ixgbe_allocate_pci_resources(ctx)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        return (ENXIO);
    }

    /* let hardware know driver is loaded */
    ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

    /*
     * Initialize the shared code
     */
    if (ixgbe_init_shared_code(hw) != 0) {
        device_printf(dev, "Unable to initialize the shared code\n");
        error = ENXIO;
        goto err_pci;
    }

    if (hw->mbx.ops.init_params)
        hw->mbx.ops.init_params(hw);

    hw->allow_unsupported_sfp = allow_unsupported_sfp;

    if (hw->mac.type != ixgbe_mac_82598EB)
        hw->phy.smart_speed = ixgbe_smart_speed;

    ixgbe_init_device_features(adapter);

    /* Enable WoL (if supported) */
    ixgbe_check_wol_support(adapter);

    /* Verify adapter fan is still functional (if applicable) */
    if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
        u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
        ixgbe_check_fan_failure(adapter, esdp, FALSE);
    }

    /* Ensure SW/FW semaphore is free */
    ixgbe_init_swfw_semaphore(hw);

    /* Set an initial default flow control value */
    hw->fc.requested_mode = ixgbe_flow_control;

    hw->phy.reset_if_overtemp = TRUE;
    error = ixgbe_reset_hw(hw);
    hw->phy.reset_if_overtemp = FALSE;
    if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
        /*
         * No optics in this port, set up
         * so the timer routine will probe
         * for later insertion.
         */
        adapter->sfp_probe = TRUE;
        error = 0;
    } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
        device_printf(dev, "Unsupported SFP+ module detected!\n");
        error = EIO;
        goto err_pci;
    } else if (error) {
        device_printf(dev, "Hardware initialization failed\n");
        error = EIO;
        goto err_pci;
    }

    /* Make sure we have a good EEPROM before we read from it */
    if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
        device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
        error = EIO;
        goto err_pci;
    }

    error = ixgbe_start_hw(hw);
    switch (error) {
    case IXGBE_ERR_EEPROM_VERSION:
        device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
        break;
    case IXGBE_ERR_SFP_NOT_SUPPORTED:
        device_printf(dev, "Unsupported SFP+ Module\n");
        error = EIO;
        goto err_pci;
    case IXGBE_ERR_SFP_NOT_PRESENT:
        device_printf(dev, "No SFP+ Module found\n");
        /* falls thru */
    default:
        break;
    }

    /* Most of the iflib initialization...
     */
    iflib_set_mac(ctx, hw->mac.addr);
    switch (adapter->hw.mac.type) {
    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        scctx->isc_rss_table_size = 512;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
        break;
    default:
        scctx->isc_rss_table_size = 128;
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
    }

    /* Allow legacy interrupts */
    ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

    scctx->isc_txqsizes[0] =
        roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
        sizeof(u32), DBA_ALIGN),
    scctx->isc_rxqsizes[0] =
        roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
        DBA_ALIGN);

    /* XXX */
    scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
        CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
    } else {
        scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
        scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
    }

    scctx->isc_msix_bar = pci_msix_table_bar(dev);

    scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
    scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

    scctx->isc_txrx = &ixgbe_txrx;

    scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

    return (0);

err_pci:
    ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
    ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
    ixgbe_free_pci_resources(ctx);

    return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
    device_t dev;
    struct adapter *adapter;
    struct ixgbe_hw *hw;
    int error = 0;

    dev = iflib_get_dev(ctx);
    adapter = iflib_get_softc(ctx);
    hw = &adapter->hw;


    if (adapter->intr_type == IFLIB_INTR_LEGACY &&
        (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
        device_printf(dev, "Device does not support legacy interrupts");
        error = ENXIO;
        goto err;
    }

    /* Allocate multicast array memory. */
    adapter->mta = malloc(sizeof(*adapter->mta) *
        MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
    if (adapter->mta == NULL) {
        device_printf(dev, "Can not allocate multicast setup array\n");
        error = ENOMEM;
        goto err;
    }

    /* hw.ix defaults init */
    ixgbe_set_advertise(adapter, ixgbe_advertise_speed);

    /* Enable the optics for 82599 SFP+ fiber */
    ixgbe_enable_tx_laser(hw);

    /* Enable power to the phy.
     */
    ixgbe_set_phy_power(hw, TRUE);

    ixgbe_initialize_iov(adapter);

    error = ixgbe_setup_interface(ctx);
    if (error) {
        device_printf(dev, "Interface setup failed: %d\n", error);
        goto err;
    }

    ixgbe_if_update_admin_status(ctx);

    /* Initialize statistics */
    ixgbe_update_stats_counters(adapter);
    ixgbe_add_hw_stats(adapter);

    /* Check PCIE slot type/speed/width */
    ixgbe_get_slot_info(adapter);

    /*
     * Do time init and sysctl init here, but
     * only on the first port of a bypass adapter.
     */
    ixgbe_bypass_init(adapter);

    /* Set an initial dmac value */
    adapter->dmac = 0;
    /* Set initial advertised speeds (if applicable) */
    adapter->advertise = ixgbe_get_advertise(adapter);

    if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
        ixgbe_define_iov_schemas(dev, &error);

    /* Add sysctls */
    ixgbe_add_device_sysctls(ctx);

    return (0);
err:
    return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u16 dev_caps = 0;

    /* Find out WoL support for port */
    adapter->wol_support = hw->wol_enabled = 0;
    ixgbe_get_device_caps(hw, &dev_caps);
    if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
        ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
         hw->bus.func == 0))
        adapter->wol_support = hw->wol_enabled = 1;

    /* Save initial wake up filter configuration */
    adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

    return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct adapter *adapter = iflib_get_softc(ctx);

    INIT_DEBUGOUT("ixgbe_setup_interface: begin");

    if_setbaudrate(ifp, IF_Gbps(10));

    adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);

    ixgbe_add_media_types(ctx);

    /* Autoselect media by default */
    ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);

    return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (adapter->ipackets);
    case IFCOUNTER_OPACKETS:
        return (adapter->opackets);
    case IFCOUNTER_IBYTES:
        return (adapter->ibytes);
    case IFCOUNTER_OBYTES:
        return (adapter->obytes);
    case IFCOUNTER_IMCASTS:
        return (adapter->imcasts);
    case IFCOUNTER_OMCASTS:
        return (adapter->omcasts);
    case IFCOUNTER_COLLISIONS:
        return (0);
    case IFCOUNTER_IQDROPS:
        return (adapter->iqdrops);
    case IFCOUNTER_OQDROPS:
        return (0);
    case IFCOUNTER_IERRORS:
        return (adapter->ierrors);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    int i;


    if (hw->phy.ops.read_i2c_byte == NULL)
        return (ENXIO);
    for (i = 0; i < req->len; i++)
        hw->phy.ops.read_i2c_byte(hw, req->offset + i,
            req->dev_addr, &req->data[i]);
    return (0);
} /* ixgbe_if_i2c_req */

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    device_t dev = iflib_get_dev(ctx);
    u64 layer;

    layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

    /* Media types with matching FreeBSD media defines */
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
        layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
        ifmedia_add(adapter->media,
            IFM_ETHER | IFM_10G_TWINAX, 0,
            NULL);

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
                NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
        if (hw->phy.multispeed_fiber)
            ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
                NULL);
    } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
        ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
        device_printf(dev, "Media supported: 10GbaseKR\n");
        device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
        device_printf(dev, "Media supported: 10GbaseKX4\n");
        device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
        ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
        device_printf(dev, "Media supported: 1000baseKX\n");
        device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
    }
    if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
        device_printf(dev, "Media supported: 2500baseKX\n");
        device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
        ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
    }
#endif
    if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
        device_printf(dev, "Media supported: 1000baseBX\n");

    if (hw->device_id == IXGBE_DEV_ID_82598AT) {
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
            0, NULL);
        ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    }

    ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        if (hw->phy.type == ixgbe_phy_nl)
            return (TRUE);
        return (FALSE);
    case ixgbe_mac_82599EB:
        switch (hw->mac.ops.get_media_type(hw)) {
        case ixgbe_media_type_fiber:
        case ixgbe_media_type_fiber_qsfp:
            return (TRUE);
        default:
            return (FALSE);
        }
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
            return (TRUE);
        return (FALSE);
    default:
        return (FALSE);
    }
} /* ixgbe_is_sfp */

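/*
 * Illustrative sketch, kept out of the build with #if 0: how the deferred
 * SFP handling used by ixgbe_config_link() below fits together.  The
 * requester only records a bit in adapter->task_requests and kicks the
 * iflib admin task; the module/multispeed work then runs later from the
 * admin path (see ixgbe_handle_mod()/ixgbe_handle_msf()).  The dispatch
 * shown here is a simplified assumption of that flow, not the exact code.
 */
#if 0
	/* Requester side (interrupt or link-config path) */
	adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
	iflib_admin_intr_deferred(ctx);

	/* Admin-task side, run later in the update_admin_status path */
	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
#endif
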
/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    u32 autoneg, err = 0;
    bool sfp, negotiate;

    sfp = ixgbe_is_sfp(hw);

    if (sfp) {
        adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
        iflib_admin_intr_deferred(ctx);
    } else {
        if (hw->mac.ops.check_link)
            err = ixgbe_check_link(hw, &adapter->link_speed,
                &adapter->link_up, FALSE);
        if (err)
            return;
        autoneg = hw->phy.autoneg_advertised;
        if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
            err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                &negotiate);
        if (err)
            return;
        if (hw->mac.ops.setup_link)
            err = hw->mac.ops.setup_link(hw, autoneg,
                adapter->link_up);
    }
} /* ixgbe_config_link */

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct ixgbe_hw_stats *stats = &adapter->stats.pf;
    u32 missed_rx = 0, bprc, lxon, lxoff, total;
    u32 lxoffrxc;
    u64 total_missed_rx = 0;

    stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
    stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
    stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
    stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
    stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

    for (int i = 0; i < 16; i++) {
        stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
        stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
        stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
    }
    stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
    stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
    stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

    /* Hardware workaround, gprc counts missed packets */
    stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
    stats->gprc -= missed_rx;

    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
            ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
        lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        stats->lxoffrxc += lxoffrxc;
    } else {
        stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
        lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
        stats->lxoffrxc += lxoffrxc;
        /* 82598 only has a counter in the high register */
        stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
        stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
        stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
    }

    /*
     * For watchdog management we need to know if we have been paused
     * during the last interval, so capture that here.
     */
    if (lxoffrxc)
        adapter->shared->isc_pause_frames = 1;

    /*
     * Workaround: mprc hardware is incorrectly counting
     * broadcasts, so for now we subtract those.
     */
    bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
    stats->bprc += bprc;
    stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
    if (hw->mac.type == ixgbe_mac_82598EB)
        stats->mprc -= bprc;

    stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
    stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
    stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
    stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
    stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
    stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

    lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
    stats->lxontxc += lxon;
    lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
    stats->lxofftxc += lxoff;
    total = lxon + lxoff;

    stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
    stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
    stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
    stats->gptc -= total;
    stats->mptc -= total;
    stats->ptc64 -= total;
    stats->gotc -= total * ETHER_MIN_LEN;

    stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
    stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
    stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
    stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
    stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
    stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
    stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
    stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
    stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
    stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
    stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
    stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
    stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
    stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
    stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
    stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
    stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
    stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    /* Only read FCOE on 82599 */
    if (hw->mac.type != ixgbe_mac_82598EB) {
        stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
        stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
        stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
        stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
        stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
    }

    /* Fill out the OS statistics structure */
    IXGBE_SET_IPACKETS(adapter, stats->gprc);
    IXGBE_SET_OPACKETS(adapter, stats->gptc);
    IXGBE_SET_IBYTES(adapter, stats->gorc);
    IXGBE_SET_OBYTES(adapter, stats->gotc);
    IXGBE_SET_IMCASTS(adapter, stats->mprc);
    IXGBE_SET_OMCASTS(adapter, stats->mptc);
    IXGBE_SET_COLLISIONS(adapter, 0);
    IXGBE_SET_IQDROPS(adapter, total_missed_rx);
    IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
} /* ixgbe_update_stats_counters */

/************************************************************************
 * ixgbe_add_hw_stats
 *
 *   Add sysctl variables, one per statistic, to the system.
1519 ************************************************************************/ 1520 static void 1521 ixgbe_add_hw_stats(struct adapter *adapter) 1522 { 1523 device_t dev = iflib_get_dev(adapter->ctx); 1524 struct ix_rx_queue *rx_que; 1525 struct ix_tx_queue *tx_que; 1526 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1527 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1528 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1529 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1530 struct sysctl_oid *stat_node, *queue_node; 1531 struct sysctl_oid_list *stat_list, *queue_list; 1532 int i; 1533 1534 #define QUEUE_NAME_LEN 32 1535 char namebuf[QUEUE_NAME_LEN]; 1536 1537 /* Driver Statistics */ 1538 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1539 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); 1540 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1541 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); 1542 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1543 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); 1544 1545 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { 1546 struct tx_ring *txr = &tx_que->txr; 1547 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1548 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1549 CTLFLAG_RD, NULL, "Queue Name"); 1550 queue_list = SYSCTL_CHILDREN(queue_node); 1551 1552 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1553 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1554 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); 1555 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1556 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1557 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1558 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1559 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1560 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1561 CTLFLAG_RD, &txr->total_packets, 1562 "Queue Packets Transmitted"); 1563 } 1564 1565 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { 1566 struct rx_ring *rxr = &rx_que->rxr; 1567 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1568 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1569 CTLFLAG_RD, NULL, "Queue Name"); 1570 queue_list = SYSCTL_CHILDREN(queue_node); 1571 1572 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1573 CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i], 1574 sizeof(&adapter->rx_queues[i]), 1575 ixgbe_sysctl_interrupt_rate_handler, "IU", 1576 "Interrupt Rate"); 1577 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1578 CTLFLAG_RD, &(adapter->rx_queues[i].irqs), 1579 "irqs on this queue"); 1580 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1581 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1582 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1583 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1584 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1585 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1586 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1587 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1588 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1589 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1590 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1591 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1592 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1593 CTLFLAG_RD, 
&rxr->rx_discarded, "Discarded RX packets"); 1594 } 1595 1596 /* MAC stats get their own sub node */ 1597 1598 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 1599 CTLFLAG_RD, NULL, "MAC Statistics"); 1600 stat_list = SYSCTL_CHILDREN(stat_node); 1601 1602 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 1603 CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); 1604 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", 1605 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); 1606 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", 1607 CTLFLAG_RD, &stats->errbc, "Byte Errors"); 1608 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", 1609 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); 1610 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", 1611 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults"); 1612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", 1613 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults"); 1614 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", 1615 CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); 1616 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets", 1617 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count"); 1618 1619 /* Flow Control stats */ 1620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 1621 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted"); 1622 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 1623 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received"); 1624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 1625 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted"); 1626 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 1627 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received"); 1628 1629 /* Packet Reception Stats */ 1630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", 1631 CTLFLAG_RD, &stats->tor, "Total Octets Received"); 1632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 1633 CTLFLAG_RD, &stats->gorc, "Good Octets Received"); 1634 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", 1635 CTLFLAG_RD, &stats->tpr, "Total Packets Received"); 1636 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 1637 CTLFLAG_RD, &stats->gprc, "Good Packets Received"); 1638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 1639 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); 1640 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", 1641 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); 1642 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 1643 CTLFLAG_RD, &stats->prc64, "64 byte frames received "); 1644 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 1645 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); 1646 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 1647 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); 1648 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 1649 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); 1650 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 1651 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); 1652 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 1653 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received"); 1654 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 1655 CTLFLAG_RD, &stats->ruc, "Receive Undersized"); 1656 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 1657 CTLFLAG_RD, &stats->rfc, "Fragmented 
Packets Received "); 1658 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 1659 CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); 1660 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", 1661 CTLFLAG_RD, &stats->rjc, "Received Jabber"); 1662 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 1663 CTLFLAG_RD, &stats->mngprc, "Management Packets Received"); 1664 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 1665 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped"); 1666 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 1667 CTLFLAG_RD, &stats->xec, "Checksum Errors"); 1668 1669 /* Packet Transmission Stats */ 1670 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 1671 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); 1672 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 1673 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); 1674 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 1675 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); 1676 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 1677 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); 1678 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 1679 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); 1680 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 1681 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted"); 1682 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 1683 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted "); 1684 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 1685 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); 1686 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 1687 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); 1688 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 1689 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); 1690 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 1691 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); 1692 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 1693 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); 1694 } /* ixgbe_add_hw_stats */ 1695 1696 /************************************************************************ 1697 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function 1698 * 1699 * Retrieves the TDH value from the hardware 1700 ************************************************************************/ 1701 static int 1702 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 1703 { 1704 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1705 int error; 1706 unsigned int val; 1707 1708 if (!txr) 1709 return (0); 1710 1711 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); 1712 error = sysctl_handle_int(oidp, &val, 0, req); 1713 if (error || !req->newptr) 1714 return error; 1715 1716 return (0); 1717 } /* ixgbe_sysctl_tdh_handler */ 1718 1719 /************************************************************************ 1720 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1721 * 1722 * Retrieves the TDT value from the hardware 1723 ************************************************************************/ 1724 static int 1725 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1726 { 1727 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1728 int error; 1729 unsigned int val; 1730 1731 if 
(!txr) 1732 return (0); 1733 1734 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); 1735 error = sysctl_handle_int(oidp, &val, 0, req); 1736 if (error || !req->newptr) 1737 return error; 1738 1739 return (0); 1740 } /* ixgbe_sysctl_tdt_handler */ 1741 1742 /************************************************************************ 1743 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1744 * 1745 * Retrieves the RDH value from the hardware 1746 ************************************************************************/ 1747 static int 1748 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1749 { 1750 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1751 int error; 1752 unsigned int val; 1753 1754 if (!rxr) 1755 return (0); 1756 1757 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); 1758 error = sysctl_handle_int(oidp, &val, 0, req); 1759 if (error || !req->newptr) 1760 return error; 1761 1762 return (0); 1763 } /* ixgbe_sysctl_rdh_handler */ 1764 1765 /************************************************************************ 1766 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1767 * 1768 * Retrieves the RDT value from the hardware 1769 ************************************************************************/ 1770 static int 1771 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1772 { 1773 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1774 int error; 1775 unsigned int val; 1776 1777 if (!rxr) 1778 return (0); 1779 1780 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); 1781 error = sysctl_handle_int(oidp, &val, 0, req); 1782 if (error || !req->newptr) 1783 return error; 1784 1785 return (0); 1786 } /* ixgbe_sysctl_rdt_handler */ 1787 1788 /************************************************************************ 1789 * ixgbe_if_vlan_register 1790 * 1791 * Run via vlan config EVENT, it enables us to use the 1792 * HW Filter table since we can get the vlan id. This 1793 * just creates the entry in the soft version of the 1794 * VFTA, init will repopulate the real table. 1795 ************************************************************************/ 1796 static void 1797 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) 1798 { 1799 struct adapter *adapter = iflib_get_softc(ctx); 1800 u16 index, bit; 1801 1802 index = (vtag >> 5) & 0x7F; 1803 bit = vtag & 0x1F; 1804 adapter->shadow_vfta[index] |= (1 << bit); 1805 ++adapter->num_vlans; 1806 ixgbe_setup_vlan_hw_support(ctx); 1807 } /* ixgbe_if_vlan_register */ 1808 1809 /************************************************************************ 1810 * ixgbe_if_vlan_unregister 1811 * 1812 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 
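 *
 * The VFTA is a 128 x 32-bit bitmap, so the shadow entry is located by
 * index = (vtag >> 5) & 0x7F and bit = vtag & 0x1F; e.g. VLAN id 100
 * lives at bit 4 of shadow_vfta[3].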
1813 ************************************************************************/ 1814 static void 1815 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) 1816 { 1817 struct adapter *adapter = iflib_get_softc(ctx); 1818 u16 index, bit; 1819 1820 index = (vtag >> 5) & 0x7F; 1821 bit = vtag & 0x1F; 1822 adapter->shadow_vfta[index] &= ~(1 << bit); 1823 --adapter->num_vlans; 1824 /* Re-init to load the changes */ 1825 ixgbe_setup_vlan_hw_support(ctx); 1826 } /* ixgbe_if_vlan_unregister */ 1827 1828 /************************************************************************ 1829 * ixgbe_setup_vlan_hw_support 1830 ************************************************************************/ 1831 static void 1832 ixgbe_setup_vlan_hw_support(if_ctx_t ctx) 1833 { 1834 struct ifnet *ifp = iflib_get_ifp(ctx); 1835 struct adapter *adapter = iflib_get_softc(ctx); 1836 struct ixgbe_hw *hw = &adapter->hw; 1837 struct rx_ring *rxr; 1838 int i; 1839 u32 ctrl; 1840 1841 1842 /* 1843 * We get here thru init_locked, meaning 1844 * a soft reset, this has already cleared 1845 * the VFTA and other state, so if there 1846 * have been no vlan's registered do nothing. 1847 */ 1848 if (adapter->num_vlans == 0) 1849 return; 1850 1851 /* Setup the queues for vlans */ 1852 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1853 for (i = 0; i < adapter->num_rx_queues; i++) { 1854 rxr = &adapter->rx_queues[i].rxr; 1855 /* On 82599 the VLAN enable is per/queue in RXDCTL */ 1856 if (hw->mac.type != ixgbe_mac_82598EB) { 1857 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 1858 ctrl |= IXGBE_RXDCTL_VME; 1859 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 1860 } 1861 rxr->vtag_strip = TRUE; 1862 } 1863 } 1864 1865 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 1866 return; 1867 /* 1868 * A soft reset zero's out the VFTA, so 1869 * we need to repopulate it now. 1870 */ 1871 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1872 if (adapter->shadow_vfta[i] != 0) 1873 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 1874 adapter->shadow_vfta[i]); 1875 1876 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1877 /* Enable the Filter Table if enabled */ 1878 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 1879 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1880 ctrl |= IXGBE_VLNCTRL_VFE; 1881 } 1882 if (hw->mac.type == ixgbe_mac_82598EB) 1883 ctrl |= IXGBE_VLNCTRL_VME; 1884 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1885 } /* ixgbe_setup_vlan_hw_support */ 1886 1887 /************************************************************************ 1888 * ixgbe_get_slot_info 1889 * 1890 * Get the width and transaction speed of 1891 * the slot this adapter is plugged into. 1892 ************************************************************************/ 1893 static void 1894 ixgbe_get_slot_info(struct adapter *adapter) 1895 { 1896 device_t dev = iflib_get_dev(adapter->ctx); 1897 struct ixgbe_hw *hw = &adapter->hw; 1898 int bus_info_valid = TRUE; 1899 u32 offset; 1900 u16 link; 1901 1902 /* Some devices are behind an internal bridge */ 1903 switch (hw->device_id) { 1904 case IXGBE_DEV_ID_82599_SFP_SF_QP: 1905 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 1906 goto get_parent_info; 1907 default: 1908 break; 1909 } 1910 1911 ixgbe_get_bus_info(hw); 1912 1913 /* 1914 * Some devices don't use PCI-E, but there is no need 1915 * to display "Unknown" for bus speed and width. 
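 * The X550EM MACs fall in this category, so the switch below simply
 * returns for them instead of printing slot information.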
1916 */ 1917 switch (hw->mac.type) { 1918 case ixgbe_mac_X550EM_x: 1919 case ixgbe_mac_X550EM_a: 1920 return; 1921 default: 1922 goto display; 1923 } 1924 1925 get_parent_info: 1926 /* 1927 * For the Quad port adapter we need to parse back 1928 * up the PCI tree to find the speed of the expansion 1929 * slot into which this adapter is plugged. A bit more work. 1930 */ 1931 dev = device_get_parent(device_get_parent(dev)); 1932 #ifdef IXGBE_DEBUG 1933 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 1934 pci_get_slot(dev), pci_get_function(dev)); 1935 #endif 1936 dev = device_get_parent(device_get_parent(dev)); 1937 #ifdef IXGBE_DEBUG 1938 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 1939 pci_get_slot(dev), pci_get_function(dev)); 1940 #endif 1941 /* Now get the PCI Express Capabilities offset */ 1942 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 1943 /* 1944 * Hmm...can't get PCI-Express capabilities. 1945 * Falling back to default method. 1946 */ 1947 bus_info_valid = FALSE; 1948 ixgbe_get_bus_info(hw); 1949 goto display; 1950 } 1951 /* ...and read the Link Status Register */ 1952 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 1953 ixgbe_set_pci_config_data_generic(hw, link); 1954 1955 display: 1956 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 1957 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 1958 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 1959 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 1960 "Unknown"), 1961 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 1962 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 1963 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 1964 "Unknown")); 1965 1966 if (bus_info_valid) { 1967 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 1968 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 1969 (hw->bus.speed == ixgbe_bus_speed_2500))) { 1970 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1971 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 1972 } 1973 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 1974 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 1975 (hw->bus.speed < ixgbe_bus_speed_8000))) { 1976 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1977 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 1978 } 1979 } else 1980 device_printf(dev, "Unable to determine slot speed/width. 
The speed/width reported are that of the internal switch.\n"); 1981 1982 return; 1983 } /* ixgbe_get_slot_info */ 1984 1985 /************************************************************************ 1986 * ixgbe_if_msix_intr_assign 1987 * 1988 * Setup MSI-X Interrupt resources and handlers 1989 ************************************************************************/ 1990 static int 1991 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) 1992 { 1993 struct adapter *adapter = iflib_get_softc(ctx); 1994 struct ix_rx_queue *rx_que = adapter->rx_queues; 1995 struct ix_tx_queue *tx_que; 1996 int error, rid, vector = 0; 1997 int cpu_id = 0; 1998 char buf[16]; 1999 2000 /* Admin Que is vector 0*/ 2001 rid = vector + 1; 2002 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) { 2003 rid = vector + 1; 2004 2005 snprintf(buf, sizeof(buf), "rxq%d", i); 2006 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 2007 IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); 2008 2009 if (error) { 2010 device_printf(iflib_get_dev(ctx), 2011 "Failed to allocate que int %d err: %d", i, error); 2012 adapter->num_rx_queues = i + 1; 2013 goto fail; 2014 } 2015 2016 rx_que->msix = vector; 2017 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 2018 /* 2019 * The queue ID is used as the RSS layer bucket ID. 2020 * We look up the queue ID -> RSS CPU ID and select 2021 * that. 2022 */ 2023 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 2024 } else { 2025 /* 2026 * Bind the MSI-X vector, and thus the 2027 * rings to the corresponding cpu. 2028 * 2029 * This just happens to match the default RSS 2030 * round-robin bucket -> queue -> CPU allocation. 2031 */ 2032 if (adapter->num_rx_queues > 1) 2033 cpu_id = i; 2034 } 2035 2036 } 2037 for (int i = 0; i < adapter->num_tx_queues; i++) { 2038 snprintf(buf, sizeof(buf), "txq%d", i); 2039 tx_que = &adapter->tx_queues[i]; 2040 tx_que->msix = i % adapter->num_rx_queues; 2041 iflib_softirq_alloc_generic(ctx, 2042 &adapter->rx_queues[tx_que->msix].que_irq, 2043 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 2044 } 2045 rid = vector + 1; 2046 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, 2047 IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq"); 2048 if (error) { 2049 device_printf(iflib_get_dev(ctx), 2050 "Failed to register admin handler"); 2051 return (error); 2052 } 2053 2054 adapter->vector = vector; 2055 2056 return (0); 2057 fail: 2058 iflib_irq_free(ctx, &adapter->irq); 2059 rx_que = adapter->rx_queues; 2060 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) 2061 iflib_irq_free(ctx, &rx_que->que_irq); 2062 2063 return (error); 2064 } /* ixgbe_if_msix_intr_assign */ 2065 2066 /********************************************************************* 2067 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2068 **********************************************************************/ 2069 static int 2070 ixgbe_msix_que(void *arg) 2071 { 2072 struct ix_rx_queue *que = arg; 2073 struct adapter *adapter = que->adapter; 2074 struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx); 2075 2076 /* Protect against spurious interrupts */ 2077 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2078 return (FILTER_HANDLED); 2079 2080 ixgbe_disable_queue(adapter, que->msix); 2081 ++que->irqs; 2082 2083 return (FILTER_SCHEDULE_THREAD); 2084 } /* ixgbe_msix_que */ 2085 2086 /************************************************************************ 2087 * ixgbe_media_status - Media Ioctl callback 2088 * 2089 * Called whenever the user queries the status of 2090 
* the interface using ifconfig. 2091 ************************************************************************/ 2092 static void 2093 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 2094 { 2095 struct adapter *adapter = iflib_get_softc(ctx); 2096 struct ixgbe_hw *hw = &adapter->hw; 2097 int layer; 2098 2099 INIT_DEBUGOUT("ixgbe_if_media_status: begin"); 2100 2101 ifmr->ifm_status = IFM_AVALID; 2102 ifmr->ifm_active = IFM_ETHER; 2103 2104 if (!adapter->link_active) 2105 return; 2106 2107 ifmr->ifm_status |= IFM_ACTIVE; 2108 layer = adapter->phy_layer; 2109 2110 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2111 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2112 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2113 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2114 switch (adapter->link_speed) { 2115 case IXGBE_LINK_SPEED_10GB_FULL: 2116 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2117 break; 2118 case IXGBE_LINK_SPEED_1GB_FULL: 2119 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2120 break; 2121 case IXGBE_LINK_SPEED_100_FULL: 2122 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2123 break; 2124 case IXGBE_LINK_SPEED_10_FULL: 2125 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2126 break; 2127 } 2128 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2129 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2130 switch (adapter->link_speed) { 2131 case IXGBE_LINK_SPEED_10GB_FULL: 2132 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2133 break; 2134 } 2135 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2136 switch (adapter->link_speed) { 2137 case IXGBE_LINK_SPEED_10GB_FULL: 2138 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2139 break; 2140 case IXGBE_LINK_SPEED_1GB_FULL: 2141 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2142 break; 2143 } 2144 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2145 switch (adapter->link_speed) { 2146 case IXGBE_LINK_SPEED_10GB_FULL: 2147 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2148 break; 2149 case IXGBE_LINK_SPEED_1GB_FULL: 2150 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2151 break; 2152 } 2153 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2154 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2155 switch (adapter->link_speed) { 2156 case IXGBE_LINK_SPEED_10GB_FULL: 2157 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2158 break; 2159 case IXGBE_LINK_SPEED_1GB_FULL: 2160 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2161 break; 2162 } 2163 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2164 switch (adapter->link_speed) { 2165 case IXGBE_LINK_SPEED_10GB_FULL: 2166 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2167 break; 2168 } 2169 /* 2170 * XXX: These need to use the proper media types once 2171 * they're added. 
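 * Until then the backplane layers (KR/KX4/KX) are reported with the
 * nearest existing subtypes when IFM_ETH_XTYPE is not available, as
 * the #ifndef block below does with SR/CX4/CX.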
2172 */ 2173 #ifndef IFM_ETH_XTYPE 2174 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2175 switch (adapter->link_speed) { 2176 case IXGBE_LINK_SPEED_10GB_FULL: 2177 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2178 break; 2179 case IXGBE_LINK_SPEED_2_5GB_FULL: 2180 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2181 break; 2182 case IXGBE_LINK_SPEED_1GB_FULL: 2183 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2184 break; 2185 } 2186 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2187 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2188 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2189 switch (adapter->link_speed) { 2190 case IXGBE_LINK_SPEED_10GB_FULL: 2191 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2192 break; 2193 case IXGBE_LINK_SPEED_2_5GB_FULL: 2194 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2195 break; 2196 case IXGBE_LINK_SPEED_1GB_FULL: 2197 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2198 break; 2199 } 2200 #else 2201 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2202 switch (adapter->link_speed) { 2203 case IXGBE_LINK_SPEED_10GB_FULL: 2204 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 2205 break; 2206 case IXGBE_LINK_SPEED_2_5GB_FULL: 2207 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2208 break; 2209 case IXGBE_LINK_SPEED_1GB_FULL: 2210 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2211 break; 2212 } 2213 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2214 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2215 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2216 switch (adapter->link_speed) { 2217 case IXGBE_LINK_SPEED_10GB_FULL: 2218 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 2219 break; 2220 case IXGBE_LINK_SPEED_2_5GB_FULL: 2221 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2222 break; 2223 case IXGBE_LINK_SPEED_1GB_FULL: 2224 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2225 break; 2226 } 2227 #endif 2228 2229 /* If nothing is recognized... */ 2230 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 2231 ifmr->ifm_active |= IFM_UNKNOWN; 2232 2233 /* Display current flow control setting used on link */ 2234 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 2235 hw->fc.current_mode == ixgbe_fc_full) 2236 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2237 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 2238 hw->fc.current_mode == ixgbe_fc_full) 2239 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2240 } /* ixgbe_media_status */ 2241 2242 /************************************************************************ 2243 * ixgbe_media_change - Media Ioctl callback 2244 * 2245 * Called when the user changes speed/duplex using 2246 * media/mediopt option with ifconfig. 2247 ************************************************************************/ 2248 static int 2249 ixgbe_if_media_change(if_ctx_t ctx) 2250 { 2251 struct adapter *adapter = iflib_get_softc(ctx); 2252 struct ifmedia *ifm = iflib_get_media(ctx); 2253 struct ixgbe_hw *hw = &adapter->hw; 2254 ixgbe_link_speed speed = 0; 2255 2256 INIT_DEBUGOUT("ixgbe_if_media_change: begin"); 2257 2258 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2259 return (EINVAL); 2260 2261 if (hw->phy.media_type == ixgbe_media_type_backplane) 2262 return (EPERM); 2263 2264 /* 2265 * We don't actually need to check against the supported 2266 * media types of the adapter; ifmedia will take care of 2267 * that for us. 
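 * Each subtype below just ORs the link speeds it implies into 'speed';
 * e.g. selecting 10Gbase-T advertises 100M, 1G and 10G so the PHY can
 * still negotiate down, while 10Gbase-Twinax advertises 10G only.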
2268 */ 2269 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2270 case IFM_AUTO: 2271 case IFM_10G_T: 2272 speed |= IXGBE_LINK_SPEED_100_FULL; 2273 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2274 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2275 break; 2276 case IFM_10G_LRM: 2277 case IFM_10G_LR: 2278 #ifndef IFM_ETH_XTYPE 2279 case IFM_10G_SR: /* KR, too */ 2280 case IFM_10G_CX4: /* KX4 */ 2281 #else 2282 case IFM_10G_KR: 2283 case IFM_10G_KX4: 2284 #endif 2285 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2286 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2287 break; 2288 #ifndef IFM_ETH_XTYPE 2289 case IFM_1000_CX: /* KX */ 2290 #else 2291 case IFM_1000_KX: 2292 #endif 2293 case IFM_1000_LX: 2294 case IFM_1000_SX: 2295 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2296 break; 2297 case IFM_1000_T: 2298 speed |= IXGBE_LINK_SPEED_100_FULL; 2299 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2300 break; 2301 case IFM_10G_TWINAX: 2302 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2303 break; 2304 case IFM_100_TX: 2305 speed |= IXGBE_LINK_SPEED_100_FULL; 2306 break; 2307 case IFM_10_T: 2308 speed |= IXGBE_LINK_SPEED_10_FULL; 2309 break; 2310 default: 2311 goto invalid; 2312 } 2313 2314 hw->mac.autotry_restart = TRUE; 2315 hw->mac.ops.setup_link(hw, speed, TRUE); 2316 adapter->advertise = 2317 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 2318 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 2319 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 2320 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0); 2321 2322 return (0); 2323 2324 invalid: 2325 device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); 2326 2327 return (EINVAL); 2328 } /* ixgbe_if_media_change */ 2329 2330 /************************************************************************ 2331 * ixgbe_set_promisc 2332 ************************************************************************/ 2333 static int 2334 ixgbe_if_promisc_set(if_ctx_t ctx, int flags) 2335 { 2336 struct adapter *adapter = iflib_get_softc(ctx); 2337 struct ifnet *ifp = iflib_get_ifp(ctx); 2338 u32 rctl; 2339 int mcnt = 0; 2340 2341 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2342 rctl &= (~IXGBE_FCTRL_UPE); 2343 if (ifp->if_flags & IFF_ALLMULTI) 2344 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2345 else { 2346 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); 2347 } 2348 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2349 rctl &= (~IXGBE_FCTRL_MPE); 2350 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2351 2352 if (ifp->if_flags & IFF_PROMISC) { 2353 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2354 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2355 } else if (ifp->if_flags & IFF_ALLMULTI) { 2356 rctl |= IXGBE_FCTRL_MPE; 2357 rctl &= ~IXGBE_FCTRL_UPE; 2358 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 2359 } 2360 return (0); 2361 } /* ixgbe_if_promisc_set */ 2362 2363 /************************************************************************ 2364 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 2365 ************************************************************************/ 2366 static int 2367 ixgbe_msix_link(void *arg) 2368 { 2369 struct adapter *adapter = arg; 2370 struct ixgbe_hw *hw = &adapter->hw; 2371 u32 eicr, eicr_mask; 2372 s32 retval; 2373 2374 ++adapter->link_irq; 2375 2376 /* Pause other interrupts */ 2377 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 2378 2379 /* First get the cause */ 2380 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2381 /* Be sure the queue bits are not cleared */ 2382 eicr &= ~IXGBE_EICR_RTX_QUEUE; 2383 /* Clear interrupt with write */ 2384 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2385 2386 
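	/*
	 * Each cause handled below only latches a bit in
	 * adapter->task_requests; the deferred admin task performs the
	 * actual work once this filter returns FILTER_SCHEDULE_THREAD.
	 */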
/* Link status change */ 2387 if (eicr & IXGBE_EICR_LSC) { 2388 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2389 adapter->task_requests |= IXGBE_REQUEST_TASK_LSC; 2390 } 2391 2392 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 2393 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && 2394 (eicr & IXGBE_EICR_FLOW_DIR)) { 2395 /* This is probably overkill :) */ 2396 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1)) 2397 return (FILTER_HANDLED); 2398 /* Disable the interrupt */ 2399 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); 2400 adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR; 2401 } else 2402 if (eicr & IXGBE_EICR_ECC) { 2403 device_printf(iflib_get_dev(adapter->ctx), 2404 "\nCRITICAL: ECC ERROR!! Please Reboot!!\n"); 2405 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2406 } 2407 2408 /* Check for over temp condition */ 2409 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 2410 switch (adapter->hw.mac.type) { 2411 case ixgbe_mac_X550EM_a: 2412 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 2413 break; 2414 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 2415 IXGBE_EICR_GPI_SDP0_X550EM_a); 2416 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2417 IXGBE_EICR_GPI_SDP0_X550EM_a); 2418 retval = hw->phy.ops.check_overtemp(hw); 2419 if (retval != IXGBE_ERR_OVERTEMP) 2420 break; 2421 device_printf(iflib_get_dev(adapter->ctx), 2422 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2423 device_printf(iflib_get_dev(adapter->ctx), 2424 "System shutdown required!\n"); 2425 break; 2426 default: 2427 if (!(eicr & IXGBE_EICR_TS)) 2428 break; 2429 retval = hw->phy.ops.check_overtemp(hw); 2430 if (retval != IXGBE_ERR_OVERTEMP) 2431 break; 2432 device_printf(iflib_get_dev(adapter->ctx), 2433 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 2434 device_printf(iflib_get_dev(adapter->ctx), 2435 "System shutdown required!\n"); 2436 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 2437 break; 2438 } 2439 } 2440 2441 /* Check for VF message */ 2442 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) && 2443 (eicr & IXGBE_EICR_MAILBOX)) 2444 adapter->task_requests |= IXGBE_REQUEST_TASK_MBX; 2445 } 2446 2447 if (ixgbe_is_sfp(hw)) { 2448 /* Pluggable optics-related interrupt */ 2449 if (hw->mac.type >= ixgbe_mac_X540) 2450 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 2451 else 2452 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 2453 2454 if (eicr & eicr_mask) { 2455 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2456 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; 2457 } 2458 2459 if ((hw->mac.type == ixgbe_mac_82599EB) && 2460 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 2461 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2462 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2463 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 2464 } 2465 } 2466 2467 /* Check for fan failure */ 2468 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 2469 ixgbe_check_fan_failure(adapter, eicr, TRUE); 2470 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 2471 } 2472 2473 /* External PHY interrupt */ 2474 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 2475 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2476 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 2477 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY; 2478 } 2479 2480 return (adapter->task_requests != 0) ? 
FILTER_SCHEDULE_THREAD : FILTER_HANDLED; 2481 } /* ixgbe_msix_link */ 2482 2483 /************************************************************************ 2484 * ixgbe_sysctl_interrupt_rate_handler 2485 ************************************************************************/ 2486 static int 2487 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) 2488 { 2489 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); 2490 int error; 2491 unsigned int reg, usec, rate; 2492 2493 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); 2494 usec = ((reg & 0x0FF8) >> 3); 2495 if (usec > 0) 2496 rate = 500000 / usec; 2497 else 2498 rate = 0; 2499 error = sysctl_handle_int(oidp, &rate, 0, req); 2500 if (error || !req->newptr) 2501 return error; 2502 reg &= ~0xfff; /* default, no limitation */ 2503 ixgbe_max_interrupt_rate = 0; 2504 if (rate > 0 && rate < 500000) { 2505 if (rate < 1000) 2506 rate = 1000; 2507 ixgbe_max_interrupt_rate = rate; 2508 reg |= ((4000000/rate) & 0xff8); 2509 } 2510 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); 2511 2512 return (0); 2513 } /* ixgbe_sysctl_interrupt_rate_handler */ 2514 2515 /************************************************************************ 2516 * ixgbe_add_device_sysctls 2517 ************************************************************************/ 2518 static void 2519 ixgbe_add_device_sysctls(if_ctx_t ctx) 2520 { 2521 struct adapter *adapter = iflib_get_softc(ctx); 2522 device_t dev = iflib_get_dev(ctx); 2523 struct ixgbe_hw *hw = &adapter->hw; 2524 struct sysctl_oid_list *child; 2525 struct sysctl_ctx_list *ctx_list; 2526 2527 ctx_list = device_get_sysctl_ctx(dev); 2528 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2529 2530 /* Sysctls for all devices */ 2531 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", 2532 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I", 2533 IXGBE_SYSCTL_DESC_SET_FC); 2534 2535 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", 2536 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I", 2537 IXGBE_SYSCTL_DESC_ADV_SPEED); 2538 2539 #ifdef IXGBE_DEBUG 2540 /* testing sysctls (for all devices) */ 2541 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", 2542 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state, 2543 "I", "PCI Power State"); 2544 2545 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", 2546 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0, 2547 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); 2548 #endif 2549 /* for X550 series devices */ 2550 if (hw->mac.type >= ixgbe_mac_X550) 2551 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", 2552 CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac, 2553 "I", "DMA Coalesce"); 2554 2555 /* for WoL-capable devices */ 2556 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2557 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", 2558 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 2559 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); 2560 2561 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", 2562 CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc, 2563 "I", "Enable/Disable Wake Up Filters"); 2564 } 2565 2566 /* for X552/X557-AT devices */ 2567 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2568 struct sysctl_oid *phy_node; 2569 struct sysctl_oid_list *phy_list; 2570 2571 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", 2572 CTLFLAG_RD, NULL, "External PHY sysctls"); 2573 phy_list = SYSCTL_CHILDREN(phy_node); 2574 2575 
SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", 2576 CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp, 2577 "I", "Current External PHY Temperature (Celsius)"); 2578 2579 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, 2580 "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, 2581 ixgbe_sysctl_phy_overtemp_occurred, "I", 2582 "External PHY High Temperature Event Occurred"); 2583 } 2584 2585 if (adapter->feat_cap & IXGBE_FEATURE_EEE) { 2586 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", 2587 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, 2588 ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); 2589 } 2590 } /* ixgbe_add_device_sysctls */ 2591 2592 /************************************************************************ 2593 * ixgbe_allocate_pci_resources 2594 ************************************************************************/ 2595 static int 2596 ixgbe_allocate_pci_resources(if_ctx_t ctx) 2597 { 2598 struct adapter *adapter = iflib_get_softc(ctx); 2599 device_t dev = iflib_get_dev(ctx); 2600 int rid; 2601 2602 rid = PCIR_BAR(0); 2603 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2604 RF_ACTIVE); 2605 2606 if (!(adapter->pci_mem)) { 2607 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2608 return (ENXIO); 2609 } 2610 2611 /* Save bus_space values for READ/WRITE_REG macros */ 2612 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); 2613 adapter->osdep.mem_bus_space_handle = 2614 rman_get_bushandle(adapter->pci_mem); 2615 /* Set hw values for shared code */ 2616 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; 2617 2618 return (0); 2619 } /* ixgbe_allocate_pci_resources */ 2620 2621 /************************************************************************ 2622 * ixgbe_detach - Device removal routine 2623 * 2624 * Called when the driver is being removed. 2625 * Stops the adapter and deallocates all the resources 2626 * that were allocated for driver operation. 
2627 * 2628 * return 0 on success, positive on failure 2629 ************************************************************************/ 2630 static int 2631 ixgbe_if_detach(if_ctx_t ctx) 2632 { 2633 struct adapter *adapter = iflib_get_softc(ctx); 2634 device_t dev = iflib_get_dev(ctx); 2635 u32 ctrl_ext; 2636 2637 INIT_DEBUGOUT("ixgbe_detach: begin"); 2638 2639 if (ixgbe_pci_iov_detach(dev) != 0) { 2640 device_printf(dev, "SR-IOV in use; detach first.\n"); 2641 return (EBUSY); 2642 } 2643 2644 ixgbe_setup_low_power_mode(ctx); 2645 2646 /* let hardware know driver is unloading */ 2647 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 2648 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 2649 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 2650 2651 ixgbe_free_pci_resources(ctx); 2652 free(adapter->mta, M_IXGBE); 2653 2654 return (0); 2655 } /* ixgbe_if_detach */ 2656 2657 /************************************************************************ 2658 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 2659 * 2660 * Prepare the adapter/port for LPLU and/or WoL 2661 ************************************************************************/ 2662 static int 2663 ixgbe_setup_low_power_mode(if_ctx_t ctx) 2664 { 2665 struct adapter *adapter = iflib_get_softc(ctx); 2666 struct ixgbe_hw *hw = &adapter->hw; 2667 device_t dev = iflib_get_dev(ctx); 2668 s32 error = 0; 2669 2670 if (!hw->wol_enabled) 2671 ixgbe_set_phy_power(hw, FALSE); 2672 2673 /* Limit power management flow to X550EM baseT */ 2674 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 2675 hw->phy.ops.enter_lplu) { 2676 /* Turn off support for APM wakeup. (Using ACPI instead) */ 2677 IXGBE_WRITE_REG(hw, IXGBE_GRC, 2678 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); 2679 2680 /* 2681 * Clear Wake Up Status register to prevent any previous wakeup 2682 * events from waking us up immediately after we suspend. 
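 * The WUS bits are write-one-to-clear, hence the all-ones write below.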
2683 */ 2684 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2685 2686 /* 2687 * Program the Wakeup Filter Control register with user filter 2688 * settings 2689 */ 2690 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 2691 2692 /* Enable wakeups and power management in Wakeup Control */ 2693 IXGBE_WRITE_REG(hw, IXGBE_WUC, 2694 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 2695 2696 /* X550EM baseT adapters need a special LPLU flow */ 2697 hw->phy.reset_disable = TRUE; 2698 ixgbe_if_stop(ctx); 2699 error = hw->phy.ops.enter_lplu(hw); 2700 if (error) 2701 device_printf(dev, "Error entering LPLU: %d\n", error); 2702 hw->phy.reset_disable = FALSE; 2703 } else { 2704 /* Just stop for other adapters */ 2705 ixgbe_if_stop(ctx); 2706 } 2707 2708 return error; 2709 } /* ixgbe_setup_low_power_mode */ 2710 2711 /************************************************************************ 2712 * ixgbe_shutdown - Shutdown entry point 2713 ************************************************************************/ 2714 static int 2715 ixgbe_if_shutdown(if_ctx_t ctx) 2716 { 2717 int error = 0; 2718 2719 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 2720 2721 error = ixgbe_setup_low_power_mode(ctx); 2722 2723 return (error); 2724 } /* ixgbe_if_shutdown */ 2725 2726 /************************************************************************ 2727 * ixgbe_suspend 2728 * 2729 * From D0 to D3 2730 ************************************************************************/ 2731 static int 2732 ixgbe_if_suspend(if_ctx_t ctx) 2733 { 2734 int error = 0; 2735 2736 INIT_DEBUGOUT("ixgbe_suspend: begin"); 2737 2738 error = ixgbe_setup_low_power_mode(ctx); 2739 2740 return (error); 2741 } /* ixgbe_if_suspend */ 2742 2743 /************************************************************************ 2744 * ixgbe_resume 2745 * 2746 * From D3 to D0 2747 ************************************************************************/ 2748 static int 2749 ixgbe_if_resume(if_ctx_t ctx) 2750 { 2751 struct adapter *adapter = iflib_get_softc(ctx); 2752 device_t dev = iflib_get_dev(ctx); 2753 struct ifnet *ifp = iflib_get_ifp(ctx); 2754 struct ixgbe_hw *hw = &adapter->hw; 2755 u32 wus; 2756 2757 INIT_DEBUGOUT("ixgbe_resume: begin"); 2758 2759 /* Read & clear WUS register */ 2760 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2761 if (wus) 2762 device_printf(dev, "Woken up by (WUS): %#010x\n", 2763 IXGBE_READ_REG(hw, IXGBE_WUS)); 2764 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2765 /* And clear WUFC until next low-power transition */ 2766 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2767 2768 /* 2769 * Required after D3->D0 transition; 2770 * will re-advertise all previous advertised speeds 2771 */ 2772 if (ifp->if_flags & IFF_UP) 2773 ixgbe_if_init(ctx); 2774 2775 return (0); 2776 } /* ixgbe_if_resume */ 2777 2778 /************************************************************************ 2779 * ixgbe_if_mtu_set - Ioctl mtu entry point 2780 * 2781 * Return 0 on success, EINVAL on failure 2782 ************************************************************************/ 2783 static int 2784 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 2785 { 2786 struct adapter *adapter = iflib_get_softc(ctx); 2787 int error = 0; 2788 2789 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); 2790 2791 if (mtu > IXGBE_MAX_MTU) { 2792 error = EINVAL; 2793 } else { 2794 adapter->max_frame_size = mtu + IXGBE_MTU_HDR; 2795 } 2796 2797 return error; 2798 } /* ixgbe_if_mtu_set */ 2799 2800 /************************************************************************ 2801 * ixgbe_if_crcstrip_set 2802 
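 *
 * Netmap hook: while netmap is active (onoff) with crcstrip disabled,
 * keep the CRC on received frames; otherwise restore the default
 * stripping behaviour in HLREG0 and RDRXCTL.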
************************************************************************/ 2803 static void 2804 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip) 2805 { 2806 struct adapter *sc = iflib_get_softc(ctx); 2807 struct ixgbe_hw *hw = &sc->hw; 2808 /* crc stripping is set in two places: 2809 * IXGBE_HLREG0 (modified on init_locked and hw reset) 2810 * IXGBE_RDRXCTL (set by the original driver in 2811 * ixgbe_setup_hw_rsc() called in init_locked. 2812 * We disable the setting when netmap is compiled in). 2813 * We update the values here, but also in ixgbe.c because 2814 * init_locked sometimes is called outside our control. 2815 */ 2816 uint32_t hl, rxc; 2817 2818 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2819 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2820 #ifdef NETMAP 2821 if (netmap_verbose) 2822 D("%s read HLREG 0x%x rxc 0x%x", 2823 onoff ? "enter" : "exit", hl, rxc); 2824 #endif 2825 /* hw requirements ... */ 2826 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2827 rxc |= IXGBE_RDRXCTL_RSCACKC; 2828 if (onoff && !crcstrip) { 2829 /* keep the crc. Fast rx */ 2830 hl &= ~IXGBE_HLREG0_RXCRCSTRP; 2831 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; 2832 } else { 2833 /* reset default mode */ 2834 hl |= IXGBE_HLREG0_RXCRCSTRP; 2835 rxc |= IXGBE_RDRXCTL_CRCSTRIP; 2836 } 2837 #ifdef NETMAP 2838 if (netmap_verbose) 2839 D("%s write HLREG 0x%x rxc 0x%x", 2840 onoff ? "enter" : "exit", hl, rxc); 2841 #endif 2842 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); 2843 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); 2844 } /* ixgbe_if_crcstrip_set */ 2845 2846 /********************************************************************* 2847 * ixgbe_if_init - Init entry point 2848 * 2849 * Used in two ways: It is used by the stack as an init 2850 * entry point in network interface structure. It is also 2851 * used by the driver as a hw/sw initialization routine to 2852 * get to a consistent state. 2853 * 2854 * Return 0 on success, positive on failure 2855 **********************************************************************/ 2856 void 2857 ixgbe_if_init(if_ctx_t ctx) 2858 { 2859 struct adapter *adapter = iflib_get_softc(ctx); 2860 struct ifnet *ifp = iflib_get_ifp(ctx); 2861 device_t dev = iflib_get_dev(ctx); 2862 struct ixgbe_hw *hw = &adapter->hw; 2863 struct ix_rx_queue *rx_que; 2864 struct ix_tx_queue *tx_que; 2865 u32 txdctl, mhadd; 2866 u32 rxdctl, rxctrl; 2867 u32 ctrl_ext; 2868 2869 int i, j, err; 2870 2871 INIT_DEBUGOUT("ixgbe_if_init: begin"); 2872 2873 /* Queue indices may change with IOV mode */ 2874 ixgbe_align_all_queue_indices(adapter); 2875 2876 /* reprogram the RAR[0] in case user changed it. 
*/ 2877 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); 2878 2879 /* Get the latest mac address, User can use a LAA */ 2880 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 2881 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); 2882 hw->addr_ctrl.rar_used_count = 1; 2883 2884 ixgbe_init_hw(hw); 2885 2886 ixgbe_initialize_iov(adapter); 2887 2888 ixgbe_initialize_transmit_units(ctx); 2889 2890 /* Setup Multicast table */ 2891 ixgbe_if_multi_set(ctx); 2892 2893 /* Determine the correct mbuf pool, based on frame size */ 2894 adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 2895 2896 /* Configure RX settings */ 2897 ixgbe_initialize_receive_units(ctx); 2898 2899 /* 2900 * Initialize variable holding task enqueue requests 2901 * from MSI-X interrupts 2902 */ 2903 adapter->task_requests = 0; 2904 2905 /* Enable SDP & MSI-X interrupts based on adapter */ 2906 ixgbe_config_gpie(adapter); 2907 2908 /* Set MTU size */ 2909 if (ifp->if_mtu > ETHERMTU) { 2910 /* aka IXGBE_MAXFRS on 82599 and newer */ 2911 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 2912 mhadd &= ~IXGBE_MHADD_MFS_MASK; 2913 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 2914 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 2915 } 2916 2917 /* Now enable all the queues */ 2918 for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { 2919 struct tx_ring *txr = &tx_que->txr; 2920 2921 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 2922 txdctl |= IXGBE_TXDCTL_ENABLE; 2923 /* Set WTHRESH to 8, burst writeback */ 2924 txdctl |= (8 << 16); 2925 /* 2926 * When the internal queue falls below PTHRESH (32), 2927 * start prefetching as long as there are at least 2928 * HTHRESH (1) buffers ready. The values are taken 2929 * from the Intel linux driver 3.8.21. 2930 * Prefetching enables tx line rate even with 1 queue. 
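 * In TXDCTL the thresholds sit at PTHRESH bits 6:0, HTHRESH bits 14:8
 * and WTHRESH bits 22:16, so together with the (8 << 16) above the
 * register ends up with PTHRESH=32, HTHRESH=1, WTHRESH=8.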
2931 */ 2932 txdctl |= (32 << 0) | (1 << 8); 2933 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 2934 } 2935 2936 for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { 2937 struct rx_ring *rxr = &rx_que->rxr; 2938 2939 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 2940 if (hw->mac.type == ixgbe_mac_82598EB) { 2941 /* 2942 * PTHRESH = 21 2943 * HTHRESH = 4 2944 * WTHRESH = 8 2945 */ 2946 rxdctl &= ~0x3FFFFF; 2947 rxdctl |= 0x080420; 2948 } 2949 rxdctl |= IXGBE_RXDCTL_ENABLE; 2950 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 2951 for (j = 0; j < 10; j++) { 2952 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 2953 IXGBE_RXDCTL_ENABLE) 2954 break; 2955 else 2956 msec_delay(1); 2957 } 2958 wmb(); 2959 } 2960 2961 /* Enable Receive engine */ 2962 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2963 if (hw->mac.type == ixgbe_mac_82598EB) 2964 rxctrl |= IXGBE_RXCTRL_DMBYPS; 2965 rxctrl |= IXGBE_RXCTRL_RXEN; 2966 ixgbe_enable_rx_dma(hw, rxctrl); 2967 2968 /* Set up MSI/MSI-X routing */ 2969 if (ixgbe_enable_msix) { 2970 ixgbe_configure_ivars(adapter); 2971 /* Set up auto-mask */ 2972 if (hw->mac.type == ixgbe_mac_82598EB) 2973 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2974 else { 2975 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 2976 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 2977 } 2978 } else { /* Simple settings for Legacy/MSI */ 2979 ixgbe_set_ivar(adapter, 0, 0, 0); 2980 ixgbe_set_ivar(adapter, 0, 0, 1); 2981 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2982 } 2983 2984 ixgbe_init_fdir(adapter); 2985 2986 /* 2987 * Check on any SFP devices that 2988 * need to be kick-started 2989 */ 2990 if (hw->phy.type == ixgbe_phy_none) { 2991 err = hw->phy.ops.identify(hw); 2992 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 2993 device_printf(dev, 2994 "Unsupported SFP+ module type was detected.\n"); 2995 return; 2996 } 2997 } 2998 2999 /* Set moderation on the Link interrupt */ 3000 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR); 3001 3002 /* Enable power to the phy. 
*/ 3003 ixgbe_set_phy_power(hw, TRUE); 3004 3005 /* Config/Enable Link */ 3006 ixgbe_config_link(ctx); 3007 3008 /* Hardware Packet Buffer & Flow Control setup */ 3009 ixgbe_config_delay_values(adapter); 3010 3011 /* Initialize the FC settings */ 3012 ixgbe_start_hw(hw); 3013 3014 /* Set up VLAN support and filter */ 3015 ixgbe_setup_vlan_hw_support(ctx); 3016 3017 /* Setup DMA Coalescing */ 3018 ixgbe_config_dmac(adapter); 3019 3020 /* And now turn on interrupts */ 3021 ixgbe_if_enable_intr(ctx); 3022 3023 /* Enable the use of the MBX by the VF's */ 3024 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { 3025 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3026 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 3027 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3028 } 3029 3030 } /* ixgbe_init_locked */ 3031 3032 /************************************************************************ 3033 * ixgbe_set_ivar 3034 * 3035 * Setup the correct IVAR register for a particular MSI-X interrupt 3036 * (yes this is all very magic and confusing :) 3037 * - entry is the register array entry 3038 * - vector is the MSI-X vector for this queue 3039 * - type is RX/TX/MISC 3040 ************************************************************************/ 3041 static void 3042 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 3043 { 3044 struct ixgbe_hw *hw = &adapter->hw; 3045 u32 ivar, index; 3046 3047 vector |= IXGBE_IVAR_ALLOC_VAL; 3048 3049 switch (hw->mac.type) { 3050 case ixgbe_mac_82598EB: 3051 if (type == -1) 3052 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3053 else 3054 entry += (type * 64); 3055 index = (entry >> 2) & 0x1F; 3056 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3057 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3058 ivar |= (vector << (8 * (entry & 0x3))); 3059 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 3060 break; 3061 case ixgbe_mac_82599EB: 3062 case ixgbe_mac_X540: 3063 case ixgbe_mac_X550: 3064 case ixgbe_mac_X550EM_x: 3065 case ixgbe_mac_X550EM_a: 3066 if (type == -1) { /* MISC IVAR */ 3067 index = (entry & 1) * 8; 3068 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3069 ivar &= ~(0xFF << index); 3070 ivar |= (vector << index); 3071 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3072 } else { /* RX/TX IVARS */ 3073 index = (16 * (entry & 1)) + (8 * type); 3074 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3075 ivar &= ~(0xFF << index); 3076 ivar |= (vector << index); 3077 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3078 } 3079 default: 3080 break; 3081 } 3082 } /* ixgbe_set_ivar */ 3083 3084 /************************************************************************ 3085 * ixgbe_configure_ivars 3086 ************************************************************************/ 3087 static void 3088 ixgbe_configure_ivars(struct adapter *adapter) 3089 { 3090 struct ix_rx_queue *rx_que = adapter->rx_queues; 3091 struct ix_tx_queue *tx_que = adapter->tx_queues; 3092 u32 newitr; 3093 3094 if (ixgbe_max_interrupt_rate > 0) 3095 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3096 else { 3097 /* 3098 * Disable DMA coalescing if interrupt moderation is 3099 * disabled. 
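 * (When moderation is enabled, the 11:3 field of EITR counts the
 * interval in 2us units; e.g. a requested rate of 4000 irq/s programs
 * 4000000/4000 = 1000, i.e. roughly 250us between interrupts.)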
3100 */ 3101 adapter->dmac = 0; 3102 newitr = 0; 3103 } 3104 3105 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) { 3106 struct rx_ring *rxr = &rx_que->rxr; 3107 3108 /* First the RX queue entry */ 3109 ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0); 3110 3111 /* Set an Initial EITR value */ 3112 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr); 3113 } 3114 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) { 3115 struct tx_ring *txr = &tx_que->txr; 3116 3117 /* ... and the TX */ 3118 ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1); 3119 } 3120 /* For the Link interrupt */ 3121 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 3122 } /* ixgbe_configure_ivars */ 3123 3124 /************************************************************************ 3125 * ixgbe_config_gpie 3126 ************************************************************************/ 3127 static void 3128 ixgbe_config_gpie(struct adapter *adapter) 3129 { 3130 struct ixgbe_hw *hw = &adapter->hw; 3131 u32 gpie; 3132 3133 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3134 3135 if (adapter->intr_type == IFLIB_INTR_MSIX) { 3136 /* Enable Enhanced MSI-X mode */ 3137 gpie |= IXGBE_GPIE_MSIX_MODE 3138 | IXGBE_GPIE_EIAME 3139 | IXGBE_GPIE_PBA_SUPPORT 3140 | IXGBE_GPIE_OCD; 3141 } 3142 3143 /* Fan Failure Interrupt */ 3144 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 3145 gpie |= IXGBE_SDP1_GPIEN; 3146 3147 /* Thermal Sensor Interrupt */ 3148 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 3149 gpie |= IXGBE_SDP0_GPIEN_X540; 3150 3151 /* Link detection */ 3152 switch (hw->mac.type) { 3153 case ixgbe_mac_82599EB: 3154 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 3155 break; 3156 case ixgbe_mac_X550EM_x: 3157 case ixgbe_mac_X550EM_a: 3158 gpie |= IXGBE_SDP0_GPIEN_X540; 3159 break; 3160 default: 3161 break; 3162 } 3163 3164 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3165 3166 } /* ixgbe_config_gpie */ 3167 3168 /************************************************************************ 3169 * ixgbe_config_delay_values 3170 * 3171 * Requires adapter->max_frame_size to be set. 3172 ************************************************************************/ 3173 static void 3174 ixgbe_config_delay_values(struct adapter *adapter) 3175 { 3176 struct ixgbe_hw *hw = &adapter->hw; 3177 u32 rxpb, frame, size, tmp; 3178 3179 frame = adapter->max_frame_size; 3180 3181 /* Calculate High Water */ 3182 switch (hw->mac.type) { 3183 case ixgbe_mac_X540: 3184 case ixgbe_mac_X550: 3185 case ixgbe_mac_X550EM_x: 3186 case ixgbe_mac_X550EM_a: 3187 tmp = IXGBE_DV_X540(frame, frame); 3188 break; 3189 default: 3190 tmp = IXGBE_DV(frame, frame); 3191 break; 3192 } 3193 size = IXGBE_BT2KB(tmp); 3194 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 3195 hw->fc.high_water[0] = rxpb - size; 3196 3197 /* Now calculate Low Water */ 3198 switch (hw->mac.type) { 3199 case ixgbe_mac_X540: 3200 case ixgbe_mac_X550: 3201 case ixgbe_mac_X550EM_x: 3202 case ixgbe_mac_X550EM_a: 3203 tmp = IXGBE_LOW_DV_X540(frame); 3204 break; 3205 default: 3206 tmp = IXGBE_LOW_DV(frame); 3207 break; 3208 } 3209 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 3210 3211 hw->fc.pause_time = IXGBE_FC_PAUSE; 3212 hw->fc.send_xon = TRUE; 3213 } /* ixgbe_config_delay_values */ 3214 3215 /************************************************************************ 3216 * ixgbe_set_multi - Multicast Update 3217 * 3218 * Called whenever multicast address list is updated. 
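 * The MTA is rebuilt from the interface's link-level multicast list via
 * if_foreach_llmaddr(); if the list overflows MAX_NUM_MULTICAST_ADDRESSES
 * the hardware is put in multicast-promiscuous mode instead.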
3219 ************************************************************************/ 3220 static u_int 3221 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count) 3222 { 3223 struct adapter *adapter = arg; 3224 struct ixgbe_mc_addr *mta = adapter->mta; 3225 3226 if (count == MAX_NUM_MULTICAST_ADDRESSES) 3227 return (0); 3228 bcopy(LLADDR(sdl), mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3229 mta[count].vmdq = adapter->pool; 3230 3231 return (1); 3232 } /* ixgbe_mc_filter_apply */ 3233 3234 static void 3235 ixgbe_if_multi_set(if_ctx_t ctx) 3236 { 3237 struct adapter *adapter = iflib_get_softc(ctx); 3238 struct ixgbe_mc_addr *mta; 3239 struct ifnet *ifp = iflib_get_ifp(ctx); 3240 u8 *update_ptr; 3241 u32 fctrl; 3242 u_int mcnt; 3243 3244 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin"); 3245 3246 mta = adapter->mta; 3247 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 3248 3249 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, 3250 adapter); 3251 3252 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3253 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3254 if (ifp->if_flags & IFF_PROMISC) 3255 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3256 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 3257 ifp->if_flags & IFF_ALLMULTI) { 3258 fctrl |= IXGBE_FCTRL_MPE; 3259 fctrl &= ~IXGBE_FCTRL_UPE; 3260 } else 3261 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3262 3263 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 3264 3265 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { 3266 update_ptr = (u8 *)mta; 3267 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 3268 ixgbe_mc_array_itr, TRUE); 3269 } 3270 3271 } /* ixgbe_if_multi_set */ 3272 3273 /************************************************************************ 3274 * ixgbe_mc_array_itr 3275 * 3276 * An iterator function needed by the multicast shared code. 3277 * It feeds the shared code routine the addresses in the 3278 * array of ixgbe_set_multi() one by one. 3279 ************************************************************************/ 3280 static u8 * 3281 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 3282 { 3283 struct ixgbe_mc_addr *mta; 3284 3285 mta = (struct ixgbe_mc_addr *)*update_ptr; 3286 *vmdq = mta->vmdq; 3287 3288 *update_ptr = (u8*)(mta + 1); 3289 3290 return (mta->addr); 3291 } /* ixgbe_mc_array_itr */ 3292 3293 /************************************************************************ 3294 * ixgbe_local_timer - Timer routine 3295 * 3296 * Checks for link status, updates statistics, 3297 * and runs the watchdog check. 3298 ************************************************************************/ 3299 static void 3300 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid) 3301 { 3302 struct adapter *adapter = iflib_get_softc(ctx); 3303 3304 if (qid != 0) 3305 return; 3306 3307 /* Check for pluggable optics */ 3308 if (adapter->sfp_probe) 3309 if (!ixgbe_sfp_probe(ctx)) 3310 return; /* Nothing to do */ 3311 3312 ixgbe_check_link(&adapter->hw, &adapter->link_speed, 3313 &adapter->link_up, 0); 3314 3315 /* Fire off the adminq task */ 3316 iflib_admin_intr_deferred(ctx); 3317 3318 } /* ixgbe_if_timer */ 3319 3320 /************************************************************************ 3321 * ixgbe_sfp_probe 3322 * 3323 * Determine if a port had optics inserted. 
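 *
 * Called from the timer routine while adapter->sfp_probe is set.  Once
 * a module has been identified the probe flag is cleared, and TRUE is
 * returned only when the inserted optics are supported.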
3324 ************************************************************************/ 3325 static bool 3326 ixgbe_sfp_probe(if_ctx_t ctx) 3327 { 3328 struct adapter *adapter = iflib_get_softc(ctx); 3329 struct ixgbe_hw *hw = &adapter->hw; 3330 device_t dev = iflib_get_dev(ctx); 3331 bool result = FALSE; 3332 3333 if ((hw->phy.type == ixgbe_phy_nl) && 3334 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3335 s32 ret = hw->phy.ops.identify_sfp(hw); 3336 if (ret) 3337 goto out; 3338 ret = hw->phy.ops.reset(hw); 3339 adapter->sfp_probe = FALSE; 3340 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3341 device_printf(dev, "Unsupported SFP+ module detected!"); 3342 device_printf(dev, 3343 "Reload driver with supported module.\n"); 3344 goto out; 3345 } else 3346 device_printf(dev, "SFP+ module detected!\n"); 3347 /* We now have supported optics */ 3348 result = TRUE; 3349 } 3350 out: 3351 3352 return (result); 3353 } /* ixgbe_sfp_probe */ 3354 3355 /************************************************************************ 3356 * ixgbe_handle_mod - Tasklet for SFP module interrupts 3357 ************************************************************************/ 3358 static void 3359 ixgbe_handle_mod(void *context) 3360 { 3361 if_ctx_t ctx = context; 3362 struct adapter *adapter = iflib_get_softc(ctx); 3363 struct ixgbe_hw *hw = &adapter->hw; 3364 device_t dev = iflib_get_dev(ctx); 3365 u32 err, cage_full = 0; 3366 3367 if (adapter->hw.need_crosstalk_fix) { 3368 switch (hw->mac.type) { 3369 case ixgbe_mac_82599EB: 3370 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3371 IXGBE_ESDP_SDP2; 3372 break; 3373 case ixgbe_mac_X550EM_x: 3374 case ixgbe_mac_X550EM_a: 3375 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3376 IXGBE_ESDP_SDP0; 3377 break; 3378 default: 3379 break; 3380 } 3381 3382 if (!cage_full) 3383 goto handle_mod_out; 3384 } 3385 3386 err = hw->phy.ops.identify_sfp(hw); 3387 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3388 device_printf(dev, 3389 "Unsupported SFP+ module type was detected.\n"); 3390 goto handle_mod_out; 3391 } 3392 3393 if (hw->mac.type == ixgbe_mac_82598EB) 3394 err = hw->phy.ops.reset(hw); 3395 else 3396 err = hw->mac.ops.setup_sfp(hw); 3397 3398 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3399 device_printf(dev, 3400 "Setup failure - unsupported SFP+ module type.\n"); 3401 goto handle_mod_out; 3402 } 3403 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 3404 return; 3405 3406 handle_mod_out: 3407 adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF); 3408 } /* ixgbe_handle_mod */ 3409 3410 3411 /************************************************************************ 3412 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts 3413 ************************************************************************/ 3414 static void 3415 ixgbe_handle_msf(void *context) 3416 { 3417 if_ctx_t ctx = context; 3418 struct adapter *adapter = iflib_get_softc(ctx); 3419 struct ixgbe_hw *hw = &adapter->hw; 3420 u32 autoneg; 3421 bool negotiate; 3422 3423 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 3424 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 3425 3426 autoneg = hw->phy.autoneg_advertised; 3427 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 3428 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 3429 if (hw->mac.ops.setup_link) 3430 hw->mac.ops.setup_link(hw, autoneg, TRUE); 3431 3432 /* Adjust media types shown in ifconfig */ 3433 ifmedia_removeall(adapter->media); 3434 ixgbe_add_media_types(adapter->ctx); 3435 ifmedia_set(adapter->media, 
IFM_ETHER | IFM_AUTO); 3436 } /* ixgbe_handle_msf */ 3437 3438 /************************************************************************ 3439 * ixgbe_handle_phy - Tasklet for external PHY interrupts 3440 ************************************************************************/ 3441 static void 3442 ixgbe_handle_phy(void *context) 3443 { 3444 if_ctx_t ctx = context; 3445 struct adapter *adapter = iflib_get_softc(ctx); 3446 struct ixgbe_hw *hw = &adapter->hw; 3447 int error; 3448 3449 error = hw->phy.ops.handle_lasi(hw); 3450 if (error == IXGBE_ERR_OVERTEMP) 3451 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n"); 3452 else if (error) 3453 device_printf(adapter->dev, 3454 "Error handling LASI interrupt: %d\n", error); 3455 } /* ixgbe_handle_phy */ 3456 3457 /************************************************************************ 3458 * ixgbe_if_stop - Stop the hardware 3459 * 3460 * Disables all traffic on the adapter by issuing a 3461 * global reset on the MAC and deallocates TX/RX buffers. 3462 ************************************************************************/ 3463 static void 3464 ixgbe_if_stop(if_ctx_t ctx) 3465 { 3466 struct adapter *adapter = iflib_get_softc(ctx); 3467 struct ixgbe_hw *hw = &adapter->hw; 3468 3469 INIT_DEBUGOUT("ixgbe_if_stop: begin\n"); 3470 3471 ixgbe_reset_hw(hw); 3472 hw->adapter_stopped = FALSE; 3473 ixgbe_stop_adapter(hw); 3474 if (hw->mac.type == ixgbe_mac_82599EB) 3475 ixgbe_stop_mac_link_on_d3_82599(hw); 3476 /* Turn off the laser - noop with no optics */ 3477 ixgbe_disable_tx_laser(hw); 3478 3479 /* Update the stack */ 3480 adapter->link_up = FALSE; 3481 ixgbe_if_update_admin_status(ctx); 3482 3483 /* reprogram the RAR[0] in case user changed it. */ 3484 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 3485 3486 return; 3487 } /* ixgbe_if_stop */ 3488 3489 /************************************************************************ 3490 * ixgbe_update_link_status - Update OS on link state 3491 * 3492 * Note: Only updates the OS on the cached link state. 3493 * The real check of the hardware only happens with 3494 * a link interrupt. 3495 ************************************************************************/ 3496 static void 3497 ixgbe_if_update_admin_status(if_ctx_t ctx) 3498 { 3499 struct adapter *adapter = iflib_get_softc(ctx); 3500 device_t dev = iflib_get_dev(ctx); 3501 3502 if (adapter->link_up) { 3503 if (adapter->link_active == FALSE) { 3504 if (bootverbose) 3505 device_printf(dev, "Link is up %d Gbps %s \n", 3506 ((adapter->link_speed == 128) ? 
10 : 1), 3507 "Full Duplex"); 3508 adapter->link_active = TRUE; 3509 /* Update any Flow Control changes */ 3510 ixgbe_fc_enable(&adapter->hw); 3511 /* Update DMA coalescing config */ 3512 ixgbe_config_dmac(adapter); 3513 /* should actually be negotiated value */ 3514 iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); 3515 3516 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3517 ixgbe_ping_all_vfs(adapter); 3518 } 3519 } else { /* Link down */ 3520 if (adapter->link_active == TRUE) { 3521 if (bootverbose) 3522 device_printf(dev, "Link is Down\n"); 3523 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 3524 adapter->link_active = FALSE; 3525 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3526 ixgbe_ping_all_vfs(adapter); 3527 } 3528 } 3529 3530 /* Handle task requests from msix_link() */ 3531 if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD) 3532 ixgbe_handle_mod(ctx); 3533 if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF) 3534 ixgbe_handle_msf(ctx); 3535 if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX) 3536 ixgbe_handle_mbx(ctx); 3537 if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR) 3538 ixgbe_reinit_fdir(ctx); 3539 if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY) 3540 ixgbe_handle_phy(ctx); 3541 adapter->task_requests = 0; 3542 3543 ixgbe_update_stats_counters(adapter); 3544 } /* ixgbe_if_update_admin_status */ 3545 3546 /************************************************************************ 3547 * ixgbe_config_dmac - Configure DMA Coalescing 3548 ************************************************************************/ 3549 static void 3550 ixgbe_config_dmac(struct adapter *adapter) 3551 { 3552 struct ixgbe_hw *hw = &adapter->hw; 3553 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 3554 3555 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) 3556 return; 3557 3558 if (dcfg->watchdog_timer ^ adapter->dmac || 3559 dcfg->link_speed ^ adapter->link_speed) { 3560 dcfg->watchdog_timer = adapter->dmac; 3561 dcfg->fcoe_en = FALSE; 3562 dcfg->link_speed = adapter->link_speed; 3563 dcfg->num_tcs = 1; 3564 3565 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 3566 dcfg->watchdog_timer, dcfg->link_speed); 3567 3568 hw->mac.ops.dmac_config(hw); 3569 } 3570 } /* ixgbe_config_dmac */ 3571 3572 /************************************************************************ 3573 * ixgbe_if_enable_intr 3574 ************************************************************************/ 3575 void 3576 ixgbe_if_enable_intr(if_ctx_t ctx) 3577 { 3578 struct adapter *adapter = iflib_get_softc(ctx); 3579 struct ixgbe_hw *hw = &adapter->hw; 3580 struct ix_rx_queue *que = adapter->rx_queues; 3581 u32 mask, fwsm; 3582 3583 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 3584 3585 switch (adapter->hw.mac.type) { 3586 case ixgbe_mac_82599EB: 3587 mask |= IXGBE_EIMS_ECC; 3588 /* Temperature sensor on some adapters */ 3589 mask |= IXGBE_EIMS_GPI_SDP0; 3590 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 3591 mask |= IXGBE_EIMS_GPI_SDP1; 3592 mask |= IXGBE_EIMS_GPI_SDP2; 3593 break; 3594 case ixgbe_mac_X540: 3595 /* Detect if Thermal Sensor is enabled */ 3596 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 3597 if (fwsm & IXGBE_FWSM_TS_ENABLED) 3598 mask |= IXGBE_EIMS_TS; 3599 mask |= IXGBE_EIMS_ECC; 3600 break; 3601 case ixgbe_mac_X550: 3602 /* MAC thermal sensor is automatically enabled */ 3603 mask |= IXGBE_EIMS_TS; 3604 mask |= IXGBE_EIMS_ECC; 3605 break; 3606 case ixgbe_mac_X550EM_x: 3607 case ixgbe_mac_X550EM_a: 3608 /* Some devices use SDP0 for important information */ 3609 if 
(hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3610 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 3611 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 3612 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) 3613 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 3614 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 3615 mask |= IXGBE_EICR_GPI_SDP0_X540; 3616 mask |= IXGBE_EIMS_ECC; 3617 break; 3618 default: 3619 break; 3620 } 3621 3622 /* Enable Fan Failure detection */ 3623 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 3624 mask |= IXGBE_EIMS_GPI_SDP1; 3625 /* Enable SR-IOV */ 3626 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 3627 mask |= IXGBE_EIMS_MAILBOX; 3628 /* Enable Flow Director */ 3629 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 3630 mask |= IXGBE_EIMS_FLOW_DIR; 3631 3632 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3633 3634 /* With MSI-X we use auto clear */ 3635 if (adapter->intr_type == IFLIB_INTR_MSIX) { 3636 mask = IXGBE_EIMS_ENABLE_MASK; 3637 /* Don't autoclear Link */ 3638 mask &= ~IXGBE_EIMS_OTHER; 3639 mask &= ~IXGBE_EIMS_LSC; 3640 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 3641 mask &= ~IXGBE_EIMS_MAILBOX; 3642 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 3643 } 3644 3645 /* 3646 * Now enable all queues, this is done separately to 3647 * allow for handling the extended (beyond 32) MSI-X 3648 * vectors that can be used by 82599 3649 */ 3650 for (int i = 0; i < adapter->num_rx_queues; i++, que++) 3651 ixgbe_enable_queue(adapter, que->msix); 3652 3653 IXGBE_WRITE_FLUSH(hw); 3654 3655 } /* ixgbe_if_enable_intr */ 3656 3657 /************************************************************************ 3658 * ixgbe_disable_intr 3659 ************************************************************************/ 3660 static void 3661 ixgbe_if_disable_intr(if_ctx_t ctx) 3662 { 3663 struct adapter *adapter = iflib_get_softc(ctx); 3664 3665 if (adapter->intr_type == IFLIB_INTR_MSIX) 3666 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); 3667 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3668 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3669 } else { 3670 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3671 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3672 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3673 } 3674 IXGBE_WRITE_FLUSH(&adapter->hw); 3675 3676 } /* ixgbe_if_disable_intr */ 3677 3678 /************************************************************************ 3679 * ixgbe_link_intr_enable 3680 ************************************************************************/ 3681 static void 3682 ixgbe_link_intr_enable(if_ctx_t ctx) 3683 { 3684 struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw; 3685 3686 /* Re-enable other interrupts */ 3687 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 3688 } /* ixgbe_link_intr_enable */ 3689 3690 /************************************************************************ 3691 * ixgbe_if_rx_queue_intr_enable 3692 ************************************************************************/ 3693 static int 3694 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 3695 { 3696 struct adapter *adapter = iflib_get_softc(ctx); 3697 struct ix_rx_queue *que = &adapter->rx_queues[rxqid]; 3698 3699 ixgbe_enable_queue(adapter, que->msix); 3700 3701 return (0); 3702 } /* ixgbe_if_rx_queue_intr_enable */ 3703 3704 /************************************************************************ 3705 * ixgbe_enable_queue 3706 ************************************************************************/ 3707 static void 3708 ixgbe_enable_queue(struct 
adapter *adapter, u32 vector) 3709 { 3710 struct ixgbe_hw *hw = &adapter->hw; 3711 u64 queue = 1ULL << vector; 3712 u32 mask; 3713 3714 if (hw->mac.type == ixgbe_mac_82598EB) { 3715 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3716 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 3717 } else { 3718 mask = (queue & 0xFFFFFFFF); 3719 if (mask) 3720 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 3721 mask = (queue >> 32); 3722 if (mask) 3723 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 3724 } 3725 } /* ixgbe_enable_queue */ 3726 3727 /************************************************************************ 3728 * ixgbe_disable_queue 3729 ************************************************************************/ 3730 static void 3731 ixgbe_disable_queue(struct adapter *adapter, u32 vector) 3732 { 3733 struct ixgbe_hw *hw = &adapter->hw; 3734 u64 queue = 1ULL << vector; 3735 u32 mask; 3736 3737 if (hw->mac.type == ixgbe_mac_82598EB) { 3738 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 3739 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 3740 } else { 3741 mask = (queue & 0xFFFFFFFF); 3742 if (mask) 3743 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 3744 mask = (queue >> 32); 3745 if (mask) 3746 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 3747 } 3748 } /* ixgbe_disable_queue */ 3749 3750 /************************************************************************ 3751 * ixgbe_intr - Legacy Interrupt Service Routine 3752 ************************************************************************/ 3753 int 3754 ixgbe_intr(void *arg) 3755 { 3756 struct adapter *adapter = arg; 3757 struct ix_rx_queue *que = adapter->rx_queues; 3758 struct ixgbe_hw *hw = &adapter->hw; 3759 if_ctx_t ctx = adapter->ctx; 3760 u32 eicr, eicr_mask; 3761 3762 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3763 3764 ++que->irqs; 3765 if (eicr == 0) { 3766 ixgbe_if_enable_intr(ctx); 3767 return (FILTER_HANDLED); 3768 } 3769 3770 /* Check for fan failure */ 3771 if ((hw->device_id == IXGBE_DEV_ID_82598AT) && 3772 (eicr & IXGBE_EICR_GPI_SDP1)) { 3773 device_printf(adapter->dev, 3774 "\nCRITICAL: FAN FAILURE!! 
REPLACE IMMEDIATELY!!\n"); 3775 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3776 } 3777 3778 /* Link status change */ 3779 if (eicr & IXGBE_EICR_LSC) { 3780 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 3781 iflib_admin_intr_deferred(ctx); 3782 } 3783 3784 if (ixgbe_is_sfp(hw)) { 3785 /* Pluggable optics-related interrupt */ 3786 if (hw->mac.type >= ixgbe_mac_X540) 3787 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 3788 else 3789 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 3790 3791 if (eicr & eicr_mask) { 3792 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 3793 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; 3794 } 3795 3796 if ((hw->mac.type == ixgbe_mac_82599EB) && 3797 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 3798 IXGBE_WRITE_REG(hw, IXGBE_EICR, 3799 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3800 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 3801 } 3802 } 3803 3804 /* External PHY interrupt */ 3805 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 3806 (eicr & IXGBE_EICR_GPI_SDP0_X540)) 3807 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY; 3808 3809 return (FILTER_SCHEDULE_THREAD); 3810 } /* ixgbe_intr */ 3811 3812 /************************************************************************ 3813 * ixgbe_free_pci_resources 3814 ************************************************************************/ 3815 static void 3816 ixgbe_free_pci_resources(if_ctx_t ctx) 3817 { 3818 struct adapter *adapter = iflib_get_softc(ctx); 3819 struct ix_rx_queue *que = adapter->rx_queues; 3820 device_t dev = iflib_get_dev(ctx); 3821 3822 /* Release all MSI-X queue resources */ 3823 if (adapter->intr_type == IFLIB_INTR_MSIX) 3824 iflib_irq_free(ctx, &adapter->irq); 3825 3826 if (que != NULL) { 3827 for (int i = 0; i < adapter->num_rx_queues; i++, que++) { 3828 iflib_irq_free(ctx, &que->que_irq); 3829 } 3830 } 3831 3832 if (adapter->pci_mem != NULL) 3833 bus_release_resource(dev, SYS_RES_MEMORY, 3834 rman_get_rid(adapter->pci_mem), adapter->pci_mem); 3835 } /* ixgbe_free_pci_resources */ 3836 3837 /************************************************************************ 3838 * ixgbe_sysctl_flowcntl 3839 * 3840 * SYSCTL wrapper around setting Flow Control 3841 ************************************************************************/ 3842 static int 3843 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS) 3844 { 3845 struct adapter *adapter; 3846 int error, fc; 3847 3848 adapter = (struct adapter *)arg1; 3849 fc = adapter->hw.fc.current_mode; 3850 3851 error = sysctl_handle_int(oidp, &fc, 0, req); 3852 if ((error) || (req->newptr == NULL)) 3853 return (error); 3854 3855 /* Don't bother if it's not changed */ 3856 if (fc == adapter->hw.fc.current_mode) 3857 return (0); 3858 3859 return ixgbe_set_flowcntl(adapter, fc); 3860 } /* ixgbe_sysctl_flowcntl */ 3861 3862 /************************************************************************ 3863 * ixgbe_set_flowcntl - Set flow control 3864 * 3865 * Flow control values: 3866 * 0 - off 3867 * 1 - rx pause 3868 * 2 - tx pause 3869 * 3 - full 3870 ************************************************************************/ 3871 static int 3872 ixgbe_set_flowcntl(struct adapter *adapter, int fc) 3873 { 3874 switch (fc) { 3875 case ixgbe_fc_rx_pause: 3876 case ixgbe_fc_tx_pause: 3877 case ixgbe_fc_full: 3878 adapter->hw.fc.requested_mode = fc; 3879 if (adapter->num_rx_queues > 1) 3880 ixgbe_disable_rx_drop(adapter); 3881 break; 3882 case ixgbe_fc_none: 3883 adapter->hw.fc.requested_mode = ixgbe_fc_none; 3884 if (adapter->num_rx_queues > 1) 3885 ixgbe_enable_rx_drop(adapter); 
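		/*
		 * With flow control off, per-queue drop is turned on above
		 * so a single full RX ring cannot stall the other queues
		 * (see ixgbe_enable_rx_drop() below).
		 */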
3886 break; 3887 default: 3888 return (EINVAL); 3889 } 3890 3891 /* Don't autoneg if forcing a value */ 3892 adapter->hw.fc.disable_fc_autoneg = TRUE; 3893 ixgbe_fc_enable(&adapter->hw); 3894 3895 return (0); 3896 } /* ixgbe_set_flowcntl */ 3897 3898 /************************************************************************ 3899 * ixgbe_enable_rx_drop 3900 * 3901 * Enable the hardware to drop packets when the buffer is 3902 * full. This is useful with multiqueue, so that no single 3903 * queue being full stalls the entire RX engine. We only 3904 * enable this when Multiqueue is enabled AND Flow Control 3905 * is disabled. 3906 ************************************************************************/ 3907 static void 3908 ixgbe_enable_rx_drop(struct adapter *adapter) 3909 { 3910 struct ixgbe_hw *hw = &adapter->hw; 3911 struct rx_ring *rxr; 3912 u32 srrctl; 3913 3914 for (int i = 0; i < adapter->num_rx_queues; i++) { 3915 rxr = &adapter->rx_queues[i].rxr; 3916 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 3917 srrctl |= IXGBE_SRRCTL_DROP_EN; 3918 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 3919 } 3920 3921 /* enable drop for each vf */ 3922 for (int i = 0; i < adapter->num_vfs; i++) { 3923 IXGBE_WRITE_REG(hw, IXGBE_QDE, 3924 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | 3925 IXGBE_QDE_ENABLE)); 3926 } 3927 } /* ixgbe_enable_rx_drop */ 3928 3929 /************************************************************************ 3930 * ixgbe_disable_rx_drop 3931 ************************************************************************/ 3932 static void 3933 ixgbe_disable_rx_drop(struct adapter *adapter) 3934 { 3935 struct ixgbe_hw *hw = &adapter->hw; 3936 struct rx_ring *rxr; 3937 u32 srrctl; 3938 3939 for (int i = 0; i < adapter->num_rx_queues; i++) { 3940 rxr = &adapter->rx_queues[i].rxr; 3941 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 3942 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 3943 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 3944 } 3945 3946 /* disable drop for each vf */ 3947 for (int i = 0; i < adapter->num_vfs; i++) { 3948 IXGBE_WRITE_REG(hw, IXGBE_QDE, 3949 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); 3950 } 3951 } /* ixgbe_disable_rx_drop */ 3952 3953 /************************************************************************ 3954 * ixgbe_sysctl_advertise 3955 * 3956 * SYSCTL wrapper around setting advertised speed 3957 ************************************************************************/ 3958 static int 3959 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS) 3960 { 3961 struct adapter *adapter; 3962 int error, advertise; 3963 3964 adapter = (struct adapter *)arg1; 3965 advertise = adapter->advertise; 3966 3967 error = sysctl_handle_int(oidp, &advertise, 0, req); 3968 if ((error) || (req->newptr == NULL)) 3969 return (error); 3970 3971 return ixgbe_set_advertise(adapter, advertise); 3972 } /* ixgbe_sysctl_advertise */ 3973 3974 /************************************************************************ 3975 * ixgbe_set_advertise - Control advertised link speed 3976 * 3977 * Flags: 3978 * 0x1 - advertise 100 Mb 3979 * 0x2 - advertise 1G 3980 * 0x4 - advertise 10G 3981 * 0x8 - advertise 10 Mb (yes, Mb) 3982 ************************************************************************/ 3983 static int 3984 ixgbe_set_advertise(struct adapter *adapter, int advertise) 3985 { 3986 device_t dev = iflib_get_dev(adapter->ctx); 3987 struct ixgbe_hw *hw; 3988 ixgbe_link_speed speed = 0; 3989 ixgbe_link_speed link_caps = 0; 3990 s32 err = IXGBE_NOT_IMPLEMENTED; 3991 bool negotiate = FALSE; 
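	/*
	 * 'advertise' is a bitmask of the flags listed in the header above;
	 * e.g. 0x6 requests 1 Gb + 10 Gb.  Each requested bit is checked
	 * against the link capabilities reported by the hardware before the
	 * new speed set is handed to setup_link().
	 */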
3992 3993 /* Checks to validate new value */ 3994 if (adapter->advertise == advertise) /* no change */ 3995 return (0); 3996 3997 hw = &adapter->hw; 3998 3999 /* No speed changes for backplane media */ 4000 if (hw->phy.media_type == ixgbe_media_type_backplane) 4001 return (ENODEV); 4002 4003 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 4004 (hw->phy.multispeed_fiber))) { 4005 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n"); 4006 return (EINVAL); 4007 } 4008 4009 if (advertise < 0x1 || advertise > 0xF) { 4010 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n"); 4011 return (EINVAL); 4012 } 4013 4014 if (hw->mac.ops.get_link_capabilities) { 4015 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 4016 &negotiate); 4017 if (err != IXGBE_SUCCESS) { 4018 device_printf(dev, "Unable to determine supported advertise speeds\n"); 4019 return (ENODEV); 4020 } 4021 } 4022 4023 /* Set new value and report new advertised mode */ 4024 if (advertise & 0x1) { 4025 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) { 4026 device_printf(dev, "Interface does not support 100Mb advertised speed\n"); 4027 return (EINVAL); 4028 } 4029 speed |= IXGBE_LINK_SPEED_100_FULL; 4030 } 4031 if (advertise & 0x2) { 4032 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { 4033 device_printf(dev, "Interface does not support 1Gb advertised speed\n"); 4034 return (EINVAL); 4035 } 4036 speed |= IXGBE_LINK_SPEED_1GB_FULL; 4037 } 4038 if (advertise & 0x4) { 4039 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { 4040 device_printf(dev, "Interface does not support 10Gb advertised speed\n"); 4041 return (EINVAL); 4042 } 4043 speed |= IXGBE_LINK_SPEED_10GB_FULL; 4044 } 4045 if (advertise & 0x8) { 4046 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { 4047 device_printf(dev, "Interface does not support 10Mb advertised speed\n"); 4048 return (EINVAL); 4049 } 4050 speed |= IXGBE_LINK_SPEED_10_FULL; 4051 } 4052 4053 hw->mac.autotry_restart = TRUE; 4054 hw->mac.ops.setup_link(hw, speed, TRUE); 4055 adapter->advertise = advertise; 4056 4057 return (0); 4058 } /* ixgbe_set_advertise */ 4059 4060 /************************************************************************ 4061 * ixgbe_get_advertise - Get current advertised speed settings 4062 * 4063 * Formatted for sysctl usage. 4064 * Flags: 4065 * 0x1 - advertise 100 Mb 4066 * 0x2 - advertise 1G 4067 * 0x4 - advertise 10G 4068 * 0x8 - advertise 10 Mb (yes, Mb) 4069 ************************************************************************/ 4070 static int 4071 ixgbe_get_advertise(struct adapter *adapter) 4072 { 4073 struct ixgbe_hw *hw = &adapter->hw; 4074 int speed; 4075 ixgbe_link_speed link_caps = 0; 4076 s32 err; 4077 bool negotiate = FALSE; 4078 4079 /* 4080 * Advertised speed means nothing unless it's copper or 4081 * multi-speed fiber 4082 */ 4083 if (!(hw->phy.media_type == ixgbe_media_type_copper) && 4084 !(hw->phy.multispeed_fiber)) 4085 return (0); 4086 4087 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 4088 if (err != IXGBE_SUCCESS) 4089 return (0); 4090 4091 speed = 4092 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 4093 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 4094 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 4095 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 
8 : 0); 4096 4097 return speed; 4098 } /* ixgbe_get_advertise */ 4099 4100 /************************************************************************ 4101 * ixgbe_sysctl_dmac - Manage DMA Coalescing 4102 * 4103 * Control values: 4104 * 0/1 - off / on (use default value of 1000) 4105 * 4106 * Legal timer values are: 4107 * 50,100,250,500,1000,2000,5000,10000 4108 * 4109 * Turning off interrupt moderation will also turn this off. 4110 ************************************************************************/ 4111 static int 4112 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4113 { 4114 struct adapter *adapter = (struct adapter *)arg1; 4115 struct ifnet *ifp = iflib_get_ifp(adapter->ctx); 4116 int error; 4117 u16 newval; 4118 4119 newval = adapter->dmac; 4120 error = sysctl_handle_16(oidp, &newval, 0, req); 4121 if ((error) || (req->newptr == NULL)) 4122 return (error); 4123 4124 switch (newval) { 4125 case 0: 4126 /* Disabled */ 4127 adapter->dmac = 0; 4128 break; 4129 case 1: 4130 /* Enable and use default */ 4131 adapter->dmac = 1000; 4132 break; 4133 case 50: 4134 case 100: 4135 case 250: 4136 case 500: 4137 case 1000: 4138 case 2000: 4139 case 5000: 4140 case 10000: 4141 /* Legal values - allow */ 4142 adapter->dmac = newval; 4143 break; 4144 default: 4145 /* Do nothing, illegal value */ 4146 return (EINVAL); 4147 } 4148 4149 /* Re-initialize hardware if it's already running */ 4150 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4151 ifp->if_init(ifp); 4152 4153 return (0); 4154 } /* ixgbe_sysctl_dmac */ 4155 4156 #ifdef IXGBE_DEBUG 4157 /************************************************************************ 4158 * ixgbe_sysctl_power_state 4159 * 4160 * Sysctl to test power states 4161 * Values: 4162 * 0 - set device to D0 4163 * 3 - set device to D3 4164 * (none) - get current device power state 4165 ************************************************************************/ 4166 static int 4167 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS) 4168 { 4169 struct adapter *adapter = (struct adapter *)arg1; 4170 device_t dev = adapter->dev; 4171 int curr_ps, new_ps, error = 0; 4172 4173 curr_ps = new_ps = pci_get_powerstate(dev); 4174 4175 error = sysctl_handle_int(oidp, &new_ps, 0, req); 4176 if ((error) || (req->newptr == NULL)) 4177 return (error); 4178 4179 if (new_ps == curr_ps) 4180 return (0); 4181 4182 if (new_ps == 3 && curr_ps == 0) 4183 error = DEVICE_SUSPEND(dev); 4184 else if (new_ps == 0 && curr_ps == 3) 4185 error = DEVICE_RESUME(dev); 4186 else 4187 return (EINVAL); 4188 4189 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 4190 4191 return (error); 4192 } /* ixgbe_sysctl_power_state */ 4193 #endif 4194 4195 /************************************************************************ 4196 * ixgbe_sysctl_wol_enable 4197 * 4198 * Sysctl to enable/disable the WoL capability, 4199 * if supported by the adapter. 
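 * The request fails with ENODEV when WoL support was not detected for
 * this port (adapter->wol_support).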
4200 * 4201 * Values: 4202 * 0 - disabled 4203 * 1 - enabled 4204 ************************************************************************/ 4205 static int 4206 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) 4207 { 4208 struct adapter *adapter = (struct adapter *)arg1; 4209 struct ixgbe_hw *hw = &adapter->hw; 4210 int new_wol_enabled; 4211 int error = 0; 4212 4213 new_wol_enabled = hw->wol_enabled; 4214 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); 4215 if ((error) || (req->newptr == NULL)) 4216 return (error); 4217 new_wol_enabled = !!(new_wol_enabled); 4218 if (new_wol_enabled == hw->wol_enabled) 4219 return (0); 4220 4221 if (new_wol_enabled > 0 && !adapter->wol_support) 4222 return (ENODEV); 4223 else 4224 hw->wol_enabled = new_wol_enabled; 4225 4226 return (0); 4227 } /* ixgbe_sysctl_wol_enable */ 4228 4229 /************************************************************************ 4230 * ixgbe_sysctl_wufc - Wake Up Filter Control 4231 * 4232 * Sysctl to enable/disable the types of packets that the 4233 * adapter will wake up on upon receipt. 4234 * Flags: 4235 * 0x1 - Link Status Change 4236 * 0x2 - Magic Packet 4237 * 0x4 - Direct Exact 4238 * 0x8 - Directed Multicast 4239 * 0x10 - Broadcast 4240 * 0x20 - ARP/IPv4 Request Packet 4241 * 0x40 - Direct IPv4 Packet 4242 * 0x80 - Direct IPv6 Packet 4243 * 4244 * Settings not listed above will cause the sysctl to return an error. 4245 ************************************************************************/ 4246 static int 4247 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS) 4248 { 4249 struct adapter *adapter = (struct adapter *)arg1; 4250 int error = 0; 4251 u32 new_wufc; 4252 4253 new_wufc = adapter->wufc; 4254 4255 error = sysctl_handle_32(oidp, &new_wufc, 0, req); 4256 if ((error) || (req->newptr == NULL)) 4257 return (error); 4258 if (new_wufc == adapter->wufc) 4259 return (0); 4260 4261 if (new_wufc & 0xffffff00) 4262 return (EINVAL); 4263 4264 new_wufc &= 0xff; 4265 new_wufc |= (0xffffff & adapter->wufc); 4266 adapter->wufc = new_wufc; 4267 4268 return (0); 4269 } /* ixgbe_sysctl_wufc */ 4270 4271 #ifdef IXGBE_DEBUG 4272 /************************************************************************ 4273 * ixgbe_sysctl_print_rss_config 4274 ************************************************************************/ 4275 static int 4276 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS) 4277 { 4278 struct adapter *adapter = (struct adapter *)arg1; 4279 struct ixgbe_hw *hw = &adapter->hw; 4280 device_t dev = adapter->dev; 4281 struct sbuf *buf; 4282 int error = 0, reta_size; 4283 u32 reg; 4284 4285 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4286 if (!buf) { 4287 device_printf(dev, "Could not allocate sbuf for output.\n"); 4288 return (ENOMEM); 4289 } 4290 4291 // TODO: use sbufs to make a string to print out 4292 /* Set multiplier for RETA setup and table size based on MAC */ 4293 switch (adapter->hw.mac.type) { 4294 case ixgbe_mac_X550: 4295 case ixgbe_mac_X550EM_x: 4296 case ixgbe_mac_X550EM_a: 4297 reta_size = 128; 4298 break; 4299 default: 4300 reta_size = 32; 4301 break; 4302 } 4303 4304 /* Print out the redirection table */ 4305 sbuf_cat(buf, "\n"); 4306 for (int i = 0; i < reta_size; i++) { 4307 if (i < 32) { 4308 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); 4309 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); 4310 } else { 4311 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); 4312 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); 4313 } 4314 } 4315 4316 // TODO: print more config 4317 4318 error = sbuf_finish(buf); 4319 if 
(error) 4320 device_printf(dev, "Error finishing sbuf: %d\n", error); 4321 4322 sbuf_delete(buf); 4323 4324 return (0); 4325 } /* ixgbe_sysctl_print_rss_config */ 4326 #endif /* IXGBE_DEBUG */ 4327 4328 /************************************************************************ 4329 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY 4330 * 4331 * For X552/X557-AT devices using an external PHY 4332 ************************************************************************/ 4333 static int 4334 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) 4335 { 4336 struct adapter *adapter = (struct adapter *)arg1; 4337 struct ixgbe_hw *hw = &adapter->hw; 4338 u16 reg; 4339 4340 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4341 device_printf(iflib_get_dev(adapter->ctx), 4342 "Device has no supported external thermal sensor.\n"); 4343 return (ENODEV); 4344 } 4345 4346 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 4347 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4348 device_printf(iflib_get_dev(adapter->ctx), 4349 "Error reading from PHY's current temperature register\n"); 4350 return (EAGAIN); 4351 } 4352 4353 /* Shift temp for output */ 4354 reg = reg >> 8; 4355 4356 return (sysctl_handle_16(oidp, NULL, reg, req)); 4357 } /* ixgbe_sysctl_phy_temp */ 4358 4359 /************************************************************************ 4360 * ixgbe_sysctl_phy_overtemp_occurred 4361 * 4362 * Reports (directly from the PHY) whether the current PHY 4363 * temperature is over the overtemp threshold. 4364 ************************************************************************/ 4365 static int 4366 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) 4367 { 4368 struct adapter *adapter = (struct adapter *)arg1; 4369 struct ixgbe_hw *hw = &adapter->hw; 4370 u16 reg; 4371 4372 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4373 device_printf(iflib_get_dev(adapter->ctx), 4374 "Device has no supported external thermal sensor.\n"); 4375 return (ENODEV); 4376 } 4377 4378 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 4379 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4380 device_printf(iflib_get_dev(adapter->ctx), 4381 "Error reading from PHY's temperature status register\n"); 4382 return (EAGAIN); 4383 } 4384 4385 /* Get occurrence bit */ 4386 reg = !!(reg & 0x4000); 4387 4388 return (sysctl_handle_16(oidp, 0, reg, req)); 4389 } /* ixgbe_sysctl_phy_overtemp_occurred */ 4390 4391 /************************************************************************ 4392 * ixgbe_sysctl_eee_state 4393 * 4394 * Sysctl to set EEE power saving feature 4395 * Values: 4396 * 0 - disable EEE 4397 * 1 - enable EEE 4398 * (none) - get current device EEE state 4399 ************************************************************************/ 4400 static int 4401 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS) 4402 { 4403 struct adapter *adapter = (struct adapter *)arg1; 4404 device_t dev = adapter->dev; 4405 struct ifnet *ifp = iflib_get_ifp(adapter->ctx); 4406 int curr_eee, new_eee, error = 0; 4407 s32 retval; 4408 4409 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE); 4410 4411 error = sysctl_handle_int(oidp, &new_eee, 0, req); 4412 if ((error) || (req->newptr == NULL)) 4413 return (error); 4414 4415 /* Nothing to do */ 4416 if (new_eee == curr_eee) 4417 return (0); 4418 4419 /* Not supported */ 4420 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE)) 4421 return (EINVAL); 4422 4423 /* Bounds checking */ 4424 if ((new_eee < 0) || (new_eee > 1)) 4425 return (EINVAL); 4426 4427 retval = ixgbe_setup_eee(&adapter->hw, 
new_eee); 4428 if (retval) { 4429 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval); 4430 return (EINVAL); 4431 } 4432 4433 /* Restart auto-neg */ 4434 ifp->if_init(ifp); 4435 4436 device_printf(dev, "New EEE state: %d\n", new_eee); 4437 4438 /* Cache new value */ 4439 if (new_eee) 4440 adapter->feat_en |= IXGBE_FEATURE_EEE; 4441 else 4442 adapter->feat_en &= ~IXGBE_FEATURE_EEE; 4443 4444 return (error); 4445 } /* ixgbe_sysctl_eee_state */ 4446 4447 /************************************************************************ 4448 * ixgbe_init_device_features 4449 ************************************************************************/ 4450 static void 4451 ixgbe_init_device_features(struct adapter *adapter) 4452 { 4453 adapter->feat_cap = IXGBE_FEATURE_NETMAP 4454 | IXGBE_FEATURE_RSS 4455 | IXGBE_FEATURE_MSI 4456 | IXGBE_FEATURE_MSIX 4457 | IXGBE_FEATURE_LEGACY_IRQ; 4458 4459 /* Set capabilities first... */ 4460 switch (adapter->hw.mac.type) { 4461 case ixgbe_mac_82598EB: 4462 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT) 4463 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL; 4464 break; 4465 case ixgbe_mac_X540: 4466 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4467 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4468 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && 4469 (adapter->hw.bus.func == 0)) 4470 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 4471 break; 4472 case ixgbe_mac_X550: 4473 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4474 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4475 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4476 break; 4477 case ixgbe_mac_X550EM_x: 4478 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4479 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4480 break; 4481 case ixgbe_mac_X550EM_a: 4482 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4483 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4484 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4485 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || 4486 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { 4487 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4488 adapter->feat_cap |= IXGBE_FEATURE_EEE; 4489 } 4490 break; 4491 case ixgbe_mac_82599EB: 4492 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4493 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4494 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && 4495 (adapter->hw.bus.func == 0)) 4496 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 4497 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) 4498 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4499 break; 4500 default: 4501 break; 4502 } 4503 4504 /* Enabled by default... */ 4505 /* Fan failure detection */ 4506 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL) 4507 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL; 4508 /* Netmap */ 4509 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 4510 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 4511 /* EEE */ 4512 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 4513 adapter->feat_en |= IXGBE_FEATURE_EEE; 4514 /* Thermal Sensor */ 4515 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) 4516 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; 4517 4518 /* Enabled via global sysctl... */ 4519 /* Flow Director */ 4520 if (ixgbe_enable_fdir) { 4521 if (adapter->feat_cap & IXGBE_FEATURE_FDIR) 4522 adapter->feat_en |= IXGBE_FEATURE_FDIR; 4523 else 4524 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled."); 4525 } 4526 /* 4527 * Message Signal Interrupts - Extended (MSI-X) 4528 * Normal MSI is only enabled if MSI-X calls fail. 
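 * RSS and SR-IOV both require MSI-X; when MSI-X is unavailable or
 * administratively disabled they are stripped from the capability and
 * enable masks further down.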
4529 */ 4530 if (!ixgbe_enable_msix) 4531 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX; 4532 /* Receive-Side Scaling (RSS) */ 4533 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) 4534 adapter->feat_en |= IXGBE_FEATURE_RSS; 4535 4536 /* Disable features with unmet dependencies... */ 4537 /* No MSI-X */ 4538 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) { 4539 adapter->feat_cap &= ~IXGBE_FEATURE_RSS; 4540 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV; 4541 adapter->feat_en &= ~IXGBE_FEATURE_RSS; 4542 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; 4543 } 4544 } /* ixgbe_init_device_features */ 4545 4546 /************************************************************************ 4547 * ixgbe_check_fan_failure 4548 ************************************************************************/ 4549 static void 4550 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt) 4551 { 4552 u32 mask; 4553 4554 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) : 4555 IXGBE_ESDP_SDP1; 4556 4557 if (reg & mask) 4558 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 4559 } /* ixgbe_check_fan_failure */ 4560
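
/************************************************************************
 * Example sysctl usage (illustrative sketch only)
 *
 * The handlers above are wired to per-device sysctl nodes elsewhere in
 * this driver; the node names below are assumptions based on each
 * handler's purpose and may differ from the actual registration:
 *
 *   sysctl dev.ix.0.fc=3                 # ixgbe_sysctl_flowcntl: full FC
 *   sysctl dev.ix.0.advertise_speed=0x6  # ixgbe_sysctl_advertise: 1G+10G
 *   sysctl dev.ix.0.dmac=1000            # ixgbe_sysctl_dmac: watchdog 1000
 *   sysctl dev.ix.0.wol_enable=1         # ixgbe_sysctl_wol_enable
 ************************************************************************/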