/* xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 48ddd1b9f88753c6875566fbb67bc622453e4993) */
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const pci_vendor_info_t ixgbe_vendor_info_array[] =
{
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) 82598EB AF (Dual Fiber)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) 82598EB AF (Fiber)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) 82598EB AT (CX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) 82598EB AT"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) 82598EB AT2"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) 82598"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) 82598EB AF DA (Dual Fiber)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) 82598EB AT (Dual CX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) 82598EB AF (Dual Fiber LR)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) 82598EB AF (Dual Fiber SR)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) 82598EB LOM"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) X520 82599 (KX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) X520 82599 (KX4 Mezzanine)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) X520 82599ES (SFI/SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) X520 82599 (XAUI/BX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) X520 82599 (Dual CX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) X520-T 82599 LOM"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS,  "Intel(R) X520 82599 LS"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) X520 82599 (Combined Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) X520 82599 (Backplane w/FCoE)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) X520 82599 (Dual SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) X520-1 82599EN (SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) X520-4 82599 (Quad SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) X520-Q1 82599 (QSFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) X540-AT2"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) X550-T2"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) X552 (KR Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) X552 (KX4 Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) X552/X557-AT (10GBASE-T)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) X552 (1000BASE-T)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
	/* required last entry */
  PVID_END
};

static void *ixgbe_register(device_t);
static int  ixgbe_if_attach_pre(if_ctx_t);
static int  ixgbe_if_attach_post(if_ctx_t);
static int  ixgbe_if_detach(if_ctx_t);
static int  ixgbe_if_shutdown(if_ctx_t);
static int  ixgbe_if_suspend(if_ctx_t);
static int  ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixgbe_if_media_change(if_ctx_t);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int  ixgbe_if_promisc_set(if_ctx_t, int);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int  ixgbe_allocate_pci_resources(if_ctx_t);
static int  ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int  ixgbe_msix_link(void *);
static int  ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int  ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_fw_mode_timer(void *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
static int  ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 *  FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
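
/*
 * Note (editor's sketch): DRIVER_MODULE() registers this as the "ix"
 * driver; the loadable module is built as if_ix.ko, so on kernels
 * without the driver compiled in it can typically be loaded with
 * "kldload if_ix" or seeded via if_ix_load="YES" in /boot/loader.conf.
 */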

static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
  "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.  This currently only works as a
 * compile-time option because it is applied during attach; set this to
 * 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Turning this on allows the use of unsupported SFP+ modules;
 * note that if you do so, you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation.  The interrupt rate is varied
 * over time based on the traffic seen by that interrupt vector.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");
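
/*
 * Usage sketch for the tunables above: the sysctl tree maps _hw_ix to
 * "hw.ix.", and CTLFLAG_RDTUN entries are read-only sysctls that can be
 * seeded as boot-time tunables, e.g. in /boot/loader.conf:
 *
 *   hw.ix.enable_msix="0"     # force MSI/legacy interrupts for testing
 *   hw.ix.flow_control="0"    # ixgbe_fc_none (assuming that enum value is 0)
 *
 * CTLFLAG_RWTUN entries such as hw.ix.enable_aim can additionally be
 * changed at runtime with sysctl(8).  Assuming IXGBE_LOW_LATENCY is 128
 * as in ixgbe.h, the default max_interrupt_rate above works out to
 * 4000000 / 128 = 31250 interrupts per second.
 */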

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; the default of 20 means every 20th packet
 * will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};
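
/*
 * The descriptor-count fields above are single-element arrays because
 * isc_ntxqs == isc_nrxqs == 1: each queue set carries exactly one TX
 * and one RX descriptor ring, so iflib needs only one sizing entry per
 * direction (MIN_TXD/MAX_TXD and friends come from ixgbe.h).
 */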

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que;
	int                i, j, error;

	MPASS(sc->num_tx_queues > 0);
	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	                                 M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		txr->sc = que->sc = sc;

		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* Get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];

		txr->bytes = 0;
		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (sc->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixgbe_if_queues_free(ctx);

	return (error);
} /* ixgbe_if_tx_queues_alloc */
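
/*
 * Note on the allocation split above: iflib owns the descriptor ring
 * DMA memory and passes its virtual/physical addresses in via
 * vaddrs/paddrs; this routine only allocates per-queue software state
 * (the ix_tx_queue array and the tx_rsq report-status ring).
 */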

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int                i;

	MPASS(sc->num_rx_queues > 0);
	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
	                                 M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		/* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		rxr->sc = que->sc = sc;

		/* Get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_RDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = sc->tx_queues;
	struct ix_rx_queue *rx_que = sc->rx_queues;
	int                i;

	if (tx_que != NULL) {
		for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
				break;

			free(txr->tx_rsq, M_IXGBE);
			txr->tx_rsq = NULL;
		}

		free(sc->tx_queues, M_IXGBE);
		sc->tx_queues = NULL;
	}
	if (rx_que != NULL) {
		free(sc->rx_queues, M_IXGBE);
		sc->rx_queues = NULL;
	}
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id, table_size, index_mult;
	int             i, j;
	u32             rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* Set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues).
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}
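
	/*
	 * Worked example of the packing above: RETA entries are 8 bits
	 * wide, four per 32-bit register, with entry n landing in byte
	 * (n & 3).  Entries 0..3 accumulate in 'reta' and are flushed to
	 * RETA(0) when i == 3, entries 4..7 go to RETA(1), and so on.
	 * On the X550 family (table_size == 512), entries 128..511 spill
	 * into the extended ERETA registers, e.g. entry 132 becomes byte
	 * 0 of ERETA(1).
	 */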

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
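
/*
 * Example of the rounding below (assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT
 * is 10, i.e. SRRCTL.BSIZEPKT counts 1 KB units): a 2048-byte rx mbuf
 * gives bufsz = (2048 + 1023) >> 10 = 2, i.e. a 2 KB packet buffer.
 */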

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = sc->shared;
	struct ixgbe_hw    *hw = &sc->hw;
	if_t               ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int                i, j;
	u32                bufsz, fctrl, srrctl, rxcsum;
	u32                hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (if_getmtu(ifp) > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;

		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (if_getcapenable(ifp) & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &sc->hw;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}
} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t        dev;
	if_softc_ctx_t  scctx;
	struct ixgbe_hw *hw;
	int             error = 0;
	u32             ctrl_ext;
	size_t          i;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->hw.back = sc;
	sc->ctx = ctx;
	sc->dev = dev;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Determine hardware revision */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		return (ENXIO);
	}

	/* Let hardware know the driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_pci;
	}

	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
		device_printf(dev,
		    "Firmware recovery mode detected. Limiting"
		    " functionality.\nRefer to the Intel(R) Ethernet Adapters"
		    " and Devices User Guide for details on firmware recovery"
		    " mode.\n");
		error = ENOSYS;
		goto err_pci;
	}

	/* 82598 does not support SR-IOV; initialize everything else */
	if (hw->mac.type >= ixgbe_mac_82599_vf) {
		for (i = 0; i < sc->num_vfs; i++)
			hw->mbx.ops[i].init_params(hw);
	}

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	ixgbe_init_device_features(sc);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(sc);

	/* Verify adapter fan is still functional (if applicable) */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(sc, esdp, false);
	}

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	hw->phy.reset_if_overtemp = true;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port; set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		sc->sfp_probe = true;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_pci;
	} else if (error) {
		device_printf(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_pci;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_pci;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev,
		    "This device is a pre-production adapter/LOM.  Please be"
		    " aware there may be issues associated with your"
		    " hardware.\nIf you are experiencing problems please"
		    " contact your Intel or hardware representative who"
		    " provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_pci;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		scctx->isc_rss_table_size = 512;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
		break;
	default:
		scctx->isc_rss_table_size = 128;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
	}

	/* Allow legacy interrupts */
	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);

	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
	} else {
		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	}

	scctx->isc_msix_bar = pci_msix_table_bar(dev);

	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

	return (0);

err_pci:
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_pci_resources(ctx);

	return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int             error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	if (sc->intr_type == IFLIB_INTR_LEGACY &&
	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev,
		    "Device does not support legacy interrupts\n");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
	    M_IXGBE, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(sc, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	ixgbe_initialize_iov(sc);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(sc);
	ixgbe_add_hw_stats(sc);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(sc);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass sc.
	 */
	ixgbe_bypass_init(sc);

	/* Display NVM and Option ROM versions */
	ixgbe_print_fw_version(ctx);

	/* Set an initial dmac value */
	sc->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	sc->advertise = ixgbe_get_default_advertise(sc);

	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	/* Init recovery mode timer and state variable */
	if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		sc->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&sc->fw_mode_timer, true);

		/* Start the task */
		callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
	}

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u16             dev_caps = 0;

	/* Find out WoL support for port */
	sc->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	     hw->bus.func == 0))
		sc->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
	if_t               ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));

	sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

	ixgbe_add_media_types(ctx);

	/* Autoselect media by default */
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t               ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	case IFCOUNTER_OMCASTS:
		return (sc->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (sc->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (0);
	case IFCOUNTER_IERRORS:
		return (sc->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &sc->hw;
	int                i;

	if (hw->phy.ops.read_i2c_byte == NULL)
		return (ENXIO);
	for (i = 0; i < req->len; i++)
		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
		    req->dev_addr, &req->data[i]);
	return (0);
} /* ixgbe_if_i2c_req */
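
/*
 * Userland reaches ixgbe_if_i2c_req() through the SIOCGI2C ioctl and
 * struct ifi2creq; "ifconfig -v ix0", for example, uses it to read the
 * SFP/SFP+ module's identification and diagnostic pages, assuming the
 * PHY provides read_i2c_byte.
 */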

/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t        dev = iflib_get_dev(ctx);
	u64             layer;

	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (hw->mac.type == ixgbe_mac_X550) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return (true);
		return (false);
	case ixgbe_mac_82599EB:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (true);
		default:
			return (false);
		}
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
			return (true);
		return (false);
	default:
		return (false);
	}
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32             autoneg, err = 0;
	bool            sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		iflib_admin_intr_deferred(ctx);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &sc->link_speed,
			    &sc->link_up, false);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;

		if (hw->mac.type == ixgbe_mac_X550 &&
		    hw->phy.autoneg_advertised == 0) {
			/*
			 * 2.5G and 5G autonegotiation speeds on X550
			 * are disabled by default due to reported
			 * interoperability issues with some switches.
			 *
			 * The second condition checks whether any
			 * autonegotiation speeds were set prior to this
			 * ixgbe_config_link() call.
			 *
			 * If hw->phy.autoneg_advertised is non-zero, the
			 * user may have set autonegotiation speeds via
			 * the sysctl before bringing the interface up.
			 * In that case we should not disable 2.5G and 5G,
			 * since those speeds might have been selected by
			 * the user.
			 *
			 * Otherwise (i.e. if hw->phy.autoneg_advertised
			 * is 0), this is the first time we set the
			 * autonegotiation preferences and the default set
			 * of speeds should exclude 2.5G and 5G.
			 */
			autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
			    IXGBE_LINK_SPEED_5GB_FULL);
		}

		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    sc->link_up);
	}
} /* ixgbe_config_link */
1475 
1476 /************************************************************************
1477  * ixgbe_update_stats_counters - Update board statistics counters.
1478  ************************************************************************/
1479 static void
1480 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1481 {
1482 	struct ixgbe_hw       *hw = &sc->hw;
1483 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
1484 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1485 	u32                   lxoffrxc;
1486 	u64                   total_missed_rx = 0;
1487 
1488 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1489 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1490 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1491 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1492 	missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
	stats->mpc[0] += missed_rx;
	total_missed_rx += missed_rx;
1493 
1494 	for (int i = 0; i < 16; i++) {
1495 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1496 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1497 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1498 	}
1499 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1500 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1501 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1502 
1503 	/* Hardware workaround, gprc counts missed packets */
1504 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1505 	stats->gprc -= missed_rx;
1506 
1507 	if (hw->mac.type != ixgbe_mac_82598EB) {
1508 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1509 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1510 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1511 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1512 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1513 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1514 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1515 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1516 		stats->lxoffrxc += lxoffrxc;
1517 	} else {
1518 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1519 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1520 		stats->lxoffrxc += lxoffrxc;
1521 		/* 82598 only has a counter in the high register */
1522 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1523 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1524 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1525 	}
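	/*
	 * Note on the reads above: on 82599 and later the octet counters
	 * are wider than 32 bits and split across a low/high register
	 * pair, so each sample is reassembled as low + (high << 32);
	 * e.g. GORCL = 0x00001000 with GORCH = 0x2 yields 0x200001000
	 * octets.  The 82598 keeps only the single high-named register.
	 */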
1526 
1527 	/*
1528 	 * For watchdog management we need to know if we have been paused
1529 	 * during the last interval, so capture that here.
1530 	 */
1531 	if (lxoffrxc)
1532 		sc->shared->isc_pause_frames = 1;
1533 
1534 	/*
1535 	 * Workaround: mprc hardware is incorrectly counting
1536 	 * broadcasts, so for now we subtract those.
1537 	 */
1538 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1539 	stats->bprc += bprc;
1540 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1541 	if (hw->mac.type == ixgbe_mac_82598EB)
1542 		stats->mprc -= bprc;
1543 
1544 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1545 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1546 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1547 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1548 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1549 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1550 
1551 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1552 	stats->lxontxc += lxon;
1553 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1554 	stats->lxofftxc += lxoff;
1555 	total = lxon + lxoff;
1556 
1557 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1558 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1559 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1560 	stats->gptc -= total;
1561 	stats->mptc -= total;
1562 	stats->ptc64 -= total;
1563 	stats->gotc -= total * ETHER_MIN_LEN;
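	/*
	 * The subtractions above back the MAC's own pause frames out of
	 * the transmit counters: XON/XOFF frames are minimum-size (64
	 * byte) frames to a multicast address, so e.g. 10 XON + 5 XOFF
	 * frames remove 15 packets from gptc/mptc/ptc64 and
	 * 15 * ETHER_MIN_LEN = 960 octets from gotc.
	 */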
1564 
1565 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1566 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1567 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1568 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1569 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1570 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1571 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1572 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1573 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1574 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1575 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1576 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1577 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1578 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1579 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1580 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1581 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1582 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1583 	/* Only read FCOE on 82599 */
1584 	if (hw->mac.type != ixgbe_mac_82598EB) {
1585 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1586 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1587 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1588 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1589 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1590 	}
1591 
1592 	/* Fill out the OS statistics structure */
1593 	IXGBE_SET_IPACKETS(sc, stats->gprc);
1594 	IXGBE_SET_OPACKETS(sc, stats->gptc);
1595 	IXGBE_SET_IBYTES(sc, stats->gorc);
1596 	IXGBE_SET_OBYTES(sc, stats->gotc);
1597 	IXGBE_SET_IMCASTS(sc, stats->mprc);
1598 	IXGBE_SET_OMCASTS(sc, stats->mptc);
1599 	IXGBE_SET_COLLISIONS(sc, 0);
1600 	IXGBE_SET_IQDROPS(sc, total_missed_rx);
1601 
1602 	/*
1603 	 * Aggregate following types of errors as RX errors:
1604 	 * - CRC error count,
1605 	 * - illegal byte error count,
1606 	 * - missed packets count,
1607 	 * - length error count,
1608 	 * - undersized packets count,
1609 	 * - fragmented packets count,
1610 	 * - oversized packets count,
1611 	 * - jabber count.
1612 	 */
1613 	IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1614 	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1615 	    stats->rjc);
1616 } /* ixgbe_update_stats_counters */
1617 
1618 /************************************************************************
1619  * ixgbe_add_hw_stats
1620  *
1621  *   Add sysctl variables, one per statistic, to the system.
1622  ************************************************************************/
1623 static void
1624 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1625 {
1626 	device_t               dev = iflib_get_dev(sc->ctx);
1627 	struct ix_rx_queue     *rx_que;
1628 	struct ix_tx_queue     *tx_que;
1629 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1630 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1631 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1632 	struct ixgbe_hw_stats  *stats = &sc->stats.pf;
1633 	struct sysctl_oid      *stat_node, *queue_node;
1634 	struct sysctl_oid_list *stat_list, *queue_list;
1635 	int                    i;
1636 
1637 #define QUEUE_NAME_LEN 32
1638 	char                   namebuf[QUEUE_NAME_LEN];
1639 
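	/*
	 * The nodes added below hang off the device's sysctl tree, so for
	 * unit 0 the result looks roughly like (illustrative):
	 *
	 *   dev.ix.0.dropped
	 *   dev.ix.0.queue0.txd_head
	 *   dev.ix.0.queue0.rx_packets
	 *   dev.ix.0.mac_stats.crc_errs
	 *
	 * each readable with sysctl(8), e.g. `sysctl dev.ix.0.mac_stats`.
	 */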
1640 	/* Driver Statistics */
1641 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1642 	    CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1643 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1644 	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1645 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1646 	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1647 
1648 	for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
1649 		struct tx_ring *txr = &tx_que->txr;
1650 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1651 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1652 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1653 		queue_list = SYSCTL_CHILDREN(queue_node);
1654 
1655 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1656 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1657 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1658 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1659 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1660 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1661 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1662 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1663 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1664 		    CTLFLAG_RD, &txr->total_packets,
1665 		    "Queue Packets Transmitted");
1666 	}
1667 
1668 	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
1669 		struct rx_ring *rxr = &rx_que->rxr;
1670 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1671 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1672 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1673 		queue_list = SYSCTL_CHILDREN(queue_node);
1674 
1675 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1676 		    CTLTYPE_UINT | CTLFLAG_RW,
1677 		    &sc->rx_queues[i], 0,
1678 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1679 		    "Interrupt Rate");
1680 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1681 		    CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1682 		    "irqs on this queue");
1683 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1684 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1685 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1686 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1687 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1688 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1689 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1690 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1691 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1692 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1693 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1694 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1695 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1696 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1697 	}
1698 
1699 	/* MAC stats get their own sub node */
1700 
1701 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1702 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1703 	stat_list = SYSCTL_CHILDREN(stat_node);
1704 
1705 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1706 	    CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1707 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1708 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1709 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1710 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1711 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1712 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1713 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1714 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1715 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1716 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1717 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1718 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1719 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1720 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1721 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1722 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1723 
1724 	/* Flow Control stats */
1725 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1726 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1727 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1728 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1729 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1730 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1731 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1732 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1733 
1734 	/* Packet Reception Stats */
1735 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1736 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1737 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1738 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1739 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1740 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1741 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1742 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1743 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1744 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1745 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1746 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1747 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1748 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1749 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1750 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1751 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1752 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1753 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1754 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1755 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1756 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1757 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1758 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1759 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1760 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1761 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1762 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1763 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1764 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1765 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1766 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1767 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1768 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1769 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1770 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1771 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1772 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1773 
1774 	/* Packet Transmission Stats */
1775 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1776 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1777 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1778 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1779 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1780 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1781 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1782 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1783 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1784 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1785 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1786 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1787 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1788 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1789 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1790 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1791 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1792 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1793 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1794 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1795 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1796 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1797 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1798 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1799 } /* ixgbe_add_hw_stats */
1800 
1801 /************************************************************************
1802  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1803  *
1804  *   Retrieves the TDH value from the hardware
1805  ************************************************************************/
1806 static int
1807 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1808 {
1809 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1810 	int            error;
1811 	unsigned int   val;
1812 
1813 	if (!txr)
1814 		return (0);
1815 
1817 	if (atomic_load_acq_int(&txr->sc->recovery_mode))
1818 		return (EPERM);
1819 
1820 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1821 	error = sysctl_handle_int(oidp, &val, 0, req);
1822 	if (error || !req->newptr)
1823 		return error;
1824 
1825 	return (0);
1826 } /* ixgbe_sysctl_tdh_handler */
1827 
1828 /************************************************************************
1829  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1830  *
1831  *   Retrieves the TDT value from the hardware
1832  ************************************************************************/
1833 static int
1834 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1835 {
1836 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1837 	int            error;
1838 	unsigned int   val;
1839 
1840 	if (!txr)
1841 		return (0);
1842 
1843 	if (atomic_load_acq_int(&txr->sc->recovery_mode))
1844 		return (EPERM);
1845 
1846 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1847 	error = sysctl_handle_int(oidp, &val, 0, req);
1848 	if (error || !req->newptr)
1849 		return error;
1850 
1851 	return (0);
1852 } /* ixgbe_sysctl_tdt_handler */
1853 
1854 /************************************************************************
1855  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1856  *
1857  *   Retrieves the RDH value from the hardware
1858  ************************************************************************/
1859 static int
1860 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1861 {
1862 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1863 	int            error;
1864 	unsigned int   val;
1865 
1866 	if (!rxr)
1867 		return (0);
1868 
1869 	if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1870 		return (EPERM);
1871 
1872 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
1873 	error = sysctl_handle_int(oidp, &val, 0, req);
1874 	if (error || !req->newptr)
1875 		return error;
1876 
1877 	return (0);
1878 } /* ixgbe_sysctl_rdh_handler */
1879 
1880 /************************************************************************
1881  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1882  *
1883  *   Retrieves the RDT value from the hardware
1884  ************************************************************************/
1885 static int
1886 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1887 {
1888 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1889 	int            error;
1890 	unsigned int   val;
1891 
1892 	if (!rxr)
1893 		return (0);
1894 
1895 	if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1896 		return (EPERM);
1897 
1898 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
1899 	error = sysctl_handle_int(oidp, &val, 0, req);
1900 	if (error || !req->newptr)
1901 		return error;
1902 
1903 	return (0);
1904 } /* ixgbe_sysctl_rdt_handler */
1905 
1906 /************************************************************************
1907  * ixgbe_if_vlan_register
1908  *
1909  *   Run via vlan config EVENT, it enables us to use the
1910  *   HW Filter table since we can get the vlan id. This
1911  *   just creates the entry in the soft version of the
1912  *   VFTA, init will repopulate the real table.
1913  ************************************************************************/
1914 static void
1915 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1916 {
1917 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1918 	u16            index, bit;
1919 
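	/*
	 * The shadow VFTA mirrors the hardware table: 4096 possible VLAN
	 * IDs spread across IXGBE_VFTA_SIZE (128) 32-bit words, so the
	 * word index is vtag / 32 and the bit is vtag % 32.  E.g. for
	 * vtag 100: index = 100 >> 5 = 3, bit = 100 & 0x1F = 4, setting
	 * bit 4 of shadow_vfta[3].
	 */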
1920 	index = (vtag >> 5) & 0x7F;
1921 	bit = vtag & 0x1F;
1922 	sc->shadow_vfta[index] |= (1 << bit);
1923 	++sc->num_vlans;
1924 	ixgbe_setup_vlan_hw_support(ctx);
1925 } /* ixgbe_if_vlan_register */
1926 
1927 /************************************************************************
1928  * ixgbe_if_vlan_unregister
1929  *
1930  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1931  ************************************************************************/
1932 static void
1933 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1934 {
1935 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1936 	u16            index, bit;
1937 
1938 	index = (vtag >> 5) & 0x7F;
1939 	bit = vtag & 0x1F;
1940 	sc->shadow_vfta[index] &= ~(1 << bit);
1941 	--sc->num_vlans;
1942 	/* Re-init to load the changes */
1943 	ixgbe_setup_vlan_hw_support(ctx);
1944 } /* ixgbe_if_vlan_unregister */
1945 
1946 /************************************************************************
1947  * ixgbe_setup_vlan_hw_support
1948  ************************************************************************/
1949 static void
1950 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1951 {
1952 	if_t            ifp = iflib_get_ifp(ctx);
1953 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
1954 	struct ixgbe_hw *hw = &sc->hw;
1955 	struct rx_ring  *rxr;
1956 	int             i;
1957 	u32             ctrl;
1958 
1960 	/*
1961 	 * We get here through init_locked, meaning a soft
1962 	 * reset; that has already cleared the VFTA and other
1963 	 * state, so if no VLANs have been registered (or HW
1964 	 * tagging is disabled) just leave the feature off.
1965 	 */
1966 	if (sc->num_vlans == 0 || (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
1967 		/* Clear the vlan hw flag */
1968 		for (i = 0; i < sc->num_rx_queues; i++) {
1969 			rxr = &sc->rx_queues[i].rxr;
1970 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1971 			if (hw->mac.type != ixgbe_mac_82598EB) {
1972 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1973 				ctrl &= ~IXGBE_RXDCTL_VME;
1974 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1975 			}
1976 			rxr->vtag_strip = false;
1977 		}
1978 		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1979 		/* Disable the VLAN filter table */
1980 		ctrl |= IXGBE_VLNCTRL_CFIEN;
1981 		ctrl &= ~IXGBE_VLNCTRL_VFE;
1982 		if (hw->mac.type == ixgbe_mac_82598EB)
1983 			ctrl &= ~IXGBE_VLNCTRL_VME;
1984 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1985 		return;
1986 	}
1987 
1988 	/* Setup the queues for vlans */
1989 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1990 		for (i = 0; i < sc->num_rx_queues; i++) {
1991 			rxr = &sc->rx_queues[i].rxr;
1992 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1993 			if (hw->mac.type != ixgbe_mac_82598EB) {
1994 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1995 				ctrl |= IXGBE_RXDCTL_VME;
1996 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1997 			}
1998 			rxr->vtag_strip = true;
1999 		}
2000 	}
2001 
2002 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
2003 		return;
2004 	/*
2005 	 * A soft reset zeroes out the VFTA, so
2006 	 * we need to repopulate it now.
2007 	 */
2008 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2009 		if (sc->shadow_vfta[i] != 0)
2010 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2011 			    sc->shadow_vfta[i]);
2012 
2013 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2014 	/* Enable the Filter Table if enabled */
2015 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
2016 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2017 		ctrl |= IXGBE_VLNCTRL_VFE;
2018 	}
2019 	if (hw->mac.type == ixgbe_mac_82598EB)
2020 		ctrl |= IXGBE_VLNCTRL_VME;
2021 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2022 } /* ixgbe_setup_vlan_hw_support */
2023 
2024 /************************************************************************
2025  * ixgbe_get_slot_info
2026  *
2027  *   Get the width and transaction speed of
2028  *   the slot this adapter is plugged into.
2029  ************************************************************************/
2030 static void
2031 ixgbe_get_slot_info(struct ixgbe_softc *sc)
2032 {
2033 	device_t        dev = iflib_get_dev(sc->ctx);
2034 	struct ixgbe_hw *hw = &sc->hw;
2035 	int             bus_info_valid = true;
2036 	u32             offset;
2037 	u16             link;
2038 
2039 	/* Some devices are behind an internal bridge */
2040 	switch (hw->device_id) {
2041 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
2042 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2043 		goto get_parent_info;
2044 	default:
2045 		break;
2046 	}
2047 
2048 	ixgbe_get_bus_info(hw);
2049 
2050 	/*
2051 	 * Some devices don't use PCI-E; for those, skip the
2052 	 * speed/width display rather than printing "Unknown".
2053 	 */
2054 	switch (hw->mac.type) {
2055 	case ixgbe_mac_X550EM_x:
2056 	case ixgbe_mac_X550EM_a:
2057 		return;
2058 	default:
2059 		goto display;
2060 	}
2061 
2062 get_parent_info:
2063 	/*
2064 	 * For the Quad port adapter we need to parse back
2065 	 * up the PCI tree to find the speed of the expansion
2066 	 * slot into which this adapter is plugged. A bit more work.
2067 	 */
2068 	dev = device_get_parent(device_get_parent(dev));
2069 #ifdef IXGBE_DEBUG
2070 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2071 	    pci_get_slot(dev), pci_get_function(dev));
2072 #endif
2073 	dev = device_get_parent(device_get_parent(dev));
2074 #ifdef IXGBE_DEBUG
2075 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2076 	    pci_get_slot(dev), pci_get_function(dev));
2077 #endif
2078 	/* Now get the PCI Express Capabilities offset */
2079 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
2080 		/*
2081 		 * Hmm...can't get PCI-Express capabilities.
2082 		 * Falling back to default method.
2083 		 */
2084 		bus_info_valid = false;
2085 		ixgbe_get_bus_info(hw);
2086 		goto display;
2087 	}
2088 	/* ...and read the Link Status Register */
2089 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2090 	ixgbe_set_pci_config_data_generic(hw, link);
2091 
2092 display:
2093 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
2094 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
2095 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
2096 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
2097 	     "Unknown"),
2098 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2099 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2100 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
2101 	     "Unknown"));
2102 
2103 	if (bus_info_valid) {
2104 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2105 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2106 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
2107 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
2108 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
2109 		}
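		/*
		 * Back-of-the-envelope behind the warning above: a Gen1
		 * (2.5GT/s) lane carries roughly 2 Gb/s after 8b/10b
		 * encoding, so a x4 Gen1 slot tops out near 8 Gb/s,
		 * below the 10 Gb/s line rate of one port, while x8
		 * Gen1 or x4 Gen2 (~16 Gb/s) leaves headroom.
		 */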
2110 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2111 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2112 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
2113 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
2114 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2115 		}
2116 	} else
2117 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2118 
2119 	return;
2120 } /* ixgbe_get_slot_info */
2121 
2122 /************************************************************************
2123  * ixgbe_if_msix_intr_assign
2124  *
2125  *   Setup MSI-X Interrupt resources and handlers
2126  ************************************************************************/
2127 static int
2128 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2129 {
2130 	struct ixgbe_softc     *sc = iflib_get_softc(ctx);
2131 	struct ix_rx_queue *rx_que = sc->rx_queues;
2132 	struct ix_tx_queue *tx_que;
2133 	int                error, rid, vector = 0;
2134 	char               buf[16];
2135 
2136 	/*
	 * RX queues take the first MSI-X vectors; the admin/link
	 * interrupt is assigned the vector after them.
	 */
2138 	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2139 		rid = vector + 1;
2140 
2141 		snprintf(buf, sizeof(buf), "rxq%d", i);
2142 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2143 		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2144 
2145 		if (error) {
2146 			device_printf(iflib_get_dev(ctx),
2147 			    "Failed to allocate que int %d err: %d", i, error);
2148 			sc->num_rx_queues = i + 1;
2149 			goto fail;
2150 		}
2151 
2152 		rx_que->msix = vector;
2153 	}
2154 	for (int i = 0; i < sc->num_tx_queues; i++) {
2155 		snprintf(buf, sizeof(buf), "txq%d", i);
2156 		tx_que = &sc->tx_queues[i];
2157 		tx_que->msix = i % sc->num_rx_queues;
2158 		iflib_softirq_alloc_generic(ctx,
2159 		    &sc->rx_queues[tx_que->msix].que_irq,
2160 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2161 	}
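	/*
	 * Illustrative layout with 4 RX and 4 TX queues: the RX queues
	 * take MSI-X vectors 0-3 (rids 1-4), each TX queue i is serviced
	 * on the vector of RX queue (i % num_rx_queues), and the
	 * admin/link interrupt registered below lands on vector 4 (rid 5).
	 */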
2162 	rid = vector + 1;
2163 	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2164 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2165 	if (error) {
2166 		device_printf(iflib_get_dev(ctx),
2167 		    "Failed to register admin handler");
2168 		return (error);
2169 	}
2170 
2171 	sc->vector = vector;
2172 
2173 	return (0);
2174 fail:
2175 	iflib_irq_free(ctx, &sc->irq);
2176 	rx_que = sc->rx_queues;
2177 	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2178 		iflib_irq_free(ctx, &rx_que->que_irq);
2179 
2180 	return (error);
2181 } /* ixgbe_if_msix_intr_assign */
2182 
2183 static inline void
2184 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2185 {
2186 	uint32_t newitr = 0;
2187 	struct rx_ring *rxr = &que->rxr;
2188 	/* FIXME struct tx_ring *txr = ... ->txr; */
2189 
2190 	/*
2191 	 * Do Adaptive Interrupt Moderation:
2192 	 *  - Write out last calculated setting
2193 	 *  - Calculate based on average size over
2194 	 *    the last interval.
2195 	 */
2196 	if (que->eitr_setting) {
2197 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2198 		    que->eitr_setting);
2199 	}
2200 
2201 	que->eitr_setting = 0;
2202 	/* Idle, do nothing */
2203 	if (rxr->bytes == 0) {
2204 		/* FIXME && txr->bytes == 0 */
2205 		return;
2206 	}
2207 
2208 	if ((rxr->bytes) && (rxr->packets))
2209 		newitr = rxr->bytes / rxr->packets;
2210 	/* FIXME for transmit accounting
2211 	 * if ((txr->bytes) && (txr->packets))
2212 	 * 	newitr = txr->bytes/txr->packets;
2213 	 * if ((rxr->bytes) && (rxr->packets))
2214 	 * 	newitr = max(newitr, (rxr->bytes / rxr->packets));
2215 	 */
2216 
2217 	newitr += 24; /* account for hardware frame, crc */
2218 	/* set an upper boundary */
2219 	newitr = min(newitr, 3000);
2220 
2221 	/* Be nice to the mid range */
2222 	if ((newitr > 300) && (newitr < 1200)) {
2223 		newitr = (newitr / 3);
2224 	} else {
2225 		newitr = (newitr / 2);
2226 	}
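	/*
	 * Worked example of the heuristic above: 300 packets totalling
	 * 450000 bytes in the last interval give an average of 1500
	 * bytes/packet, so newitr = 1500 + 24 = 1524; that falls outside
	 * the (300, 1200) mid range, so the value written back is
	 * 1524 / 2 = 762.
	 */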
2227 
2228 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2229 		newitr |= newitr << 16;
2230 	} else {
2231 		newitr |= IXGBE_EITR_CNT_WDIS;
2232 	}
2233 
2234 	/* save for next interrupt */
2235 	que->eitr_setting = newitr;
2236 
2237 	/* Reset state */
2238 	/* FIXME txr->bytes = 0; */
2239 	/* FIXME txr->packets = 0; */
2240 	rxr->bytes = 0;
2241 	rxr->packets = 0;
2242 
2243 	return;
2244 }
2245 
2246 /*********************************************************************
2247  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2248  **********************************************************************/
2249 static int
2250 ixgbe_msix_que(void *arg)
2251 {
2252 	struct ix_rx_queue *que = arg;
2253 	struct ixgbe_softc     *sc = que->sc;
2254 	if_t               ifp = iflib_get_ifp(que->sc->ctx);
2255 
2256 	/* Protect against spurious interrupts */
2257 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2258 		return (FILTER_HANDLED);
2259 
2260 	ixgbe_disable_queue(sc, que->msix);
2261 	++que->irqs;
2262 
2263 	/* Check for AIM */
2264 	if (sc->enable_aim) {
2265 		ixgbe_perform_aim(sc, que);
2266 	}
2267 
2268 	return (FILTER_SCHEDULE_THREAD);
2269 } /* ixgbe_msix_que */
2270 
2271 /************************************************************************
2272  * ixgbe_media_status - Media Ioctl callback
2273  *
2274  *   Called whenever the user queries the status of
2275  *   the interface using ifconfig.
2276  ************************************************************************/
2277 static void
2278 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2279 {
2280 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
2281 	struct ixgbe_hw *hw = &sc->hw;
2282 	int             layer;
2283 
2284 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2285 
2286 	ifmr->ifm_status = IFM_AVALID;
2287 	ifmr->ifm_active = IFM_ETHER;
2288 
2289 	if (!sc->link_active)
2290 		return;
2291 
2292 	ifmr->ifm_status |= IFM_ACTIVE;
2293 	layer = sc->phy_layer;
2294 
2295 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2296 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2297 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2298 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2299 		switch (sc->link_speed) {
2300 		case IXGBE_LINK_SPEED_10GB_FULL:
2301 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2302 			break;
2303 		case IXGBE_LINK_SPEED_1GB_FULL:
2304 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2305 			break;
2306 		case IXGBE_LINK_SPEED_100_FULL:
2307 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2308 			break;
2309 		case IXGBE_LINK_SPEED_10_FULL:
2310 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2311 			break;
2312 		}
2313 	if (hw->mac.type == ixgbe_mac_X550)
2314 		switch (sc->link_speed) {
2315 		case IXGBE_LINK_SPEED_5GB_FULL:
2316 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2317 			break;
2318 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2319 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2320 			break;
2321 		}
2322 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2323 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2324 		switch (sc->link_speed) {
2325 		case IXGBE_LINK_SPEED_10GB_FULL:
2326 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2327 			break;
2328 		case IXGBE_LINK_SPEED_1GB_FULL:
2329 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2330 			break;
2331 		}
2332 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2333 		switch (sc->link_speed) {
2334 		case IXGBE_LINK_SPEED_10GB_FULL:
2335 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2336 			break;
2337 		case IXGBE_LINK_SPEED_1GB_FULL:
2338 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2339 			break;
2340 		}
2341 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2342 		switch (sc->link_speed) {
2343 		case IXGBE_LINK_SPEED_10GB_FULL:
2344 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2345 			break;
2346 		case IXGBE_LINK_SPEED_1GB_FULL:
2347 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2348 			break;
2349 		}
2350 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2351 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2352 		switch (sc->link_speed) {
2353 		case IXGBE_LINK_SPEED_10GB_FULL:
2354 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2355 			break;
2356 		case IXGBE_LINK_SPEED_1GB_FULL:
2357 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2358 			break;
2359 		}
2360 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2361 		switch (sc->link_speed) {
2362 		case IXGBE_LINK_SPEED_10GB_FULL:
2363 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2364 			break;
2365 		}
2366 	/*
2367 	 * XXX: These need to use the proper media types once
2368 	 * they're added.
2369 	 */
2370 #ifndef IFM_ETH_XTYPE
2371 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2372 		switch (sc->link_speed) {
2373 		case IXGBE_LINK_SPEED_10GB_FULL:
2374 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2375 			break;
2376 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2377 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2378 			break;
2379 		case IXGBE_LINK_SPEED_1GB_FULL:
2380 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2381 			break;
2382 		}
2383 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2384 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2385 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2386 		switch (sc->link_speed) {
2387 		case IXGBE_LINK_SPEED_10GB_FULL:
2388 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2389 			break;
2390 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2391 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2392 			break;
2393 		case IXGBE_LINK_SPEED_1GB_FULL:
2394 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2395 			break;
2396 		}
2397 #else
2398 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2399 		switch (sc->link_speed) {
2400 		case IXGBE_LINK_SPEED_10GB_FULL:
2401 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2402 			break;
2403 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2404 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2405 			break;
2406 		case IXGBE_LINK_SPEED_1GB_FULL:
2407 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2408 			break;
2409 		}
2410 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2411 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2412 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2413 		switch (sc->link_speed) {
2414 		case IXGBE_LINK_SPEED_10GB_FULL:
2415 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2416 			break;
2417 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2418 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2419 			break;
2420 		case IXGBE_LINK_SPEED_1GB_FULL:
2421 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2422 			break;
2423 		}
2424 #endif
2425 
2426 	/* If nothing is recognized... */
2427 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2428 		ifmr->ifm_active |= IFM_UNKNOWN;
2429 
2430 	/* Display current flow control setting used on link */
2431 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2432 	    hw->fc.current_mode == ixgbe_fc_full)
2433 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2434 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2435 	    hw->fc.current_mode == ixgbe_fc_full)
2436 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2437 } /* ixgbe_if_media_status */
2438 
2439 /************************************************************************
2440  * ixgbe_if_media_change - Media Ioctl callback
2441  *
2442  *   Called when the user changes speed/duplex using
2443  *   media/mediaopt option with ifconfig.
2444  ************************************************************************/
2445 static int
2446 ixgbe_if_media_change(if_ctx_t ctx)
2447 {
2448 	struct ixgbe_softc   *sc = iflib_get_softc(ctx);
2449 	struct ifmedia   *ifm = iflib_get_media(ctx);
2450 	struct ixgbe_hw  *hw = &sc->hw;
2451 	ixgbe_link_speed speed = 0;
2452 
2453 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2454 
2455 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2456 		return (EINVAL);
2457 
2458 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2459 		return (EPERM);
2460 
2461 	/*
2462 	 * We don't actually need to check against the supported
2463 	 * media types of the adapter; ifmedia will take care of
2464 	 * that for us.
2465 	 */
2466 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2467 	case IFM_AUTO:
2468 	case IFM_10G_T:
2469 		speed |= IXGBE_LINK_SPEED_100_FULL;
2470 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2471 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2472 		break;
2473 	case IFM_10G_LRM:
2474 	case IFM_10G_LR:
2475 #ifndef IFM_ETH_XTYPE
2476 	case IFM_10G_SR: /* KR, too */
2477 	case IFM_10G_CX4: /* KX4 */
2478 #else
2479 	case IFM_10G_KR:
2480 	case IFM_10G_KX4:
2481 #endif
2482 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2483 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2484 		break;
2485 #ifndef IFM_ETH_XTYPE
2486 	case IFM_1000_CX: /* KX */
2487 #else
2488 	case IFM_1000_KX:
2489 #endif
2490 	case IFM_1000_LX:
2491 	case IFM_1000_SX:
2492 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2493 		break;
2494 	case IFM_1000_T:
2495 		speed |= IXGBE_LINK_SPEED_100_FULL;
2496 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2497 		break;
2498 	case IFM_10G_TWINAX:
2499 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2500 		break;
2501 	case IFM_5000_T:
2502 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
2503 		break;
2504 	case IFM_2500_T:
2505 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2506 		break;
2507 	case IFM_100_TX:
2508 		speed |= IXGBE_LINK_SPEED_100_FULL;
2509 		break;
2510 	case IFM_10_T:
2511 		speed |= IXGBE_LINK_SPEED_10_FULL;
2512 		break;
2513 	default:
2514 		goto invalid;
2515 	}
2516 
2517 	hw->mac.autotry_restart = true;
2518 	hw->mac.ops.setup_link(hw, speed, true);
2519 	sc->advertise =
2520 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
2521 	    ((speed & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
2522 	    ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
2523 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
2524 	    ((speed & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
2525 	    ((speed & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);
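	/*
	 * sc->advertise uses the same bitmap as the advertise_speed
	 * sysctl: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M,
	 * 0x10 = 2.5G, 0x20 = 5G.  E.g. selecting IFM_AUTO on a
	 * 10GBase-T port sets speed = 100M | 1G | 10G above, and
	 * therefore advertise = 0x7.
	 */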
2526 
2527 	return (0);
2528 
2529 invalid:
2530 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2531 
2532 	return (EINVAL);
2533 } /* ixgbe_if_media_change */
2534 
2535 /************************************************************************
2536  * ixgbe_set_promisc
2537  ************************************************************************/
2538 static int
2539 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2540 {
2541 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2542 	if_t           ifp = iflib_get_ifp(ctx);
2543 	u32            rctl;
2544 	int            mcnt = 0;
2545 
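	/*
	 * FCTRL mapping implemented below: IFF_PROMISC enables both
	 * unicast (UPE) and multicast (MPE) promiscuous modes,
	 * IFF_ALLMULTI enables only MPE, and otherwise UPE is cleared
	 * while MPE is cleared only as long as the multicast list still
	 * fits within MAX_NUM_MULTICAST_ADDRESSES perfect filters.
	 */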
2546 	rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2547 	rctl &= (~IXGBE_FCTRL_UPE);
2548 	if (if_getflags(ifp) & IFF_ALLMULTI)
2549 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2550 	else {
2551 		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
2552 	}
2553 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2554 		rctl &= (~IXGBE_FCTRL_MPE);
2555 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2556 
2557 	if (if_getflags(ifp) & IFF_PROMISC) {
2558 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2559 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2560 	} else if (if_getflags(ifp) & IFF_ALLMULTI) {
2561 		rctl |= IXGBE_FCTRL_MPE;
2562 		rctl &= ~IXGBE_FCTRL_UPE;
2563 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2564 	}
2565 	return (0);
2566 } /* ixgbe_if_promisc_set */
2567 
2568 /************************************************************************
2569  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2570  ************************************************************************/
2571 static int
2572 ixgbe_msix_link(void *arg)
2573 {
2574 	struct ixgbe_softc  *sc = arg;
2575 	struct ixgbe_hw *hw = &sc->hw;
2576 	u32             eicr, eicr_mask;
2577 	s32             retval;
2578 
2579 	++sc->link_irq;
2580 
2581 	/* Pause other interrupts */
2582 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2583 
2584 	/* First get the cause */
2585 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2586 	/* Be sure the queue bits are not cleared */
2587 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2588 	/* Clear interrupt with write */
2589 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2590 
2591 	/* Link status change */
2592 	if (eicr & IXGBE_EICR_LSC) {
2593 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2594 		sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
2595 	}
2596 
2597 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2598 		if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2599 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2600 			/* This is probably overkill :) */
2601 			if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2602 				return (FILTER_HANDLED);
2603 			/* Disable the interrupt */
2604 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2605 			sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2606 		} else
2607 			if (eicr & IXGBE_EICR_ECC) {
2608 				device_printf(iflib_get_dev(sc->ctx),
2609 				   "Received ECC Err, initiating reset\n");
2610 				hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2611 				ixgbe_reset_hw(hw);
2612 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2613 			}
2614 
2615 		/* Check for over temp condition */
2616 		if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2617 			switch (sc->hw.mac.type) {
2618 			case ixgbe_mac_X550EM_a:
2619 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2620 					break;
2621 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2622 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2623 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2624 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2625 				retval = hw->phy.ops.check_overtemp(hw);
2626 				if (retval != IXGBE_ERR_OVERTEMP)
2627 					break;
2628 				device_printf(iflib_get_dev(sc->ctx),
2629 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2630 				device_printf(iflib_get_dev(sc->ctx),
2631 				    "System shutdown required!\n");
2632 				break;
2633 			default:
2634 				if (!(eicr & IXGBE_EICR_TS))
2635 					break;
2636 				retval = hw->phy.ops.check_overtemp(hw);
2637 				if (retval != IXGBE_ERR_OVERTEMP)
2638 					break;
2639 				device_printf(iflib_get_dev(sc->ctx),
2640 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2641 				device_printf(iflib_get_dev(sc->ctx),
2642 				    "System shutdown required!\n");
2643 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2644 				break;
2645 			}
2646 		}
2647 
2648 		/* Check for VF message */
2649 		if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2650 		    (eicr & IXGBE_EICR_MAILBOX))
2651 			sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
2652 	}
2653 
2654 	if (ixgbe_is_sfp(hw)) {
2655 		/* Pluggable optics-related interrupt */
2656 		if (hw->mac.type >= ixgbe_mac_X540)
2657 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2658 		else
2659 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2660 
2661 		if (eicr & eicr_mask) {
2662 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2663 			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
2664 		}
2665 
2666 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2667 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2668 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2669 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2670 			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2671 		}
2672 	}
2673 
2674 	/* Check for fan failure */
2675 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2676 		ixgbe_check_fan_failure(sc, eicr, true);
2677 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2678 	}
2679 
2680 	/* External PHY interrupt */
2681 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2682 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2683 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2684 		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
2685 	}
2686 
2687 	return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2688 } /* ixgbe_msix_link */
2689 
2690 /************************************************************************
2691  * ixgbe_sysctl_interrupt_rate_handler
2692  ************************************************************************/
2693 static int
2694 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2695 {
2696 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2697 	int                error;
2698 	unsigned int       reg, usec, rate;
2699 
2700 	if (atomic_load_acq_int(&que->sc->recovery_mode))
2701 		return (EPERM);
2702 
2703 	reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2704 	usec = ((reg & 0x0FF8) >> 3);
2705 	if (usec > 0)
2706 		rate = 500000 / usec;
2707 	else
2708 		rate = 0;
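	/*
	 * The EITR interval field (bits 3-11) counts in 2 usec units,
	 * hence rate = 1 / (2us * field) = 500000 / field interrupts per
	 * second.  The write path below inverts this with
	 * (4000000 / rate) & 0xff8, the same value pre-shifted into the
	 * field position: e.g. a requested rate of 8000 stores
	 * 4000000 / 8000 = 500, masked to 0x1f0, which reads back as
	 * roughly 8064 interrupts per second.
	 */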
2709 	error = sysctl_handle_int(oidp, &rate, 0, req);
2710 	if (error || !req->newptr)
2711 		return error;
2712 	reg &= ~0xfff; /* default, no limitation */
2713 	ixgbe_max_interrupt_rate = 0;
2714 	if (rate > 0 && rate < 500000) {
2715 		if (rate < 1000)
2716 			rate = 1000;
2717 		ixgbe_max_interrupt_rate = rate;
2718 		reg |= ((4000000/rate) & 0xff8);
2719 	}
2720 	IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2721 
2722 	return (0);
2723 } /* ixgbe_sysctl_interrupt_rate_handler */
2724 
2725 /************************************************************************
2726  * ixgbe_add_device_sysctls
2727  ************************************************************************/
2728 static void
2729 ixgbe_add_device_sysctls(if_ctx_t ctx)
2730 {
2731 	struct ixgbe_softc         *sc = iflib_get_softc(ctx);
2732 	device_t               dev = iflib_get_dev(ctx);
2733 	struct ixgbe_hw        *hw = &sc->hw;
2734 	struct sysctl_oid_list *child;
2735 	struct sysctl_ctx_list *ctx_list;
2736 
2737 	ctx_list = device_get_sysctl_ctx(dev);
2738 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2739 
2740 	/* Sysctls for all devices */
2741 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2742 	    CTLTYPE_INT | CTLFLAG_RW,
2743 	    sc, 0, ixgbe_sysctl_flowcntl, "I",
2744 	    IXGBE_SYSCTL_DESC_SET_FC);
2745 
2746 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2747 	    CTLTYPE_INT | CTLFLAG_RW,
2748 	    sc, 0, ixgbe_sysctl_advertise, "I",
2749 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2750 
2751 	sc->enable_aim = ixgbe_enable_aim;
2752 	SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2753 	    &sc->enable_aim, 0, "Interrupt Moderation");
2754 
2755 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2756 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2757 	    ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2758 
2759 #ifdef IXGBE_DEBUG
2760 	/* testing sysctls (for all devices) */
2761 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2762 	    CTLTYPE_INT | CTLFLAG_RW,
2763 	    sc, 0, ixgbe_sysctl_power_state,
2764 	    "I", "PCI Power State");
2765 
2766 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2767 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2768 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2769 #endif
2770 	/* for X550 series devices */
2771 	if (hw->mac.type >= ixgbe_mac_X550)
2772 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2773 		    CTLTYPE_U16 | CTLFLAG_RW,
2774 		    sc, 0, ixgbe_sysctl_dmac,
2775 		    "I", "DMA Coalesce");
2776 
2777 	/* for WoL-capable devices */
2778 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2779 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2780 		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2781 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2782 
2783 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2784 		    CTLTYPE_U32 | CTLFLAG_RW,
2785 		    sc, 0, ixgbe_sysctl_wufc,
2786 		    "I", "Enable/Disable Wake Up Filters");
2787 	}
2788 
2789 	/* for X552/X557-AT devices */
2790 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2791 		struct sysctl_oid *phy_node;
2792 		struct sysctl_oid_list *phy_list;
2793 
2794 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2795 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2796 		phy_list = SYSCTL_CHILDREN(phy_node);
2797 
2798 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2799 		    CTLTYPE_U16 | CTLFLAG_RD,
2800 		    sc, 0, ixgbe_sysctl_phy_temp,
2801 		    "I", "Current External PHY Temperature (Celsius)");
2802 
2803 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2804 		    "overtemp_occurred",
2805 		    CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
2806 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2807 		    "External PHY High Temperature Event Occurred");
2808 	}
2809 
2810 	if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2811 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2812 		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2813 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2814 	}
2815 } /* ixgbe_add_device_sysctls */
2816 
2817 /************************************************************************
2818  * ixgbe_allocate_pci_resources
2819  ************************************************************************/
2820 static int
2821 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2822 {
2823 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2824 	device_t        dev = iflib_get_dev(ctx);
2825 	int             rid;
2826 
2827 	rid = PCIR_BAR(0);
2828 	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2829 	    RF_ACTIVE);
2830 
2831 	if (!(sc->pci_mem)) {
2832 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2833 		return (ENXIO);
2834 	}
2835 
2836 	/* Save bus_space values for READ/WRITE_REG macros */
2837 	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
2838 	sc->osdep.mem_bus_space_handle =
2839 	    rman_get_bushandle(sc->pci_mem);
2840 	/* Set hw values for shared code */
2841 	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
2842 
2843 	return (0);
2844 } /* ixgbe_allocate_pci_resources */
2845 
2846 /************************************************************************
2847  * ixgbe_detach - Device removal routine
2848  *
2849  *   Called when the driver is being removed.
2850  *   Stops the adapter and deallocates all the resources
2851  *   that were allocated for driver operation.
2852  *
2853  *   return 0 on success, positive on failure
2854  ************************************************************************/
2855 static int
2856 ixgbe_if_detach(if_ctx_t ctx)
2857 {
2858 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2859 	device_t       dev = iflib_get_dev(ctx);
2860 	u32            ctrl_ext;
2861 
2862 	INIT_DEBUGOUT("ixgbe_detach: begin");
2863 
2864 	if (ixgbe_pci_iov_detach(dev) != 0) {
2865 		device_printf(dev, "SR-IOV in use; detach first.\n");
2866 		return (EBUSY);
2867 	}
2868 
2869 	ixgbe_setup_low_power_mode(ctx);
2870 
2871 	/* let hardware know driver is unloading */
2872 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
2873 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2874 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
2875 
2876 	callout_drain(&sc->fw_mode_timer);
2877 
2878 	ixgbe_free_pci_resources(ctx);
2879 	free(sc->mta, M_IXGBE);
2880 
2881 	return (0);
2882 } /* ixgbe_if_detach */
2883 
2884 /************************************************************************
2885  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2886  *
2887  *   Prepare the adapter/port for LPLU and/or WoL
2888  ************************************************************************/
2889 static int
2890 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2891 {
2892 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
2893 	struct ixgbe_hw *hw = &sc->hw;
2894 	device_t        dev = iflib_get_dev(ctx);
2895 	s32             error = 0;
2896 
2897 	if (!hw->wol_enabled)
2898 		ixgbe_set_phy_power(hw, false);
2899 
2900 	/* Limit power management flow to X550EM baseT */
2901 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2902 	    hw->phy.ops.enter_lplu) {
2903 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2904 		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
2905 		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
2906 
2907 		/*
2908 		 * Clear Wake Up Status register to prevent any previous wakeup
2909 		 * events from waking us up immediately after we suspend.
2910 		 */
2911 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2912 
2913 		/*
2914 		 * Program the Wakeup Filter Control register with user filter
2915 		 * settings
2916 		 */
2917 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
2918 
2919 		/* Enable wakeups and power management in Wakeup Control */
2920 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2921 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2922 
2923 		/* X550EM baseT adapters need a special LPLU flow */
2924 		hw->phy.reset_disable = true;
2925 		ixgbe_if_stop(ctx);
2926 		error = hw->phy.ops.enter_lplu(hw);
2927 		if (error)
2928 			device_printf(dev, "Error entering LPLU: %d\n", error);
2929 		hw->phy.reset_disable = false;
2930 	} else {
2931 		/* Just stop for other adapters */
2932 		ixgbe_if_stop(ctx);
2933 	}
2934 
2935 	return error;
2936 } /* ixgbe_setup_low_power_mode */
2937 
2938 /************************************************************************
2939  * ixgbe_shutdown - Shutdown entry point
2940  ************************************************************************/
2941 static int
2942 ixgbe_if_shutdown(if_ctx_t ctx)
2943 {
2944 	int error = 0;
2945 
2946 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2947 
2948 	error = ixgbe_setup_low_power_mode(ctx);
2949 
2950 	return (error);
2951 } /* ixgbe_if_shutdown */
2952 
2953 /************************************************************************
2954  * ixgbe_suspend
2955  *
2956  *   From D0 to D3
2957  ************************************************************************/
2958 static int
2959 ixgbe_if_suspend(if_ctx_t ctx)
2960 {
2961 	int error = 0;
2962 
2963 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2964 
2965 	error = ixgbe_setup_low_power_mode(ctx);
2966 
2967 	return (error);
2968 } /* ixgbe_if_suspend */
2969 
2970 /************************************************************************
2971  * ixgbe_resume
2972  *
2973  *   From D3 to D0
2974  ************************************************************************/
2975 static int
2976 ixgbe_if_resume(if_ctx_t ctx)
2977 {
2978 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
2979 	device_t        dev = iflib_get_dev(ctx);
2980 	if_t            ifp = iflib_get_ifp(ctx);
2981 	struct ixgbe_hw *hw = &sc->hw;
2982 	u32             wus;
2983 
2984 	INIT_DEBUGOUT("ixgbe_resume: begin");
2985 
2986 	/* Read & clear WUS register */
2987 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2988 	if (wus)
2989 		device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
2991 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2992 	/* And clear WUFC until next low-power transition */
2993 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2994 
2995 	/*
2996 	 * Required after D3->D0 transition;
2997 	 * will re-advertise all previous advertised speeds
2998 	 */
2999 	if (if_getflags(ifp) & IFF_UP)
3000 		ixgbe_if_init(ctx);
3001 
3002 	return (0);
3003 } /* ixgbe_if_resume */
3004 
3005 /************************************************************************
3006  * ixgbe_if_mtu_set - Ioctl mtu entry point
3007  *
3008  *   Return 0 on success, EINVAL on failure
3009  ************************************************************************/
3010 static int
3011 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
3012 {
3013 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3014 	int error = 0;
3015 
3016 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
3017 
3018 	if (mtu > IXGBE_MAX_MTU) {
3019 		error = EINVAL;
3020 	} else {
3021 		sc->max_frame_size = mtu + IXGBE_MTU_HDR;
3022 	}
3023 
3024 	return (error);
3025 } /* ixgbe_if_mtu_set */
3026 
3027 /************************************************************************
3028  * ixgbe_if_crcstrip_set
3029  ************************************************************************/
3030 static void
3031 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
3032 {
3033 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3034 	struct ixgbe_hw *hw = &sc->hw;
3035 	/* crc stripping is set in two places:
3036 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
3037 	 * IXGBE_RDRXCTL (set by the original driver in
3038 	 *	ixgbe_setup_hw_rsc() called in init_locked.
3039 	 *	We disable the setting when netmap is compiled in).
3040 	 * We update the values here, but also in ixgbe.c because
3041 	 * init_locked sometimes is called outside our control.
3042 	 */
3043 	uint32_t hl, rxc;
3044 
3045 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3046 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3047 #ifdef NETMAP
3048 	if (netmap_verbose)
3049 		D("%s read  HLREG 0x%x rxc 0x%x",
3050 			onoff ? "enter" : "exit", hl, rxc);
3051 #endif
3052 	/* hw requirements ... */
3053 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3054 	rxc |= IXGBE_RDRXCTL_RSCACKC;
3055 	if (onoff && !crcstrip) {
3056 		/* keep the crc. Fast rx */
3057 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
3058 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
3059 	} else {
3060 		/* reset default mode */
3061 		hl |= IXGBE_HLREG0_RXCRCSTRP;
3062 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
3063 	}
3064 #ifdef NETMAP
3065 	if (netmap_verbose)
3066 		D("%s write HLREG 0x%x rxc 0x%x",
3067 			onoff ? "enter" : "exit", hl, rxc);
3068 #endif
3069 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
3070 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
3071 } /* ixgbe_if_crcstrip_set */
3072 
3073 /*********************************************************************
3074  * ixgbe_if_init - Init entry point
3075  *
3076  *   Used in two ways: It is used by the stack as an init
3077  *   entry point in network interface structure. It is also
3078  *   used by the driver as a hw/sw initialization routine to
3079  *   get to a consistent state.
3082  **********************************************************************/
3083 void
3084 ixgbe_if_init(if_ctx_t ctx)
3085 {
3086 	struct ixgbe_softc     *sc = iflib_get_softc(ctx);
3087 	if_t               ifp = iflib_get_ifp(ctx);
3088 	device_t           dev = iflib_get_dev(ctx);
3089 	struct ixgbe_hw *hw = &sc->hw;
3090 	struct ix_rx_queue *rx_que;
3091 	struct ix_tx_queue *tx_que;
3092 	u32             txdctl, mhadd;
3093 	u32             rxdctl, rxctrl;
3094 	u32             ctrl_ext;
3095 
3096 	int             i, j, err;
3097 
3098 	INIT_DEBUGOUT("ixgbe_if_init: begin");
3099 
3100 	/* Queue indices may change with IOV mode */
3101 	ixgbe_align_all_queue_indices(sc);
3102 
3103 	/* reprogram the RAR[0] in case user changed it. */
3104 	ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3105 
3106 	/* Get the latest mac address, User can use a LAA */
3107 	bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3108 	ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3109 	hw->addr_ctrl.rar_used_count = 1;
3110 
3111 	ixgbe_init_hw(hw);
3112 
3113 	ixgbe_initialize_iov(sc);
3114 
3115 	ixgbe_initialize_transmit_units(ctx);
3116 
3117 	/* Setup Multicast table */
3118 	ixgbe_if_multi_set(ctx);
3119 
3120 	/* Determine the correct mbuf pool, based on frame size */
3121 	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3122 
3123 	/* Configure RX settings */
3124 	ixgbe_initialize_receive_units(ctx);
3125 
3126 	/*
3127 	 * Initialize variable holding task enqueue requests
3128 	 * from MSI-X interrupts
3129 	 */
3130 	sc->task_requests = 0;
3131 
3132 	/* Enable SDP & MSI-X interrupts based on adapter */
3133 	ixgbe_config_gpie(sc);
3134 
3135 	/* Set MTU size */
3136 	if (if_getmtu(ifp) > ETHERMTU) {
3137 		/* aka IXGBE_MAXFRS on 82599 and newer */
3138 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3139 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
3140 		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3141 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3142 	}
3143 
3144 	/* Now enable all the queues */
3145 	for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
3146 		struct tx_ring *txr = &tx_que->txr;
3147 
3148 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3149 		txdctl |= IXGBE_TXDCTL_ENABLE;
3150 		/* Set WTHRESH to 8, burst writeback */
3151 		txdctl |= (8 << 16);
3152 		/*
3153 		 * When the internal queue falls below PTHRESH (32),
3154 		 * start prefetching as long as there are at least
3155 		 * HTHRESH (1) buffers ready. The values are taken
3156 		 * from the Intel linux driver 3.8.21.
3157 		 * Prefetching enables tx line rate even with 1 queue.
3158 		 */
3159 		txdctl |= (32 << 0) | (1 << 8);
3160 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3161 	}
3162 
3163 	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
3164 		struct rx_ring *rxr = &rx_que->rxr;
3165 
3166 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3167 		if (hw->mac.type == ixgbe_mac_82598EB) {
3168 			/*
3169 			 * PTHRESH = 32 (the 0x20 in 0x080420)
3170 			 * HTHRESH = 4
3171 			 * WTHRESH = 8
3172 			 */
3173 			rxdctl &= ~0x3FFFFF;
3174 			rxdctl |= 0x080420;
3175 		}
3176 		rxdctl |= IXGBE_RXDCTL_ENABLE;
3177 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3178 		for (j = 0; j < 10; j++) {
3179 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3180 			    IXGBE_RXDCTL_ENABLE)
3181 				break;
3182 			else
3183 				msec_delay(1);
3184 		}
3185 		wmb();
3186 	}
3187 
3188 	/* Enable Receive engine */
3189 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3190 	if (hw->mac.type == ixgbe_mac_82598EB)
3191 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
3192 	rxctrl |= IXGBE_RXCTRL_RXEN;
3193 	ixgbe_enable_rx_dma(hw, rxctrl);
3194 
3195 	/* Set up MSI/MSI-X routing */
3196 	if (ixgbe_enable_msix) {
3197 		ixgbe_configure_ivars(sc);
3198 		/* Set up auto-mask */
3199 		if (hw->mac.type == ixgbe_mac_82598EB)
3200 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3201 		else {
3202 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3203 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3204 		}
3205 	} else {  /* Simple settings for Legacy/MSI */
3206 		ixgbe_set_ivar(sc, 0, 0, 0);
3207 		ixgbe_set_ivar(sc, 0, 0, 1);
3208 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3209 	}
3210 
3211 	ixgbe_init_fdir(sc);
3212 
3213 	/*
3214 	 * Check on any SFP devices that
3215 	 * need to be kick-started
3216 	 */
3217 	if (hw->phy.type == ixgbe_phy_none) {
3218 		err = hw->phy.ops.identify(hw);
3219 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3220 			device_printf(dev,
3221 			    "Unsupported SFP+ module type was detected.\n");
3222 			return;
3223 		}
3224 	}
3225 
3226 	/* Set moderation on the Link interrupt */
3227 	IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3228 
3229 	/* Enable power to the phy. */
3230 	ixgbe_set_phy_power(hw, true);
3231 
3232 	/* Config/Enable Link */
3233 	ixgbe_config_link(ctx);
3234 
3235 	/* Hardware Packet Buffer & Flow Control setup */
3236 	ixgbe_config_delay_values(sc);
3237 
3238 	/* Initialize the FC settings */
3239 	ixgbe_start_hw(hw);
3240 
3241 	/* Set up VLAN support and filter */
3242 	ixgbe_setup_vlan_hw_support(ctx);
3243 
3244 	/* Setup DMA Coalescing */
3245 	ixgbe_config_dmac(sc);
3246 
3247 	/* And now turn on interrupts */
3248 	ixgbe_if_enable_intr(ctx);
3249 
3250 	/* Enable the use of the MBX by the VF's */
3251 	if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3252 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3253 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3254 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3255 	}
3257 } /* ixgbe_if_init */
3258 
3259 /************************************************************************
3260  * ixgbe_set_ivar
3261  *
3262  *   Setup the correct IVAR register for a particular MSI-X interrupt
3263  *     (yes this is all very magic and confusing :)
3264  *    - entry is the register array entry
3265  *    - vector is the MSI-X vector for this queue
3266  *    - type is RX/TX/MISC
3267  ************************************************************************/
3268 static void
3269 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3270 {
3271 	struct ixgbe_hw *hw = &sc->hw;
3272 	u32 ivar, index;
3273 
3274 	vector |= IXGBE_IVAR_ALLOC_VAL;
3275 
3276 	switch (hw->mac.type) {
3277 	case ixgbe_mac_82598EB:
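		/*
		 * 82598: each 32-bit IVAR register holds four 8-bit entries.
		 * RX queue entries start at 0, TX entries at 64, and the
		 * "other causes" entry sits at a fixed index.
		 */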
3278 		if (type == -1)
3279 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3280 		else
3281 			entry += (type * 64);
3282 		index = (entry >> 2) & 0x1F;
3283 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3284 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3285 		ivar |= (vector << (8 * (entry & 0x3)));
3286 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3287 		break;
3288 	case ixgbe_mac_82599EB:
3289 	case ixgbe_mac_X540:
3290 	case ixgbe_mac_X550:
3291 	case ixgbe_mac_X550EM_x:
3292 	case ixgbe_mac_X550EM_a:
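		/*
		 * 82599 and newer: each IVAR register covers two queue
		 * indices.  Within a queue's 16-bit half the low byte is the
		 * RX entry (type 0) and the high byte the TX entry (type 1);
		 * misc causes use the dedicated IVAR_MISC register.
		 */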
3293 		if (type == -1) { /* MISC IVAR */
3294 			index = (entry & 1) * 8;
3295 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3296 			ivar &= ~(0xFF << index);
3297 			ivar |= (vector << index);
3298 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3299 		} else {          /* RX/TX IVARS */
3300 			index = (16 * (entry & 1)) + (8 * type);
3301 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3302 			ivar &= ~(0xFF << index);
3303 			ivar |= (vector << index);
3304 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3305 		}
		break;
3306 	default:
3307 		break;
3308 	}
3309 } /* ixgbe_set_ivar */
3310 
3311 /************************************************************************
3312  * ixgbe_configure_ivars
3313  ************************************************************************/
3314 static void
3315 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3316 {
3317 	struct ix_rx_queue *rx_que = sc->rx_queues;
3318 	struct ix_tx_queue *tx_que = sc->tx_queues;
3319 	u32                newitr;
3320 
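	/*
	 * EITR stores the interrupt interval in 2 usec units in bits [11:3].
	 * Converting interrupts/sec to that encoding gives
	 * ((1000000 / rate) / 2) << 3 == 4000000 / rate; the 0x0FF8 mask
	 * keeps only the valid interval field.
	 */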
3321 	if (ixgbe_max_interrupt_rate > 0)
3322 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3323 	else {
3324 		/*
3325 		 * Disable DMA coalescing if interrupt moderation is
3326 		 * disabled.
3327 		 */
3328 		sc->dmac = 0;
3329 		newitr = 0;
3330 	}
3331 
3332 	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3333 		struct rx_ring *rxr = &rx_que->rxr;
3334 
3335 		/* First the RX queue entry */
3336 		ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3337 
3338 		/* Set an Initial EITR value */
3339 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3340 	}
3341 	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3342 		struct tx_ring *txr = &tx_que->txr;
3343 
3344 		/* ... and the TX */
3345 		ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3346 	}
3347 	/* For the Link interrupt */
3348 	ixgbe_set_ivar(sc, 1, sc->vector, -1);
3349 } /* ixgbe_configure_ivars */
3350 
3351 /************************************************************************
3352  * ixgbe_config_gpie
3353  ************************************************************************/
3354 static void
3355 ixgbe_config_gpie(struct ixgbe_softc *sc)
3356 {
3357 	struct ixgbe_hw *hw = &sc->hw;
3358 	u32             gpie;
3359 
3360 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3361 
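	/*
	 * MSIX_MODE selects multiple MSI-X vectors, EIAME enables extended
	 * interrupt auto-mask, PBA_SUPPORT enables the MSI-X PBA, and OCD
	 * keeps the "other causes" EICR bit from auto-clearing on read.
	 */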
3362 	if (sc->intr_type == IFLIB_INTR_MSIX) {
3363 		/* Enable Enhanced MSI-X mode */
3364 		gpie |= IXGBE_GPIE_MSIX_MODE
3365 		     |  IXGBE_GPIE_EIAME
3366 		     |  IXGBE_GPIE_PBA_SUPPORT
3367 		     |  IXGBE_GPIE_OCD;
3368 	}
3369 
3370 	/* Fan Failure Interrupt */
3371 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3372 		gpie |= IXGBE_SDP1_GPIEN;
3373 
3374 	/* Thermal Sensor Interrupt */
3375 	if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3376 		gpie |= IXGBE_SDP0_GPIEN_X540;
3377 
3378 	/* Link detection */
3379 	switch (hw->mac.type) {
3380 	case ixgbe_mac_82599EB:
3381 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3382 		break;
3383 	case ixgbe_mac_X550EM_x:
3384 	case ixgbe_mac_X550EM_a:
3385 		gpie |= IXGBE_SDP0_GPIEN_X540;
3386 		break;
3387 	default:
3388 		break;
3389 	}
3390 
3391 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3393 } /* ixgbe_config_gpie */
3394 
3395 /************************************************************************
3396  * ixgbe_config_delay_values
3397  *
3398  *   Requires sc->max_frame_size to be set.
3399  ************************************************************************/
3400 static void
3401 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3402 {
3403 	struct ixgbe_hw *hw = &sc->hw;
3404 	u32             rxpb, frame, size, tmp;
3405 
3406 	frame = sc->max_frame_size;
3407 
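	/*
	 * The IXGBE_DV*() macros return the worst-case delay value in bits
	 * for the given frame size; IXGBE_BT2KB() converts that to KB so it
	 * can be compared against the packet buffer size (RXPBSIZE shifted
	 * down to KB below).
	 */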
3408 	/* Calculate High Water */
3409 	switch (hw->mac.type) {
3410 	case ixgbe_mac_X540:
3411 	case ixgbe_mac_X550:
3412 	case ixgbe_mac_X550EM_x:
3413 	case ixgbe_mac_X550EM_a:
3414 		tmp = IXGBE_DV_X540(frame, frame);
3415 		break;
3416 	default:
3417 		tmp = IXGBE_DV(frame, frame);
3418 		break;
3419 	}
3420 	size = IXGBE_BT2KB(tmp);
3421 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3422 	hw->fc.high_water[0] = rxpb - size;
3423 
3424 	/* Now calculate Low Water */
3425 	switch (hw->mac.type) {
3426 	case ixgbe_mac_X540:
3427 	case ixgbe_mac_X550:
3428 	case ixgbe_mac_X550EM_x:
3429 	case ixgbe_mac_X550EM_a:
3430 		tmp = IXGBE_LOW_DV_X540(frame);
3431 		break;
3432 	default:
3433 		tmp = IXGBE_LOW_DV(frame);
3434 		break;
3435 	}
3436 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3437 
3438 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3439 	hw->fc.send_xon = true;
3440 } /* ixgbe_config_delay_values */
3441 
3442 /************************************************************************
3443  * ixgbe_if_multi_set - Multicast Update
3444  *
3445  *   Called whenever multicast address list is updated.
3446  ************************************************************************/
3447 static u_int
3448 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3449 {
3450 	struct ixgbe_softc *sc = arg;
3451 	struct ixgbe_mc_addr *mta = sc->mta;
3452 
3453 	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3454 		return (0);
3455 	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3456 	mta[idx].vmdq = sc->pool;
3457 
3458 	return (1);
3459 } /* ixgbe_mc_filter_apply */
3460 
3461 static void
3462 ixgbe_if_multi_set(if_ctx_t ctx)
3463 {
3464 	struct ixgbe_softc       *sc = iflib_get_softc(ctx);
3465 	struct ixgbe_mc_addr *mta;
3466 	if_t                  ifp = iflib_get_ifp(ctx);
3467 	u8                   *update_ptr;
3468 	u32                  fctrl;
3469 	u_int		     mcnt;
3470 
3471 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3472 
3473 	mta = sc->mta;
3474 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3475 
3476 	mcnt = if_foreach_llmaddr(ifp, ixgbe_mc_filter_apply, sc);
3477 
3478 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3479 		update_ptr = (u8 *)mta;
3480 		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3481 		    ixgbe_mc_array_itr, true);
3482 	}
3483 
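	/*
	 * FCTRL.UPE enables unicast promiscuous mode and FCTRL.MPE multicast
	 * promiscuous mode; MPE alone also covers overflowing the multicast
	 * filter table.
	 */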
3484 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3485 
3486 	if (if_getflags(ifp) & IFF_PROMISC)
3487 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3488 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3489 	    if_getflags(ifp) & IFF_ALLMULTI) {
3490 		fctrl |= IXGBE_FCTRL_MPE;
3491 		fctrl &= ~IXGBE_FCTRL_UPE;
3492 	} else
3493 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3494 
3495 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3496 } /* ixgbe_if_multi_set */
3497 
3498 /************************************************************************
3499  * ixgbe_mc_array_itr
3500  *
3501  *   An iterator function needed by the multicast shared code.
3502  *   It feeds the shared code routine the addresses in the
3503  *   array of ixgbe_set_multi() one by one.
3504  ************************************************************************/
3505 static u8 *
3506 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3507 {
3508 	struct ixgbe_mc_addr *mta;
3509 
3510 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3511 	*vmdq = mta->vmdq;
3512 
3513 	*update_ptr = (u8*)(mta + 1);
3514 
3515 	return (mta->addr);
3516 } /* ixgbe_mc_array_itr */
3517 
3518 /************************************************************************
3519  * ixgbe_if_timer - Timer routine
3520  *
3521  *   Checks for pluggable optics and link status, then
3522  *   defers the remaining work to the admin task.
3523  ************************************************************************/
3524 static void
3525 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3526 {
3527 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3528 
3529 	if (qid != 0)
3530 		return;
3531 
3532 	/* Check for pluggable optics */
3533 	if (sc->sfp_probe)
3534 		if (!ixgbe_sfp_probe(ctx))
3535 			return; /* Nothing to do */
3536 
3537 	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3538 
3539 	/* Fire off the adminq task */
3540 	iflib_admin_intr_deferred(ctx);
3541 
3542 } /* ixgbe_if_timer */
3543 
3544 /************************************************************************
3545  * ixgbe_fw_mode_timer - FW mode timer routine
3546  ************************************************************************/
3547 static void
3548 ixgbe_fw_mode_timer(void *arg)
3549 {
3550 	struct ixgbe_softc *sc = arg;
3551 	struct ixgbe_hw *hw = &sc->hw;
3552 
3553 	if (ixgbe_fw_recovery_mode(hw)) {
3554 		if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
3555 			/* Firmware error detected, entering recovery mode */
3556 			device_printf(sc->dev, "Firmware recovery mode detected. Limiting"
3557 			    " functionality. Refer to the Intel(R) Ethernet Adapters"
3558 			    " and Devices User Guide for details on firmware recovery"
3559 			    " mode.\n");
3560 
3561 			if (hw->adapter_stopped == false)
3562 				ixgbe_if_stop(sc->ctx);
3563 		}
3564 	} else
3565 		atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
3566 
3568 	callout_reset(&sc->fw_mode_timer, hz,
3569 	    ixgbe_fw_mode_timer, sc);
3570 } /* ixgbe_fw_mode_timer */
3571 
3572 /************************************************************************
3573  * ixgbe_sfp_probe
3574  *
3575  *   Determine whether a port has optics inserted.
3576  ************************************************************************/
3577 static bool
3578 ixgbe_sfp_probe(if_ctx_t ctx)
3579 {
3580 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3581 	struct ixgbe_hw *hw = &sc->hw;
3582 	device_t        dev = iflib_get_dev(ctx);
3583 	bool            result = false;
3584 
3585 	if ((hw->phy.type == ixgbe_phy_nl) &&
3586 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3587 		s32 ret = hw->phy.ops.identify_sfp(hw);
3588 		if (ret)
3589 			goto out;
3590 		ret = hw->phy.ops.reset(hw);
3591 		sc->sfp_probe = false;
3592 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3593 			device_printf(dev, "Unsupported SFP+ module detected!");
3594 			device_printf(dev,
3595 			    "Reload driver with supported module.\n");
3596 			goto out;
3597 		} else
3598 			device_printf(dev, "SFP+ module detected!\n");
3599 		/* We now have supported optics */
3600 		result = true;
3601 	}
3602 out:
3603 
3604 	return (result);
3605 } /* ixgbe_sfp_probe */
3606 
3607 /************************************************************************
3608  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3609  ************************************************************************/
3610 static void
3611 ixgbe_handle_mod(void *context)
3612 {
3613 	if_ctx_t        ctx = context;
3614 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3615 	struct ixgbe_hw *hw = &sc->hw;
3616 	device_t        dev = iflib_get_dev(ctx);
3617 	u32             err, cage_full = 0;
3618 
3619 	if (sc->hw.need_crosstalk_fix) {
3620 		switch (hw->mac.type) {
3621 		case ixgbe_mac_82599EB:
3622 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3623 			    IXGBE_ESDP_SDP2;
3624 			break;
3625 		case ixgbe_mac_X550EM_x:
3626 		case ixgbe_mac_X550EM_a:
3627 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3628 			    IXGBE_ESDP_SDP0;
3629 			break;
3630 		default:
3631 			break;
3632 		}
3633 
3634 		if (!cage_full)
3635 			goto handle_mod_out;
3636 	}
3637 
3638 	err = hw->phy.ops.identify_sfp(hw);
3639 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3640 		device_printf(dev,
3641 		    "Unsupported SFP+ module type was detected.\n");
3642 		goto handle_mod_out;
3643 	}
3644 
3645 	if (hw->mac.type == ixgbe_mac_82598EB)
3646 		err = hw->phy.ops.reset(hw);
3647 	else
3648 		err = hw->mac.ops.setup_sfp(hw);
3649 
3650 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3651 		device_printf(dev,
3652 		    "Setup failure - unsupported SFP+ module type.\n");
3653 		goto handle_mod_out;
3654 	}
3655 	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3656 	return;
3657 
3658 handle_mod_out:
3659 	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3660 } /* ixgbe_handle_mod */
3661 
3662 
3663 /************************************************************************
3664  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3665  ************************************************************************/
3666 static void
3667 ixgbe_handle_msf(void *context)
3668 {
3669 	if_ctx_t        ctx = context;
3670 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3671 	struct ixgbe_hw *hw = &sc->hw;
3672 	u32             autoneg;
3673 	bool            negotiate;
3674 
3675 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3676 	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3677 
3678 	autoneg = hw->phy.autoneg_advertised;
3679 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3680 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3681 	if (hw->mac.ops.setup_link)
3682 		hw->mac.ops.setup_link(hw, autoneg, true);
3683 
3684 	/* Adjust media types shown in ifconfig */
3685 	ifmedia_removeall(sc->media);
3686 	ixgbe_add_media_types(sc->ctx);
3687 	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3688 } /* ixgbe_handle_msf */
3689 
3690 /************************************************************************
3691  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3692  ************************************************************************/
3693 static void
3694 ixgbe_handle_phy(void *context)
3695 {
3696 	if_ctx_t        ctx = context;
3697 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3698 	struct ixgbe_hw *hw = &sc->hw;
3699 	int             error;
3700 
3701 	error = hw->phy.ops.handle_lasi(hw);
3702 	if (error == IXGBE_ERR_OVERTEMP)
3703 		device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3704 	else if (error)
3705 		device_printf(sc->dev,
3706 		    "Error handling LASI interrupt: %d\n", error);
3707 } /* ixgbe_handle_phy */
3708 
3709 /************************************************************************
3710  * ixgbe_if_stop - Stop the hardware
3711  *
3712  *   Disables all traffic on the adapter by issuing a
3713  *   global reset on the MAC and deallocates TX/RX buffers.
3714  ************************************************************************/
3715 static void
3716 ixgbe_if_stop(if_ctx_t ctx)
3717 {
3718 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
3719 	struct ixgbe_hw *hw = &sc->hw;
3720 
3721 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3722 
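	/*
	 * Reset the MAC, then run the shared-code stop path (which sets
	 * hw->adapter_stopped) to mask interrupts and halt the RX/TX units.
	 */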
3723 	ixgbe_reset_hw(hw);
3724 	hw->adapter_stopped = false;
3725 	ixgbe_stop_adapter(hw);
3726 	if (hw->mac.type == ixgbe_mac_82599EB)
3727 		ixgbe_stop_mac_link_on_d3_82599(hw);
3728 	/* Turn off the laser - noop with no optics */
3729 	ixgbe_disable_tx_laser(hw);
3730 
3731 	/* Update the stack */
3732 	sc->link_up = false;
3733 	ixgbe_if_update_admin_status(ctx);
3734 
3735 	/* reprogram the RAR[0] in case user changed it. */
3736 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
3739 } /* ixgbe_if_stop */
3740 
3741 /************************************************************************
3742  * ixgbe_if_update_admin_status - Update OS on link state
3743  *
3744  * Note: Only updates the OS on the cached link state.
3745  *       The real check of the hardware only happens with
3746  *       a link interrupt.
3747  ************************************************************************/
3748 static void
3749 ixgbe_if_update_admin_status(if_ctx_t ctx)
3750 {
3751 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3752 	device_t       dev = iflib_get_dev(ctx);
3753 
3754 	if (sc->link_up) {
3755 		if (sc->link_active == false) {
3756 			if (bootverbose)
3757 				device_printf(dev, "Link is up %d Gbps %s \n",
3758 				    ((sc->link_speed == 128) ? 10 : 1),
3759 				    "Full Duplex");
3760 			sc->link_active = true;
3761 			/* Update any Flow Control changes */
3762 			ixgbe_fc_enable(&sc->hw);
3763 			/* Update DMA coalescing config */
3764 			ixgbe_config_dmac(sc);
3765 			iflib_link_state_change(ctx, LINK_STATE_UP,
3766 			    ixgbe_link_speed_to_baudrate(sc->link_speed));
3767 
3768 			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3769 				ixgbe_ping_all_vfs(sc);
3770 		}
3771 	} else { /* Link down */
3772 		if (sc->link_active == true) {
3773 			if (bootverbose)
3774 				device_printf(dev, "Link is Down\n");
3775 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3776 			sc->link_active = false;
3777 			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3778 				ixgbe_ping_all_vfs(sc);
3779 		}
3780 	}
3781 
3782 	/* Handle task requests from msix_link() */
3783 	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
3784 		ixgbe_handle_mod(ctx);
3785 	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
3786 		ixgbe_handle_msf(ctx);
3787 	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
3788 		ixgbe_handle_mbx(ctx);
3789 	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
3790 		ixgbe_reinit_fdir(ctx);
3791 	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
3792 		ixgbe_handle_phy(ctx);
3793 	sc->task_requests = 0;
3794 
3795 	ixgbe_update_stats_counters(sc);
3796 } /* ixgbe_if_update_admin_status */
3797 
3798 /************************************************************************
3799  * ixgbe_config_dmac - Configure DMA Coalescing
3800  ************************************************************************/
3801 static void
3802 ixgbe_config_dmac(struct ixgbe_softc *sc)
3803 {
3804 	struct ixgbe_hw          *hw = &sc->hw;
3805 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3806 
3807 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3808 		return;
3809 
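	/* Reprogram only when the watchdog value or link speed has changed */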
3810 	if (dcfg->watchdog_timer ^ sc->dmac ||
3811 	    dcfg->link_speed ^ sc->link_speed) {
3812 		dcfg->watchdog_timer = sc->dmac;
3813 		dcfg->fcoe_en = false;
3814 		dcfg->link_speed = sc->link_speed;
3815 		dcfg->num_tcs = 1;
3816 
3817 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3818 		    dcfg->watchdog_timer, dcfg->link_speed);
3819 
3820 		hw->mac.ops.dmac_config(hw);
3821 	}
3822 } /* ixgbe_config_dmac */
3823 
3824 /************************************************************************
3825  * ixgbe_if_enable_intr
3826  ************************************************************************/
3827 void
3828 ixgbe_if_enable_intr(if_ctx_t ctx)
3829 {
3830 	struct ixgbe_softc     *sc = iflib_get_softc(ctx);
3831 	struct ixgbe_hw    *hw = &sc->hw;
3832 	struct ix_rx_queue *que = sc->rx_queues;
3833 	u32                mask, fwsm;
3834 
3835 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3836 
3837 	switch (sc->hw.mac.type) {
3838 	case ixgbe_mac_82599EB:
3839 		mask |= IXGBE_EIMS_ECC;
3840 		/* Temperature sensor on some adapters */
3841 		mask |= IXGBE_EIMS_GPI_SDP0;
3842 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3843 		mask |= IXGBE_EIMS_GPI_SDP1;
3844 		mask |= IXGBE_EIMS_GPI_SDP2;
3845 		break;
3846 	case ixgbe_mac_X540:
3847 		/* Detect if Thermal Sensor is enabled */
3848 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3849 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3850 			mask |= IXGBE_EIMS_TS;
3851 		mask |= IXGBE_EIMS_ECC;
3852 		break;
3853 	case ixgbe_mac_X550:
3854 		/* MAC thermal sensor is automatically enabled */
3855 		mask |= IXGBE_EIMS_TS;
3856 		mask |= IXGBE_EIMS_ECC;
3857 		break;
3858 	case ixgbe_mac_X550EM_x:
3859 	case ixgbe_mac_X550EM_a:
3860 		/* Some devices use SDP0 for important information */
3861 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3862 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3863 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3864 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3865 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3866 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3867 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3868 		mask |= IXGBE_EIMS_ECC;
3869 		break;
3870 	default:
3871 		break;
3872 	}
3873 
3874 	/* Enable Fan Failure detection */
3875 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3876 		mask |= IXGBE_EIMS_GPI_SDP1;
3877 	/* Enable SR-IOV */
3878 	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3879 		mask |= IXGBE_EIMS_MAILBOX;
3880 	/* Enable Flow Director */
3881 	if (sc->feat_en & IXGBE_FEATURE_FDIR)
3882 		mask |= IXGBE_EIMS_FLOW_DIR;
3883 
3884 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3885 
3886 	/* With MSI-X we use auto clear */
3887 	if (sc->intr_type == IFLIB_INTR_MSIX) {
3888 		mask = IXGBE_EIMS_ENABLE_MASK;
3889 		/* Don't autoclear Link */
3890 		mask &= ~IXGBE_EIMS_OTHER;
3891 		mask &= ~IXGBE_EIMS_LSC;
3892 		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
3893 			mask &= ~IXGBE_EIMS_MAILBOX;
3894 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3895 	}
3896 
3897 	/*
3898 	 * Now enable all queues, this is done separately to
3899 	 * allow for handling the extended (beyond 32) MSI-X
3900 	 * vectors that can be used by 82599
3901 	 */
3902 	for (int i = 0; i < sc->num_rx_queues; i++, que++)
3903 		ixgbe_enable_queue(sc, que->msix);
3904 
3905 	IXGBE_WRITE_FLUSH(hw);
3907 } /* ixgbe_if_enable_intr */
3908 
3909 /************************************************************************
3910  * ixgbe_if_disable_intr
3911  ************************************************************************/
3912 static void
3913 ixgbe_if_disable_intr(if_ctx_t ctx)
3914 {
3915 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3916 
3917 	if (sc->intr_type == IFLIB_INTR_MSIX)
3918 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3919 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3920 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3921 	} else {
3922 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3923 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3924 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3925 	}
3926 	IXGBE_WRITE_FLUSH(&sc->hw);
3928 } /* ixgbe_if_disable_intr */
3929 
3930 /************************************************************************
3931  * ixgbe_link_intr_enable
3932  ************************************************************************/
3933 static void
3934 ixgbe_link_intr_enable(if_ctx_t ctx)
3935 {
3936 	struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
3937 
3938 	/* Re-enable other interrupts */
3939 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3940 } /* ixgbe_link_intr_enable */
3941 
3942 /************************************************************************
3943  * ixgbe_if_rx_queue_intr_enable
3944  ************************************************************************/
3945 static int
3946 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3947 {
3948 	struct ixgbe_softc     *sc = iflib_get_softc(ctx);
3949 	struct ix_rx_queue *que = &sc->rx_queues[rxqid];
3950 
3951 	ixgbe_enable_queue(sc, que->msix);
3952 
3953 	return (0);
3954 } /* ixgbe_if_rx_queue_intr_enable */
3955 
3956 /************************************************************************
3957  * ixgbe_enable_queue
3958  ************************************************************************/
3959 static void
3960 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
3961 {
3962 	struct ixgbe_hw *hw = &sc->hw;
3963 	u64             queue = 1ULL << vector;
3964 	u32             mask;
3965 
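	/*
	 * 82598 keeps all queue bits in EIMS; later MACs spread up to 64
	 * MSI-X vectors across the EIMS_EX[0..1] register pair.
	 */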
3966 	if (hw->mac.type == ixgbe_mac_82598EB) {
3967 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3968 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3969 	} else {
3970 		mask = (queue & 0xFFFFFFFF);
3971 		if (mask)
3972 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3973 		mask = (queue >> 32);
3974 		if (mask)
3975 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3976 	}
3977 } /* ixgbe_enable_queue */
3978 
3979 /************************************************************************
3980  * ixgbe_disable_queue
3981  ************************************************************************/
3982 static void
3983 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
3984 {
3985 	struct ixgbe_hw *hw = &sc->hw;
3986 	u64             queue = 1ULL << vector;
3987 	u32             mask;
3988 
3989 	if (hw->mac.type == ixgbe_mac_82598EB) {
3990 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3991 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3992 	} else {
3993 		mask = (queue & 0xFFFFFFFF);
3994 		if (mask)
3995 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3996 		mask = (queue >> 32);
3997 		if (mask)
3998 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3999 	}
4000 } /* ixgbe_disable_queue */
4001 
4002 /************************************************************************
4003  * ixgbe_intr - Legacy Interrupt Service Routine
4004  ************************************************************************/
4005 int
4006 ixgbe_intr(void *arg)
4007 {
4008 	struct ixgbe_softc     *sc = arg;
4009 	struct ix_rx_queue *que = sc->rx_queues;
4010 	struct ixgbe_hw    *hw = &sc->hw;
4011 	if_ctx_t           ctx = sc->ctx;
4012 	u32                eicr, eicr_mask;
4013 
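	/* In legacy/MSI mode, reading EICR acknowledges and clears it */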
4014 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4015 
4016 	++que->irqs;
4017 	if (eicr == 0) {
4018 		ixgbe_if_enable_intr(ctx);
4019 		return (FILTER_HANDLED);
4020 	}
4021 
4022 	/* Check for fan failure */
4023 	if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
4024 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
4025 		device_printf(sc->dev,
4026 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4027 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4028 	}
4029 
4030 	/* Link status change */
4031 	if (eicr & IXGBE_EICR_LSC) {
4032 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
4033 		iflib_admin_intr_deferred(ctx);
4034 	}
4035 
4036 	if (ixgbe_is_sfp(hw)) {
4037 		/* Pluggable optics-related interrupt */
4038 		if (hw->mac.type >= ixgbe_mac_X540)
4039 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
4040 		else
4041 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4042 
4043 		if (eicr & eicr_mask) {
4044 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
4045 			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
4046 		}
4047 
4048 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
4049 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4050 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
4051 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4052 			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4053 		}
4054 	}
4055 
4056 	/* External PHY interrupt */
4057 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
4058 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
4059 		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
4060 
4061 	return (FILTER_SCHEDULE_THREAD);
4062 } /* ixgbe_intr */
4063 
4064 /************************************************************************
4065  * ixgbe_free_pci_resources
4066  ************************************************************************/
4067 static void
4068 ixgbe_free_pci_resources(if_ctx_t ctx)
4069 {
4070 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4071 	struct         ix_rx_queue *que = sc->rx_queues;
4072 	device_t       dev = iflib_get_dev(ctx);
4073 
4074 	/* Release all MSI-X queue resources */
4075 	if (sc->intr_type == IFLIB_INTR_MSIX)
4076 		iflib_irq_free(ctx, &sc->irq);
4077 
4078 	if (que != NULL) {
4079 		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
4080 			iflib_irq_free(ctx, &que->que_irq);
4081 		}
4082 	}
4083 
4084 	if (sc->pci_mem != NULL)
4085 		bus_release_resource(dev, SYS_RES_MEMORY,
4086 		    rman_get_rid(sc->pci_mem), sc->pci_mem);
4087 } /* ixgbe_free_pci_resources */
4088 
4089 /************************************************************************
4090  * ixgbe_sysctl_flowcntl
4091  *
4092  *   SYSCTL wrapper around setting Flow Control
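 *
 *   Example (assuming the OID is registered as dev.ix.<unit>.fc):
 *       sysctl dev.ix.0.fc=3    # request full (RX + TX) pause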
4093  ************************************************************************/
4094 static int
4095 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4096 {
4097 	struct ixgbe_softc *sc;
4098 	int            error, fc;
4099 
4100 	sc = (struct ixgbe_softc *)arg1;
4101 	fc = sc->hw.fc.current_mode;
4102 
4103 	error = sysctl_handle_int(oidp, &fc, 0, req);
4104 	if ((error) || (req->newptr == NULL))
4105 		return (error);
4106 
4107 	/* Don't bother if it's not changed */
4108 	if (fc == sc->hw.fc.current_mode)
4109 		return (0);
4110 
4111 	return ixgbe_set_flowcntl(sc, fc);
4112 } /* ixgbe_sysctl_flowcntl */
4113 
4114 /************************************************************************
4115  * ixgbe_set_flowcntl - Set flow control
4116  *
4117  *   Flow control values:
4118  *     0 - off
4119  *     1 - rx pause
4120  *     2 - tx pause
4121  *     3 - full
4122  ************************************************************************/
4123 static int
4124 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4125 {
4126 	switch (fc) {
4127 	case ixgbe_fc_rx_pause:
4128 	case ixgbe_fc_tx_pause:
4129 	case ixgbe_fc_full:
4130 		sc->hw.fc.requested_mode = fc;
4131 		if (sc->num_rx_queues > 1)
4132 			ixgbe_disable_rx_drop(sc);
4133 		break;
4134 	case ixgbe_fc_none:
4135 		sc->hw.fc.requested_mode = ixgbe_fc_none;
4136 		if (sc->num_rx_queues > 1)
4137 			ixgbe_enable_rx_drop(sc);
4138 		break;
4139 	default:
4140 		return (EINVAL);
4141 	}
4142 
4143 	/* Don't autoneg if forcing a value */
4144 	sc->hw.fc.disable_fc_autoneg = true;
4145 	ixgbe_fc_enable(&sc->hw);
4146 
4147 	return (0);
4148 } /* ixgbe_set_flowcntl */
4149 
4150 /************************************************************************
4151  * ixgbe_enable_rx_drop
4152  *
4153  *   Enable the hardware to drop packets when the buffer is
4154  *   full. This is useful with multiqueue, so that no single
4155  *   queue being full stalls the entire RX engine. We only
4156  *   enable this when Multiqueue is enabled AND Flow Control
4157  *   is disabled.
4158  ************************************************************************/
4159 static void
4160 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
4161 {
4162 	struct ixgbe_hw *hw = &sc->hw;
4163 	struct rx_ring  *rxr;
4164 	u32             srrctl;
4165 
4166 	for (int i = 0; i < sc->num_rx_queues; i++) {
4167 		rxr = &sc->rx_queues[i].rxr;
4168 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4169 		srrctl |= IXGBE_SRRCTL_DROP_EN;
4170 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4171 	}
4172 
4173 	/* enable drop for each vf */
4174 	for (int i = 0; i < sc->num_vfs; i++) {
4175 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4176 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4177 		                IXGBE_QDE_ENABLE));
4178 	}
4179 } /* ixgbe_enable_rx_drop */
4180 
4181 /************************************************************************
4182  * ixgbe_disable_rx_drop
4183  ************************************************************************/
4184 static void
4185 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
4186 {
4187 	struct ixgbe_hw *hw = &sc->hw;
4188 	struct rx_ring  *rxr;
4189 	u32             srrctl;
4190 
4191 	for (int i = 0; i < sc->num_rx_queues; i++) {
4192 		rxr = &sc->rx_queues[i].rxr;
4193 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4194 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4195 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4196 	}
4197 
4198 	/* disable drop for each vf */
4199 	for (int i = 0; i < sc->num_vfs; i++) {
4200 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4201 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4202 	}
4203 } /* ixgbe_disable_rx_drop */
4204 
4205 /************************************************************************
4206  * ixgbe_sysctl_advertise
4207  *
4208  *   SYSCTL wrapper around setting advertised speed
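 *
 *   Example (assuming the OID is registered as
 *   dev.ix.<unit>.advertise_speed):
 *       sysctl dev.ix.0.advertise_speed=0x6    # advertise 1G + 10G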
4209  ************************************************************************/
4210 static int
4211 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4212 {
4213 	struct ixgbe_softc *sc;
4214 	int            error, advertise;
4215 
4216 	sc = (struct ixgbe_softc *)arg1;
4217 	if (atomic_load_acq_int(&sc->recovery_mode))
4218 		return (EPERM);
4219 
4220 	advertise = sc->advertise;
4221 
4222 	error = sysctl_handle_int(oidp, &advertise, 0, req);
4223 	if ((error) || (req->newptr == NULL))
4224 		return (error);
4225 
4226 	return ixgbe_set_advertise(sc, advertise);
4227 } /* ixgbe_sysctl_advertise */
4228 
4229 /************************************************************************
4230  * ixgbe_set_advertise - Control advertised link speed
4231  *
4232  *   Flags:
4233  *     0x1  - advertise 100 Mb
4234  *     0x2  - advertise 1G
4235  *     0x4  - advertise 10G
4236  *     0x8  - advertise 10 Mb (yes, Mb)
4237  *     0x10 - advertise 2.5G (disabled by default)
4238  *     0x20 - advertise 5G (disabled by default)
4239  *
4240  ************************************************************************/
4241 static int
4242 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4243 {
4244 	device_t         dev = iflib_get_dev(sc->ctx);
4245 	struct ixgbe_hw  *hw;
4246 	ixgbe_link_speed speed = 0;
4247 	ixgbe_link_speed link_caps = 0;
4248 	s32              err = IXGBE_NOT_IMPLEMENTED;
4249 	bool             negotiate = false;
4250 
4251 	/* Checks to validate new value */
4252 	if (sc->advertise == advertise) /* no change */
4253 		return (0);
4254 
4255 	hw = &sc->hw;
4256 
4257 	/* No speed changes for backplane media */
4258 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4259 		return (ENODEV);
4260 
4261 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4262 	      (hw->phy.multispeed_fiber))) {
4263 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4264 		return (EINVAL);
4265 	}
4266 
4267 	if (advertise < 0x1 || advertise > 0x3F) {
4268 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0x3F\n");
4269 		return (EINVAL);
4270 	}
4271 
4272 	if (hw->mac.ops.get_link_capabilities) {
4273 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4274 		    &negotiate);
4275 		if (err != IXGBE_SUCCESS) {
4276 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4277 			return (ENODEV);
4278 		}
4279 	}
4280 
4281 	/* Set new value and report new advertised mode */
4282 	if (advertise & 0x1) {
4283 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4284 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4285 			return (EINVAL);
4286 		}
4287 		speed |= IXGBE_LINK_SPEED_100_FULL;
4288 	}
4289 	if (advertise & 0x2) {
4290 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4291 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4292 			return (EINVAL);
4293 		}
4294 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4295 	}
4296 	if (advertise & 0x4) {
4297 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4298 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4299 			return (EINVAL);
4300 		}
4301 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4302 	}
4303 	if (advertise & 0x8) {
4304 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4305 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4306 			return (EINVAL);
4307 		}
4308 		speed |= IXGBE_LINK_SPEED_10_FULL;
4309 	}
4310 	if (advertise & 0x10) {
4311 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
4312 			device_printf(dev, "Interface does not support 2.5G advertised speed\n");
4313 			return (EINVAL);
4314 		}
4315 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
4316 	}
4317 	if (advertise & 0x20) {
4318 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
4319 			device_printf(dev, "Interface does not support 5G advertised speed\n");
4320 			return (EINVAL);
4321 		}
4322 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
4323 	}
4324 
4325 	hw->mac.autotry_restart = true;
4326 	hw->mac.ops.setup_link(hw, speed, true);
4327 	sc->advertise = advertise;
4328 
4329 	return (0);
4330 } /* ixgbe_set_advertise */
4331 
4332 /************************************************************************
4333  * ixgbe_get_default_advertise - Get default advertised speed settings
4334  *
4335  *   Formatted for sysctl usage.
4336  *   Flags:
4337  *     0x1 - advertise 100 Mb
4338  *     0x2 - advertise 1G
4339  *     0x4 - advertise 10G
4340  *     0x8 - advertise 10 Mb (yes, Mb)
4341  *     0x10 - advertise 2.5G (disabled by default)
4342  *     0x20 - advertise 5G (disabled by default)
4343  ************************************************************************/
4344 static int
4345 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
4346 {
4347 	struct ixgbe_hw  *hw = &sc->hw;
4348 	int              speed;
4349 	ixgbe_link_speed link_caps = 0;
4350 	s32              err;
4351 	bool             negotiate = false;
4352 
4353 	/*
4354 	 * Advertised speed means nothing unless it's copper or
4355 	 * multi-speed fiber
4356 	 */
4357 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4358 	    !(hw->phy.multispeed_fiber))
4359 		return (0);
4360 
4361 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4362 	if (err != IXGBE_SUCCESS)
4363 		return (0);
4364 
4365 	if (hw->mac.type == ixgbe_mac_X550) {
4366 		/*
4367 		 * 2.5G and 5G autonegotiation speeds on X550
4368 		 * are disabled by default due to reported
4369 		 * interoperability issues with some switches.
4370 		 */
4371 		link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
4372 		    IXGBE_LINK_SPEED_5GB_FULL);
4373 	}
4374 
4375 	speed =
4376 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
4377 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
4378 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
4379 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
4380 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
4381 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);
4382 
4383 	return (speed);
4384 } /* ixgbe_get_default_advertise */
4385 
4386 /************************************************************************
4387  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4388  *
4389  *   Control values:
4390  *     0/1 - off / on (use default value of 1000)
4391  *
4392  *     Legal timer values are:
4393  *     50,100,250,500,1000,2000,5000,10000
4394  *
4395  *     Turning off interrupt moderation will also turn this off.
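 *
 *     Example (assuming the OID is registered as dev.ix.<unit>.dmac):
 *         sysctl dev.ix.0.dmac=1000    # enable with a watchdog value of 1000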
4396  ************************************************************************/
4397 static int
4398 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4399 {
4400 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4401 	if_t           ifp = iflib_get_ifp(sc->ctx);
4402 	int            error;
4403 	u16            newval;
4404 
4405 	newval = sc->dmac;
4406 	error = sysctl_handle_16(oidp, &newval, 0, req);
4407 	if ((error) || (req->newptr == NULL))
4408 		return (error);
4409 
4410 	switch (newval) {
4411 	case 0:
4412 		/* Disabled */
4413 		sc->dmac = 0;
4414 		break;
4415 	case 1:
4416 		/* Enable and use default */
4417 		sc->dmac = 1000;
4418 		break;
4419 	case 50:
4420 	case 100:
4421 	case 250:
4422 	case 500:
4423 	case 1000:
4424 	case 2000:
4425 	case 5000:
4426 	case 10000:
4427 		/* Legal values - allow */
4428 		sc->dmac = newval;
4429 		break;
4430 	default:
4431 		/* Do nothing, illegal value */
4432 		return (EINVAL);
4433 	}
4434 
4435 	/* Re-initialize hardware if it's already running */
4436 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
4437 		if_init(ifp, ifp);
4438 
4439 	return (0);
4440 } /* ixgbe_sysctl_dmac */
4441 
4442 #ifdef IXGBE_DEBUG
4443 /************************************************************************
4444  * ixgbe_sysctl_power_state
4445  *
4446  *   Sysctl to test power states
4447  *   Values:
4448  *     0      - set device to D0
4449  *     3      - set device to D3
4450  *     (none) - get current device power state
4451  ************************************************************************/
4452 static int
4453 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4454 {
4455 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4456 	device_t       dev = sc->dev;
4457 	int            curr_ps, new_ps, error = 0;
4458 
4459 	curr_ps = new_ps = pci_get_powerstate(dev);
4460 
4461 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4462 	if ((error) || (req->newptr == NULL))
4463 		return (error);
4464 
4465 	if (new_ps == curr_ps)
4466 		return (0);
4467 
4468 	if (new_ps == 3 && curr_ps == 0)
4469 		error = DEVICE_SUSPEND(dev);
4470 	else if (new_ps == 0 && curr_ps == 3)
4471 		error = DEVICE_RESUME(dev);
4472 	else
4473 		return (EINVAL);
4474 
4475 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4476 
4477 	return (error);
4478 } /* ixgbe_sysctl_power_state */
4479 #endif
4480 
4481 /************************************************************************
4482  * ixgbe_sysctl_wol_enable
4483  *
4484  *   Sysctl to enable/disable the WoL capability,
4485  *   if supported by the adapter.
4486  *
4487  *   Values:
4488  *     0 - disabled
4489  *     1 - enabled
4490  ************************************************************************/
4491 static int
4492 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4493 {
4494 	struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4495 	struct ixgbe_hw *hw = &sc->hw;
4496 	int             new_wol_enabled;
4497 	int             error = 0;
4498 
4499 	new_wol_enabled = hw->wol_enabled;
4500 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4501 	if ((error) || (req->newptr == NULL))
4502 		return (error);
4503 	new_wol_enabled = !!(new_wol_enabled);
4504 	if (new_wol_enabled == hw->wol_enabled)
4505 		return (0);
4506 
4507 	if (new_wol_enabled > 0 && !sc->wol_support)
4508 		return (ENODEV);
4509 	else
4510 		hw->wol_enabled = new_wol_enabled;
4511 
4512 	return (0);
4513 } /* ixgbe_sysctl_wol_enable */
4514 
4515 /************************************************************************
4516  * ixgbe_sysctl_wufc - Wake Up Filter Control
4517  *
4518  *   Sysctl to enable/disable the types of packets that the
4519  *   adapter will wake up on upon receipt.
4520  *   Flags:
4521  *     0x1  - Link Status Change
4522  *     0x2  - Magic Packet
4523  *     0x4  - Direct Exact
4524  *     0x8  - Directed Multicast
4525  *     0x10 - Broadcast
4526  *     0x20 - ARP/IPv4 Request Packet
4527  *     0x40 - Direct IPv4 Packet
4528  *     0x80 - Direct IPv6 Packet
4529  *
4530  *   Settings not listed above will cause the sysctl to return an error.
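 *
 *   Example (assuming the OID is registered as dev.ix.<unit>.wufc):
 *       sysctl dev.ix.0.wufc=0x2    # wake on magic packet only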
4531  ************************************************************************/
4532 static int
4533 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4534 {
4535 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4536 	int            error = 0;
4537 	u32            new_wufc;
4538 
4539 	new_wufc = sc->wufc;
4540 
4541 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4542 	if ((error) || (req->newptr == NULL))
4543 		return (error);
4544 	if (new_wufc == sc->wufc)
4545 		return (0);
4546 
4547 	if (new_wufc & 0xffffff00)
4548 		return (EINVAL);
4549 
4550 	new_wufc &= 0xff;
4551 	new_wufc |= (0xffffff & sc->wufc);
4552 	sc->wufc = new_wufc;
4553 
4554 	return (0);
4555 } /* ixgbe_sysctl_wufc */
4556 
4557 #ifdef IXGBE_DEBUG
4558 /************************************************************************
4559  * ixgbe_sysctl_print_rss_config
4560  ************************************************************************/
4561 static int
4562 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4563 {
4564 	struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4565 	struct ixgbe_hw *hw = &sc->hw;
4566 	device_t        dev = sc->dev;
4567 	struct sbuf     *buf;
4568 	int             error = 0, reta_size;
4569 	u32             reg;
4570 
4571 	if (atomic_load_acq_int(&sc->recovery_mode))
4572 		return (EPERM);
4573 
4574 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4575 	if (!buf) {
4576 		device_printf(dev, "Could not allocate sbuf for output.\n");
4577 		return (ENOMEM);
4578 	}
4579 
4580 	/* TODO: use sbufs to make a string to print out */
4581 	/* Set multiplier for RETA setup and table size based on MAC */
4582 	switch (sc->hw.mac.type) {
4583 	case ixgbe_mac_X550:
4584 	case ixgbe_mac_X550EM_x:
4585 	case ixgbe_mac_X550EM_a:
4586 		reta_size = 128;
4587 		break;
4588 	default:
4589 		reta_size = 32;
4590 		break;
4591 	}
4592 
4593 	/* Print out the redirection table */
4594 	sbuf_cat(buf, "\n");
4595 	for (int i = 0; i < reta_size; i++) {
4596 		if (i < 32) {
4597 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4598 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4599 		} else {
4600 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4601 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4602 		}
4603 	}
4604 
4605 	/* TODO: print more config */
4606 
4607 	error = sbuf_finish(buf);
4608 	if (error)
4609 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4610 
4611 	sbuf_delete(buf);
4612 
4613 	return (error);
4614 } /* ixgbe_sysctl_print_rss_config */
4615 #endif /* IXGBE_DEBUG */
4616 
4617 /************************************************************************
4618  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4619  *
4620  *   For X552/X557-AT devices using an external PHY
4621  ************************************************************************/
4622 static int
4623 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4624 {
4625 	struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4626 	struct ixgbe_hw *hw = &sc->hw;
4627 	u16             reg;
4628 
4629 	if (atomic_load_acq_int(&sc->recovery_mode))
4630 		return (EPERM);
4631 
4632 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4633 		device_printf(iflib_get_dev(sc->ctx),
4634 		    "Device has no supported external thermal sensor.\n");
4635 		return (ENODEV);
4636 	}
4637 
4638 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4639 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4640 		device_printf(iflib_get_dev(sc->ctx),
4641 		    "Error reading from PHY's current temperature register\n");
4642 		return (EAGAIN);
4643 	}
4644 
4645 	/* The PHY reports the temperature in the register's upper byte */
4646 	reg = reg >> 8;
4647 
4648 	return (sysctl_handle_16(oidp, NULL, reg, req));
4649 } /* ixgbe_sysctl_phy_temp */
4650 
4651 /************************************************************************
4652  * ixgbe_sysctl_phy_overtemp_occurred
4653  *
4654  *   Reports (directly from the PHY) whether the current PHY
4655  *   temperature is over the overtemp threshold.
4656  ************************************************************************/
4657 static int
4658 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4659 {
4660 	struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4661 	struct ixgbe_hw *hw = &sc->hw;
4662 	u16             reg;
4663 
4664 	if (atomic_load_acq_int(&sc->recovery_mode))
4665 		return (EPERM);
4666 
4667 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4668 		device_printf(iflib_get_dev(sc->ctx),
4669 		    "Device has no supported external thermal sensor.\n");
4670 		return (ENODEV);
4671 	}
4672 
4673 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4674 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4675 		device_printf(iflib_get_dev(sc->ctx),
4676 		    "Error reading from PHY's temperature status register\n");
4677 		return (EAGAIN);
4678 	}
4679 
	/* The overtemp event is reported in bit 14; reduce it to 0/1 */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, NULL, reg, req));
4684 } /* ixgbe_sysctl_phy_overtemp_occurred */
4685 
4686 /************************************************************************
4687  * ixgbe_sysctl_eee_state
4688  *
 *   Sysctl to get or set the EEE power saving feature
4690  *   Values:
4691  *     0      - disable EEE
4692  *     1      - enable EEE
4693  *     (none) - get current device EEE state
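 *
 *   Example (assuming this handler is attached as "eee_state" under
 *   the device's sysctl tree):
 *     # sysctl dev.ix.0.eee_state=1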
4694  ************************************************************************/
4695 static int
4696 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4697 {
4698 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4699 	device_t       dev = sc->dev;
4700 	if_t           ifp = iflib_get_ifp(sc->ctx);
4701 	int            curr_eee, new_eee, error = 0;
4702 	s32            retval;
4703 
4704 	if (atomic_load_acq_int(&sc->recovery_mode))
4705 		return (EPERM);
4706 
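	/* Seed the handler with the current state; a plain read returns it */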
4707 	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4708 
4709 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4710 	if ((error) || (req->newptr == NULL))
4711 		return (error);
4712 
4713 	/* Nothing to do */
4714 	if (new_eee == curr_eee)
4715 		return (0);
4716 
4717 	/* Not supported */
4718 	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4719 		return (EINVAL);
4720 
4721 	/* Bounds checking */
4722 	if ((new_eee < 0) || (new_eee > 1))
4723 		return (EINVAL);
4724 
4725 	retval = ixgbe_setup_eee(&sc->hw, new_eee);
4726 	if (retval) {
4727 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4728 		return (EINVAL);
4729 	}
4730 
4731 	/* Restart auto-neg */
4732 	if_init(ifp, ifp);
4733 
4734 	device_printf(dev, "New EEE state: %d\n", new_eee);
4735 
4736 	/* Cache new value */
4737 	if (new_eee)
4738 		sc->feat_en |= IXGBE_FEATURE_EEE;
4739 	else
4740 		sc->feat_en &= ~IXGBE_FEATURE_EEE;
4741 
4742 	return (error);
4743 } /* ixgbe_sysctl_eee_state */
4744 
4745 /************************************************************************
4746  * ixgbe_init_device_features
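 *
 *   Populate feat_cap with every feature the hardware could support,
 *   then derive feat_en, the subset actually enabled by default or by
 *   tunable.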
4747  ************************************************************************/
4748 static void
4749 ixgbe_init_device_features(struct ixgbe_softc *sc)
4750 {
	sc->feat_cap = IXGBE_FEATURE_NETMAP |
	    IXGBE_FEATURE_RSS |
	    IXGBE_FEATURE_MSI |
	    IXGBE_FEATURE_MSIX |
	    IXGBE_FEATURE_LEGACY_IRQ;
4756 
4757 	/* Set capabilities first... */
4758 	switch (sc->hw.mac.type) {
4759 	case ixgbe_mac_82598EB:
4760 		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
4761 			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4762 		break;
4763 	case ixgbe_mac_X540:
4764 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4765 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4766 		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4767 		    (sc->hw.bus.func == 0))
4768 			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4769 		break;
4770 	case ixgbe_mac_X550:
4771 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4772 		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4773 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4774 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4775 		break;
4776 	case ixgbe_mac_X550EM_x:
4777 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4778 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4779 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4780 		if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4781 			sc->feat_cap |= IXGBE_FEATURE_EEE;
4782 		break;
4783 	case ixgbe_mac_X550EM_a:
4784 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4785 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4786 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4787 		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4788 		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4789 		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4790 			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4791 			sc->feat_cap |= IXGBE_FEATURE_EEE;
4792 		}
4793 		break;
4794 	case ixgbe_mac_82599EB:
4795 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4796 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4797 		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4798 		    (sc->hw.bus.func == 0))
4799 			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4800 		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4801 			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4802 		break;
4803 	default:
4804 		break;
4805 	}
4806 
4807 	/* Enabled by default... */
4808 	/* Fan failure detection */
4809 	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4810 		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4811 	/* Netmap */
4812 	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
4813 		sc->feat_en |= IXGBE_FEATURE_NETMAP;
4814 	/* EEE */
4815 	if (sc->feat_cap & IXGBE_FEATURE_EEE)
4816 		sc->feat_en |= IXGBE_FEATURE_EEE;
4817 	/* Thermal Sensor */
4818 	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4819 		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4820 	/* Recovery mode */
4821 	if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
4822 		sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
4823 
4824 	/* Enabled via global sysctl... */
4825 	/* Flow Director */
4826 	if (ixgbe_enable_fdir) {
4827 		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
4828 			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev,
			    "Device does not support Flow Director."
			    " Leaving disabled.\n");
4831 	}
4832 	/*
	 * Message Signaled Interrupts - Extended (MSI-X)
4834 	 * Normal MSI is only enabled if MSI-X calls fail.
4835 	 */
4836 	if (!ixgbe_enable_msix)
4837 		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
4838 	/* Receive-Side Scaling (RSS) */
4839 	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4840 		sc->feat_en |= IXGBE_FEATURE_RSS;
4841 
	/* Disable features with unmet dependencies... */
	/* No MSI-X: RSS and SR-IOV need per-queue/per-VF vectors */
4844 	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
4845 		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
4846 		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4847 		sc->feat_en &= ~IXGBE_FEATURE_RSS;
4848 		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
4849 	}
4850 } /* ixgbe_init_device_features */
4851 
4852 /************************************************************************
4853  * ixgbe_check_fan_failure
4854  ************************************************************************/
4855 static void
4856 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
4857 {
4858 	u32 mask;
4859 
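	/*
	 * SDP1 reports fan failure, but the bit sits at different
	 * positions in the interrupt cause register (EICR) and the pin
	 * status register (ESDP), so choose the mask to match the
	 * calling context.
	 */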
4860 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
4861 	    IXGBE_ESDP_SDP1;
4862 
4863 	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4865 } /* ixgbe_check_fan_failure */
4866 
4867 /************************************************************************
4868  * ixgbe_sbuf_fw_version
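 *
 *   Compose a one-line, human-readable summary of the firmware, NVM,
 *   OEM and Option ROM versions, omitting any component whose value
 *   reads as unsupported or invalid.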
4869  ************************************************************************/
4870 static void
4871 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
4872 {
4873 	struct ixgbe_nvm_version nvm_ver = {0};
4874 	const char *space = "";
4875 
4876 	ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
4877 	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
4878 	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
4879 	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
4880 
	/* FW version */
	if ((nvm_ver.phy_fw_maj == 0x0 &&
	    nvm_ver.phy_fw_min == 0x0 &&
	    nvm_ver.phy_fw_id == 0x0) ||
	    (nvm_ver.phy_fw_maj == 0xF &&
	    nvm_ver.phy_fw_min == 0xFF &&
	    nvm_ver.phy_fw_id == 0xF)) {
		/*
		 * All-zero major/minor/id means reading the FW version is
		 * unsupported; major 0xF with minor 0xFF and id 0xF marks
		 * the value read as invalid.  Print nothing in either case.
		 */
	} else
		sbuf_printf(buf, "fw %d.%d.%d ",
		    nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
		    nvm_ver.phy_fw_id);
4895 
	/* NVM version */
	if ((nvm_ver.nvm_major == 0x0 &&
	    nvm_ver.nvm_minor == 0x0 &&
	    nvm_ver.nvm_id == 0x0) ||
	    (nvm_ver.nvm_major == 0xF &&
	    nvm_ver.nvm_minor == 0xFF &&
	    nvm_ver.nvm_id == 0xF)) {
		/*
		 * All-zero major/minor/id means reading the NVM version is
		 * unsupported; major 0xF with minor 0xFF and id 0xF marks
		 * the value read as invalid.  Print nothing in either case.
		 */
	} else
		sbuf_printf(buf, "nvm %x.%02x.%x ",
		    nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);
4910 
4911 	if (nvm_ver.oem_valid) {
4912 		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
4913 		    nvm_ver.oem_minor, nvm_ver.oem_release);
4914 		space = " ";
4915 	}
4916 
4917 	if (nvm_ver.or_valid) {
4918 		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
4919 		    space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
4920 		space = " ";
4921 	}
4922 
	/* An eTrack ID of all ones marks the identifier as invalid */
	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID)) {
4925 		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
4926 	}
4927 } /* ixgbe_sbuf_fw_version */
4928 
4929 /************************************************************************
4930  * ixgbe_print_fw_version
4931  ************************************************************************/
4932 static void
4933 ixgbe_print_fw_version(if_ctx_t ctx)
4934 {
4935 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4936 	struct ixgbe_hw *hw = &sc->hw;
4937 	device_t dev = sc->dev;
4938 	struct sbuf *buf;
4939 	int error = 0;
4940 
4941 	buf = sbuf_new_auto();
4942 	if (!buf) {
4943 		device_printf(dev, "Could not allocate sbuf for output.\n");
4944 		return;
4945 	}
4946 
4947 	ixgbe_sbuf_fw_version(hw, buf);
4948 
4949 	error = sbuf_finish(buf);
4950 	if (error)
4951 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4952 	else if (sbuf_len(buf))
4953 		device_printf(dev, "%s\n", sbuf_data(buf));
4954 
4955 	sbuf_delete(buf);
4956 } /* ixgbe_print_fw_version */
4957 
4958 /************************************************************************
4959  * ixgbe_sysctl_print_fw_version
4960  ************************************************************************/
4961 static int
4962 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
4963 {
4964 	struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4965 	struct ixgbe_hw *hw = &sc->hw;
4966 	device_t dev = sc->dev;
4967 	struct sbuf *buf;
4968 	int error = 0;
4969 
4970 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4971 	if (!buf) {
4972 		device_printf(dev, "Could not allocate sbuf for output.\n");
4973 		return (ENOMEM);
4974 	}
4975 
4976 	ixgbe_sbuf_fw_version(hw, buf);
4977 
4978 	error = sbuf_finish(buf);
4979 	if (error)
4980 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4981 
4982 	sbuf_delete(buf);
4983 
4984 	return (0);
4985 } /* ixgbe_sysctl_print_fw_version */
4986