xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 8df8b2d3e51d1b816201d8a1fe8bc29fe192e562)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select which devices to attach to
57  *   Each entry pairs a vendor/device ID with a description string
58  *   Last entry must be PVID_END (all 0s)
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 int ixgbe_intr(void *arg);
143 
144 /************************************************************************
145  * Function prototypes
146  ************************************************************************/
147 #if __FreeBSD_version >= 1100036
148 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
149 #endif
150 
151 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
154 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
155 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
156 
157 static void ixgbe_config_dmac(struct adapter *adapter);
158 static void ixgbe_configure_ivars(struct adapter *adapter);
159 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
160                            s8 type);
161 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static bool ixgbe_sfp_probe(if_ctx_t ctx);
163 
164 static void ixgbe_free_pci_resources(if_ctx_t ctx);
165 
166 static int  ixgbe_msix_link(void *arg);
167 static int  ixgbe_msix_que(void *arg);
168 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
169 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
170 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
171 
172 static int  ixgbe_setup_interface(if_ctx_t ctx);
173 static void ixgbe_init_device_features(struct adapter *adapter);
174 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
175 static void ixgbe_add_media_types(if_ctx_t ctx);
176 static void ixgbe_update_stats_counters(struct adapter *adapter);
177 static void ixgbe_config_link(if_ctx_t ctx);
178 static void ixgbe_get_slot_info(struct adapter *);
179 static void ixgbe_check_wol_support(struct adapter *adapter);
180 static void ixgbe_enable_rx_drop(struct adapter *);
181 static void ixgbe_disable_rx_drop(struct adapter *);
182 
183 static void ixgbe_add_hw_stats(struct adapter *adapter);
184 static int  ixgbe_set_flowcntl(struct adapter *, int);
185 static int  ixgbe_set_advertise(struct adapter *, int);
186 static int  ixgbe_get_advertise(struct adapter *);
187 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
188 static void ixgbe_config_gpie(struct adapter *adapter);
189 static void ixgbe_config_delay_values(struct adapter *adapter);
190 
191 /* Sysctl handlers */
192 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
198 #ifdef IXGBE_DEBUG
199 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
200 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
201 #endif
202 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
209 
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_msf(void *);
212 static void ixgbe_handle_mod(void *);
213 static void ixgbe_handle_phy(void *);
214 
215 /************************************************************************
216  *  FreeBSD Device Interface Entry Points
217  ************************************************************************/
218 static device_method_t ix_methods[] = {
219 	/* Device interface */
220 	DEVMETHOD(device_register, ixgbe_register),
221 	DEVMETHOD(device_probe, iflib_device_probe),
222 	DEVMETHOD(device_attach, iflib_device_attach),
223 	DEVMETHOD(device_detach, iflib_device_detach),
224 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
225 	DEVMETHOD(device_suspend, iflib_device_suspend),
226 	DEVMETHOD(device_resume, iflib_device_resume),
227 #ifdef PCI_IOV
228 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
229 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
230 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
231 #endif /* PCI_IOV */
232 	DEVMETHOD_END
233 };
234 
235 static driver_t ix_driver = {
236 	"ix", ix_methods, sizeof(struct adapter),
237 };
238 
239 devclass_t ix_devclass;
240 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
241 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
242 MODULE_DEPEND(ix, pci, 1, 1, 1);
243 MODULE_DEPEND(ix, ether, 1, 1, 1);
244 MODULE_DEPEND(ix, iflib, 1, 1, 1);
245 
246 static device_method_t ixgbe_if_methods[] = {
247 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
248 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
249 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
250 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
251 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
252 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
253 	DEVMETHOD(ifdi_init, ixgbe_if_init),
254 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
255 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
256 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
257 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
258 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
259 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
260 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
261 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
262 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
263 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
264 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
265 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
266 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
267 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
268 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
269 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
270 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
271 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
272 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
273 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
274 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
275 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
276 #ifdef PCI_IOV
277 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
278 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
279 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
280 #endif /* PCI_IOV */
281 	DEVMETHOD_END
282 };
283 
284 /*
285  * TUNEABLE PARAMETERS:
286  */
287 
288 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
289 static driver_t ixgbe_if_driver = {
290   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
291 };
292 
293 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
294 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
295     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
296 
297 /* Flow control setting, default to full */
298 static int ixgbe_flow_control = ixgbe_fc_full;
299 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
300     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
301 
302 /* Advertise Speed, default to 0 (auto) */
303 static int ixgbe_advertise_speed = 0;
304 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
305     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
306 
307 /*
308  * Smart speed setting, default to on.
309  * This only works as a compile-time option
310  * right now because it is applied during attach; set
311  * this to 'ixgbe_smart_speed_off' to
312  * disable it.
313  */
314 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
315 
316 /*
317  * MSI-X should be the default for best performance,
318  * but this allows it to be forced off for testing.
319  */
320 static int ixgbe_enable_msix = 1;
321 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
322     "Enable MSI-X interrupts");
323 
324 /*
325  * Setting this allows the use of
326  * unsupported SFP+ modules; note that
327  * if you do so, you are on your own :)
328  */
329 static int allow_unsupported_sfp = FALSE;
330 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
331     &allow_unsupported_sfp, 0,
332     "Allow unsupported SFP modules...use at your own risk");
333 
334 /*
335  * Not sure if Flow Director is fully baked,
336  * so we'll default to turning it off.
337  */
338 static int ixgbe_enable_fdir = 0;
339 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
340     "Enable Flow Director");
341 
342 /* Receive-Side Scaling */
343 static int ixgbe_enable_rss = 1;
344 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
345     "Enable Receive-Side Scaling (RSS)");
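
/*
 * Illustrative usage note: the CTLFLAG_RDTUN tunables above are read when
 * the module loads, so they are normally set from /boot/loader.conf rather
 * than at runtime, e.g.:
 *
 *   hw.ix.enable_msix="0"
 *   hw.ix.flow_control="0"
 *
 * The values shown are examples only; read-only sysctls of the same names
 * report the settings actually in effect.
 */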
346 
347 #if 0
348 /* Keep running tab on them for sanity check */
349 static int ixgbe_total_ports;
350 #endif
351 
352 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
353 
354 /*
355  * For Flow Director: this is the sampling rate for TX packets entering
356  * the filter pool; with the default of 20, every 20th packet is probed.
357  *
358  * This feature can be disabled by setting this to 0.
359  */
360 static int atr_sample_rate = 20;
361 
362 extern struct if_txrx ixgbe_txrx;
363 
364 static struct if_shared_ctx ixgbe_sctx_init = {
365 	.isc_magic = IFLIB_MAGIC,
366 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
367 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
368 	.isc_tx_maxsegsize = PAGE_SIZE,
369 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
370 	.isc_tso_maxsegsize = PAGE_SIZE,
371 	.isc_rx_maxsize = PAGE_SIZE*4,
372 	.isc_rx_nsegments = 1,
373 	.isc_rx_maxsegsize = PAGE_SIZE*4,
374 	.isc_nfl = 1,
375 	.isc_ntxqs = 1,
376 	.isc_nrxqs = 1,
377 
378 	.isc_admin_intrcnt = 1,
379 	.isc_vendor_info = ixgbe_vendor_info_array,
380 	.isc_driver_version = ixgbe_driver_version,
381 	.isc_driver = &ixgbe_if_driver,
382 	.isc_flags = IFLIB_TSO_INIT_IP,
383 
384 	.isc_nrxd_min = {MIN_RXD},
385 	.isc_ntxd_min = {MIN_TXD},
386 	.isc_nrxd_max = {MAX_RXD},
387 	.isc_ntxd_max = {MAX_TXD},
388 	.isc_nrxd_default = {DEFAULT_RXD},
389 	.isc_ntxd_default = {DEFAULT_TXD},
390 };
391 
392 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
393 
394 /************************************************************************
395  * ixgbe_if_tx_queues_alloc
396  ************************************************************************/
397 static int
398 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
399                          int ntxqs, int ntxqsets)
400 {
401 	struct adapter     *adapter = iflib_get_softc(ctx);
402 	if_softc_ctx_t     scctx = adapter->shared;
403 	struct ix_tx_queue *que;
404 	int                i, j, error;
405 
406 	MPASS(adapter->num_tx_queues > 0);
407 	MPASS(adapter->num_tx_queues == ntxqsets);
408 	MPASS(ntxqs == 1);
409 
410 	/* Allocate queue structure memory */
411 	adapter->tx_queues =
412 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
413 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
414 	if (!adapter->tx_queues) {
415 		device_printf(iflib_get_dev(ctx),
416 		    "Unable to allocate TX ring memory\n");
417 		return (ENOMEM);
418 	}
419 
420 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
421 		struct tx_ring *txr = &que->txr;
422 
423 		/* In case SR-IOV is enabled, align the index properly */
424 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
425 		    i);
426 
427 		txr->adapter = que->adapter = adapter;
428 		adapter->active_queues |= (u64)1 << txr->me;
429 
430 		/* Allocate report status array */
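		/*
		 * tx_rsq records the descriptor indices for which a
		 * report-status (RS) completion was requested; the transmit
		 * completion code uses these indices to check for completed
		 * (DD) descriptors and reclaim them.
		 */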
431 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
432 		if (txr->tx_rsq == NULL) {
433 			error = ENOMEM;
434 			goto fail;
435 		}
436 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
437 			txr->tx_rsq[j] = QIDX_INVALID;
438 		/* get the virtual and physical address of the hardware queues */
439 		txr->tail = IXGBE_TDT(txr->me);
440 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
441 		txr->tx_paddr = paddrs[i];
442 
443 		txr->bytes = 0;
444 		txr->total_packets = 0;
445 
446 		/* Set the rate at which we sample packets */
447 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
448 			txr->atr_sample = atr_sample_rate;
449 
450 	}
451 
452 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
453 	    adapter->num_tx_queues);
454 
455 	return (0);
456 
457 fail:
458 	ixgbe_if_queues_free(ctx);
459 
460 	return (error);
461 } /* ixgbe_if_tx_queues_alloc */
462 
463 /************************************************************************
464  * ixgbe_if_rx_queues_alloc
465  ************************************************************************/
466 static int
467 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
468                          int nrxqs, int nrxqsets)
469 {
470 	struct adapter     *adapter = iflib_get_softc(ctx);
471 	struct ix_rx_queue *que;
472 	int                i;
473 
474 	MPASS(adapter->num_rx_queues > 0);
475 	MPASS(adapter->num_rx_queues == nrxqsets);
476 	MPASS(nrxqs == 1);
477 
478 	/* Allocate queue structure memory */
479 	adapter->rx_queues =
480 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
481 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
482 	if (!adapter->rx_queues) {
483 		device_printf(iflib_get_dev(ctx),
484 		    "Unable to allocate RX ring memory\n");
485 		return (ENOMEM);
486 	}
487 
488 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
489 		struct rx_ring *rxr = &que->rxr;
490 
491 		/* In case SR-IOV is enabled, align the index properly */
492 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
493 		    i);
494 
495 		rxr->adapter = que->adapter = adapter;
496 
497 		/* get the virtual and physical address of the hw queues */
498 		rxr->tail = IXGBE_RDT(rxr->me);
499 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
500 		rxr->rx_paddr = paddrs[i];
501 		rxr->bytes = 0;
502 		rxr->que = que;
503 	}
504 
505 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
506 	    adapter->num_rx_queues);
507 
508 	return (0);
509 } /* ixgbe_if_rx_queues_alloc */
510 
511 /************************************************************************
512  * ixgbe_if_queues_free
513  ************************************************************************/
514 static void
515 ixgbe_if_queues_free(if_ctx_t ctx)
516 {
517 	struct adapter     *adapter = iflib_get_softc(ctx);
518 	struct ix_tx_queue *tx_que = adapter->tx_queues;
519 	struct ix_rx_queue *rx_que = adapter->rx_queues;
520 	int                i;
521 
522 	if (tx_que != NULL) {
523 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
524 			struct tx_ring *txr = &tx_que->txr;
525 			if (txr->tx_rsq == NULL)
526 				break;
527 
528 			free(txr->tx_rsq, M_IXGBE);
529 			txr->tx_rsq = NULL;
530 		}
531 
532 		free(adapter->tx_queues, M_IXGBE);
533 		adapter->tx_queues = NULL;
534 	}
535 	if (rx_que != NULL) {
536 		free(adapter->rx_queues, M_IXGBE);
537 		adapter->rx_queues = NULL;
538 	}
539 } /* ixgbe_if_queues_free */
540 
541 /************************************************************************
542  * ixgbe_initialize_rss_mapping
543  ************************************************************************/
544 static void
545 ixgbe_initialize_rss_mapping(struct adapter *adapter)
546 {
547 	struct ixgbe_hw *hw = &adapter->hw;
548 	u32             reta = 0, mrqc, rss_key[10];
549 	int             queue_id, table_size, index_mult;
550 	int             i, j;
551 	u32             rss_hash_config;
552 
553 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
554 		/* Fetch the configured RSS key */
555 		rss_getkey((uint8_t *)&rss_key);
556 	} else {
557 		/* set up random bits */
558 		arc4rand(&rss_key, sizeof(rss_key), 0);
559 	}
560 
561 	/* Set multiplier for RETA setup and table size based on MAC */
562 	index_mult = 0x1;
563 	table_size = 128;
564 	switch (adapter->hw.mac.type) {
565 	case ixgbe_mac_82598EB:
566 		index_mult = 0x11;
567 		break;
568 	case ixgbe_mac_X550:
569 	case ixgbe_mac_X550EM_x:
570 	case ixgbe_mac_X550EM_a:
571 		table_size = 512;
572 		break;
573 	default:
574 		break;
575 	}
576 
577 	/* Set up the redirection table */
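	/*
	 * Worked example (assuming 4 RX queues, software RSS disabled and
	 * index_mult == 1): entries 0..3 map to queues 0..3, and each group
	 * of four 8-bit entries is packed into one 32-bit register with
	 * entry n in the low byte, so RETA(0) is written as 0x03020100.
	 */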
578 	for (i = 0, j = 0; i < table_size; i++, j++) {
579 		if (j == adapter->num_rx_queues)
580 			j = 0;
581 
582 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
583 			/*
584 			 * Fetch the RSS bucket id for the given indirection
585 			 * entry. Cap it at the number of configured buckets
586 			 * (which is num_rx_queues.)
587 			 */
588 			queue_id = rss_get_indirection_to_bucket(i);
589 			queue_id = queue_id % adapter->num_rx_queues;
590 		} else
591 			queue_id = (j * index_mult);
592 
593 		/*
594 		 * The low 8 bits are for hash value (n+0);
595 		 * The next 8 bits are for hash value (n+1), etc.
596 		 */
597 		reta = reta >> 8;
598 		reta = reta | (((uint32_t)queue_id) << 24);
599 		if ((i & 3) == 3) {
600 			if (i < 128)
601 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
602 			else
603 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
604 				    reta);
605 			reta = 0;
606 		}
607 	}
608 
609 	/* Now fill our hash function seeds */
610 	for (i = 0; i < 10; i++)
611 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
612 
613 	/* Perform hash on these packet types */
614 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
615 		rss_hash_config = rss_gethashconfig();
616 	else {
617 		/*
618 		 * Disable UDP - IP fragments aren't currently being handled
619 		 * and so we end up with a mix of 2-tuple and 4-tuple
620 		 * traffic.
621 		 */
622 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
623 		                | RSS_HASHTYPE_RSS_TCP_IPV4
624 		                | RSS_HASHTYPE_RSS_IPV6
625 		                | RSS_HASHTYPE_RSS_TCP_IPV6
626 		                | RSS_HASHTYPE_RSS_IPV6_EX
627 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
628 	}
629 
630 	mrqc = IXGBE_MRQC_RSSEN;
631 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
632 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
633 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
634 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
635 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
636 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
637 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
638 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
639 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
640 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
649 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
650 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
651 } /* ixgbe_initialize_rss_mapping */
652 
653 /************************************************************************
654  * ixgbe_initialize_receive_units - Setup receive registers and features.
655  ************************************************************************/
656 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
657 
658 static void
659 ixgbe_initialize_receive_units(if_ctx_t ctx)
660 {
661 	struct adapter     *adapter = iflib_get_softc(ctx);
662 	if_softc_ctx_t     scctx = adapter->shared;
663 	struct ixgbe_hw    *hw = &adapter->hw;
664 	struct ifnet       *ifp = iflib_get_ifp(ctx);
665 	struct ix_rx_queue *que;
666 	int                i, j;
667 	u32                bufsz, fctrl, srrctl, rxcsum;
668 	u32                hlreg;
669 
670 	/*
671 	 * Make sure receives are disabled while
672 	 * setting up the descriptor ring
673 	 */
674 	ixgbe_disable_rx(hw);
675 
676 	/* Enable broadcasts */
677 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
678 	fctrl |= IXGBE_FCTRL_BAM;
679 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
680 		fctrl |= IXGBE_FCTRL_DPF;
681 		fctrl |= IXGBE_FCTRL_PMCF;
682 	}
683 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
684 
685 	/* Set for Jumbo Frames? */
686 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
687 	if (ifp->if_mtu > ETHERMTU)
688 		hlreg |= IXGBE_HLREG0_JUMBOEN;
689 	else
690 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
691 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
692 
693 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
694 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
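	/*
	 * Example (assuming 2 KB receive mbufs and a BSIZEPKT granularity of
	 * 1 KB, i.e. IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10):
	 * bufsz = (2048 + 1023) >> 10 = 2, i.e. 2 KB hardware packet buffers.
	 */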
695 
696 	/* Setup the Base and Length of the Rx Descriptor Ring */
697 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
698 		struct rx_ring *rxr = &que->rxr;
699 		u64            rdba = rxr->rx_paddr;
700 
701 		j = rxr->me;
702 
703 		/* Setup the Base and Length of the Rx Descriptor Ring */
704 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
705 		    (rdba & 0x00000000ffffffffULL));
706 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
707 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
708 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
709 
710 		/* Set up the SRRCTL register */
711 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
712 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
713 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
714 		srrctl |= bufsz;
715 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
716 
717 		/*
718 		 * Set DROP_EN iff we have no flow control and >1 queue.
719 		 * Note that srrctl was cleared shortly before during reset,
720 		 * so we do not need to clear the bit, but do it just in case
721 		 * this code is moved elsewhere.
722 		 */
723 		if (adapter->num_rx_queues > 1 &&
724 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
725 			srrctl |= IXGBE_SRRCTL_DROP_EN;
726 		} else {
727 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
728 		}
729 
730 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
731 
732 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
733 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
734 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
735 
736 		/* Set the driver rx tail address */
737 		rxr->tail =  IXGBE_RDT(rxr->me);
738 	}
739 
740 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
741 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
742 		            | IXGBE_PSRTYPE_UDPHDR
743 		            | IXGBE_PSRTYPE_IPV4HDR
744 		            | IXGBE_PSRTYPE_IPV6HDR;
745 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
746 	}
747 
748 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
749 
750 	ixgbe_initialize_rss_mapping(adapter);
751 
752 	if (adapter->num_rx_queues > 1) {
753 		/* RSS and RX IPP Checksum are mutually exclusive */
754 		rxcsum |= IXGBE_RXCSUM_PCSD;
755 	}
756 
757 	if (ifp->if_capenable & IFCAP_RXCSUM)
758 		rxcsum |= IXGBE_RXCSUM_PCSD;
759 
760 	/* This is useful for calculating UDP/IP fragment checksums */
761 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
762 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
763 
764 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
765 
766 } /* ixgbe_initialize_receive_units */
767 
768 /************************************************************************
769  * ixgbe_initialize_transmit_units - Enable transmit units.
770  ************************************************************************/
771 static void
772 ixgbe_initialize_transmit_units(if_ctx_t ctx)
773 {
774 	struct adapter     *adapter = iflib_get_softc(ctx);
775 	struct ixgbe_hw    *hw = &adapter->hw;
776 	if_softc_ctx_t     scctx = adapter->shared;
777 	struct ix_tx_queue *que;
778 	int i;
779 
780 	/* Setup the Base and Length of the Tx Descriptor Ring */
781 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
782 	    i++, que++) {
783 		struct tx_ring	   *txr = &que->txr;
784 		u64 tdba = txr->tx_paddr;
785 		u32 txctrl = 0;
786 		int j = txr->me;
787 
788 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
789 		    (tdba & 0x00000000ffffffffULL));
790 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
791 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
792 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
793 
794 		/* Setup the HW Tx Head and Tail descriptor pointers */
795 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
796 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
797 
798 		/* Cache the tail address */
799 		txr->tx_rs_cidx = txr->tx_rs_pidx;
800 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
801 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
802 			txr->tx_rsq[k] = QIDX_INVALID;
803 
804 		/* Disable Head Writeback */
805 		/*
806 		 * Note: for X550 series devices, these registers are actually
807 		 * prefixed with TPH_ instead of DCA_, but the addresses and
808 		 * fields remain the same.
809 		 */
810 		switch (hw->mac.type) {
811 		case ixgbe_mac_82598EB:
812 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
813 			break;
814 		default:
815 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
816 			break;
817 		}
818 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
819 		switch (hw->mac.type) {
820 		case ixgbe_mac_82598EB:
821 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
822 			break;
823 		default:
824 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
825 			break;
826 		}
827 
828 	}
829 
830 	if (hw->mac.type != ixgbe_mac_82598EB) {
831 		u32 dmatxctl, rttdcs;
832 
833 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
834 		dmatxctl |= IXGBE_DMATXCTL_TE;
835 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
836 		/* Disable arbiter to set MTQC */
837 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
838 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
839 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
840 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
841 		    ixgbe_get_mtqc(adapter->iov_mode));
842 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
843 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
844 	}
845 
846 } /* ixgbe_initialize_transmit_units */
847 
848 /************************************************************************
849  * ixgbe_register
850  ************************************************************************/
851 static void *
852 ixgbe_register(device_t dev)
853 {
854 	return (ixgbe_sctx);
855 } /* ixgbe_register */
856 
857 /************************************************************************
858  * ixgbe_if_attach_pre - Device initialization routine, part 1
859  *
860  *   Called when the driver is being loaded.
861  *   Identifies the type of hardware, initializes the hardware,
862  *   and initializes iflib structures.
863  *
864  *   return 0 on success, positive on failure
865  ************************************************************************/
866 static int
867 ixgbe_if_attach_pre(if_ctx_t ctx)
868 {
869 	struct adapter  *adapter;
870 	device_t        dev;
871 	if_softc_ctx_t  scctx;
872 	struct ixgbe_hw *hw;
873 	int             error = 0;
874 	u32             ctrl_ext;
875 
876 	INIT_DEBUGOUT("ixgbe_attach: begin");
877 
878 	/* Allocate, clear, and link in our adapter structure */
879 	dev = iflib_get_dev(ctx);
880 	adapter = iflib_get_softc(ctx);
881 	adapter->hw.back = adapter;
882 	adapter->ctx = ctx;
883 	adapter->dev = dev;
884 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
885 	adapter->media = iflib_get_media(ctx);
886 	hw = &adapter->hw;
887 
888 	/* Determine hardware revision */
889 	hw->vendor_id = pci_get_vendor(dev);
890 	hw->device_id = pci_get_device(dev);
891 	hw->revision_id = pci_get_revid(dev);
892 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
893 	hw->subsystem_device_id = pci_get_subdevice(dev);
894 
895 	/* Do base PCI setup - map BAR0 */
896 	if (ixgbe_allocate_pci_resources(ctx)) {
897 		device_printf(dev, "Allocation of PCI resources failed\n");
898 		return (ENXIO);
899 	}
900 
901 	/* let hardware know driver is loaded */
902 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
903 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
904 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
905 
906 	/*
907 	 * Initialize the shared code
908 	 */
909 	if (ixgbe_init_shared_code(hw) != 0) {
910 		device_printf(dev, "Unable to initialize the shared code\n");
911 		error = ENXIO;
912 		goto err_pci;
913 	}
914 
915 	if (hw->mbx.ops.init_params)
916 		hw->mbx.ops.init_params(hw);
917 
918 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
919 
920 	if (hw->mac.type != ixgbe_mac_82598EB)
921 		hw->phy.smart_speed = ixgbe_smart_speed;
922 
923 	ixgbe_init_device_features(adapter);
924 
925 	/* Enable WoL (if supported) */
926 	ixgbe_check_wol_support(adapter);
927 
928 	/* Verify adapter fan is still functional (if applicable) */
929 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
930 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
931 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
932 	}
933 
934 	/* Ensure SW/FW semaphore is free */
935 	ixgbe_init_swfw_semaphore(hw);
936 
937 	/* Set an initial default flow control value */
938 	hw->fc.requested_mode = ixgbe_flow_control;
939 
940 	hw->phy.reset_if_overtemp = TRUE;
941 	error = ixgbe_reset_hw(hw);
942 	hw->phy.reset_if_overtemp = FALSE;
943 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
944 		/*
945 		 * No optics in this port, set up
946 		 * so the timer routine will probe
947 		 * for later insertion.
948 		 */
949 		adapter->sfp_probe = TRUE;
950 		error = 0;
951 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
952 		device_printf(dev, "Unsupported SFP+ module detected!\n");
953 		error = EIO;
954 		goto err_pci;
955 	} else if (error) {
956 		device_printf(dev, "Hardware initialization failed\n");
957 		error = EIO;
958 		goto err_pci;
959 	}
960 
961 	/* Make sure we have a good EEPROM before we read from it */
962 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
963 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
964 		error = EIO;
965 		goto err_pci;
966 	}
967 
968 	error = ixgbe_start_hw(hw);
969 	switch (error) {
970 	case IXGBE_ERR_EEPROM_VERSION:
971 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
972 		break;
973 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
974 		device_printf(dev, "Unsupported SFP+ Module\n");
975 		error = EIO;
976 		goto err_pci;
977 	case IXGBE_ERR_SFP_NOT_PRESENT:
978 		device_printf(dev, "No SFP+ Module found\n");
979 		/* falls thru */
980 	default:
981 		break;
982 	}
983 
984 	/* Do the bulk of the iflib initialization: set the MAC and fill in the shared-context (scctx) parameters */
985 
986 	iflib_set_mac(ctx, hw->mac.addr);
987 	switch (adapter->hw.mac.type) {
988 	case ixgbe_mac_X550:
989 	case ixgbe_mac_X550EM_x:
990 	case ixgbe_mac_X550EM_a:
991 		scctx->isc_rss_table_size = 512;
992 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
993 		break;
994 	default:
995 		scctx->isc_rss_table_size = 128;
996 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
997 	}
998 
999 	/* Allow legacy interrupts */
1000 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1001 
1002 	scctx->isc_txqsizes[0] =
1003 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1004 	    sizeof(u32), DBA_ALIGN);
1005 	scctx->isc_rxqsizes[0] =
1006 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1007 	    DBA_ALIGN);
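	/*
	 * Illustrative sizing (assuming 2048 descriptors and a DBA_ALIGN of
	 * 128): each advanced descriptor is 16 bytes, so the TX ring above
	 * asks for roundup2(2048 * 16 + sizeof(u32), 128) bytes of DMA
	 * memory and the RX ring for roundup2(2048 * 16, 128) bytes.
	 */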
1008 
1009 	/* XXX */
1010 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1011 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1012 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1013 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1014 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
1015 	} else {
1016 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1017 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1018 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
1019 	}
1020 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1021 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1022 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1023 
1024 	scctx->isc_txrx = &ixgbe_txrx;
1025 
1026 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1027 
1028 	return (0);
1029 
1030 err_pci:
1031 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1032 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1033 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1034 	ixgbe_free_pci_resources(ctx);
1035 
1036 	return (error);
1037 } /* ixgbe_if_attach_pre */
1038 
1039  /*********************************************************************
1040  * ixgbe_if_attach_post - Device initialization routine, part 2
1041  *
1042  *   Called during driver load, but after interrupts and
1043  *   resources have been allocated and configured.
1044  *   Sets up some data structures not relevant to iflib.
1045  *
1046  *   return 0 on success, positive on failure
1047  *********************************************************************/
1048 static int
1049 ixgbe_if_attach_post(if_ctx_t ctx)
1050 {
1051 	device_t dev;
1052 	struct adapter  *adapter;
1053 	struct ixgbe_hw *hw;
1054 	int             error = 0;
1055 
1056 	dev = iflib_get_dev(ctx);
1057 	adapter = iflib_get_softc(ctx);
1058 	hw = &adapter->hw;
1059 
1060 
1061 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1062 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1063 		device_printf(dev, "Device does not support legacy interrupts\n");
1064 		error = ENXIO;
1065 		goto err;
1066 	}
1067 
1068 	/* Allocate multicast array memory. */
1069 	adapter->mta = malloc(sizeof(*adapter->mta) *
1070 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1071 	if (adapter->mta == NULL) {
1072 		device_printf(dev, "Can not allocate multicast setup array\n");
1073 		error = ENOMEM;
1074 		goto err;
1075 	}
1076 
1077 	/* hw.ix defaults init */
1078 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1079 
1080 	/* Enable the optics for 82599 SFP+ fiber */
1081 	ixgbe_enable_tx_laser(hw);
1082 
1083 	/* Enable power to the phy. */
1084 	ixgbe_set_phy_power(hw, TRUE);
1085 
1086 	ixgbe_initialize_iov(adapter);
1087 
1088 	error = ixgbe_setup_interface(ctx);
1089 	if (error) {
1090 		device_printf(dev, "Interface setup failed: %d\n", error);
1091 		goto err;
1092 	}
1093 
1094 	ixgbe_if_update_admin_status(ctx);
1095 
1096 	/* Initialize statistics */
1097 	ixgbe_update_stats_counters(adapter);
1098 	ixgbe_add_hw_stats(adapter);
1099 
1100 	/* Check PCIE slot type/speed/width */
1101 	ixgbe_get_slot_info(adapter);
1102 
1103 	/*
1104 	 * Do time init and sysctl init here, but
1105 	 * only on the first port of a bypass adapter.
1106 	 */
1107 	ixgbe_bypass_init(adapter);
1108 
1109 	/* Set an initial dmac value */
1110 	adapter->dmac = 0;
1111 	/* Set initial advertised speeds (if applicable) */
1112 	adapter->advertise = ixgbe_get_advertise(adapter);
1113 
1114 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1115 		ixgbe_define_iov_schemas(dev, &error);
1116 
1117 	/* Add sysctls */
1118 	ixgbe_add_device_sysctls(ctx);
1119 
1120 	return (0);
1121 err:
1122 	return (error);
1123 } /* ixgbe_if_attach_post */
1124 
1125 /************************************************************************
1126  * ixgbe_check_wol_support
1127  *
1128  *   Checks whether the adapter's ports are capable of
1129  *   Wake On LAN by reading the adapter's NVM.
1130  *
1131  *   Sets each port's hw->wol_enabled value depending
1132  *   on the value read here.
1133  ************************************************************************/
1134 static void
1135 ixgbe_check_wol_support(struct adapter *adapter)
1136 {
1137 	struct ixgbe_hw *hw = &adapter->hw;
1138 	u16             dev_caps = 0;
1139 
1140 	/* Find out WoL support for port */
1141 	adapter->wol_support = hw->wol_enabled = 0;
1142 	ixgbe_get_device_caps(hw, &dev_caps);
1143 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1144 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1145 	     hw->bus.func == 0))
1146 		adapter->wol_support = hw->wol_enabled = 1;
1147 
1148 	/* Save initial wake up filter configuration */
1149 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1150 
1151 	return;
1152 } /* ixgbe_check_wol_support */
1153 
1154 /************************************************************************
1155  * ixgbe_setup_interface
1156  *
1157  *   Setup networking device structure and register an interface.
1158  ************************************************************************/
1159 static int
1160 ixgbe_setup_interface(if_ctx_t ctx)
1161 {
1162 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1163 	struct adapter *adapter = iflib_get_softc(ctx);
1164 
1165 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1166 
1167 	if_setbaudrate(ifp, IF_Gbps(10));
1168 
1169 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1170 
1171 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1172 
1173 	ixgbe_add_media_types(ctx);
1174 
1175 	/* Autoselect media by default */
1176 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1177 
1178 	return (0);
1179 } /* ixgbe_setup_interface */
1180 
1181 /************************************************************************
1182  * ixgbe_if_get_counter
1183  ************************************************************************/
1184 static uint64_t
1185 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1186 {
1187 	struct adapter *adapter = iflib_get_softc(ctx);
1188 	if_t           ifp = iflib_get_ifp(ctx);
1189 
1190 	switch (cnt) {
1191 	case IFCOUNTER_IPACKETS:
1192 		return (adapter->ipackets);
1193 	case IFCOUNTER_OPACKETS:
1194 		return (adapter->opackets);
1195 	case IFCOUNTER_IBYTES:
1196 		return (adapter->ibytes);
1197 	case IFCOUNTER_OBYTES:
1198 		return (adapter->obytes);
1199 	case IFCOUNTER_IMCASTS:
1200 		return (adapter->imcasts);
1201 	case IFCOUNTER_OMCASTS:
1202 		return (adapter->omcasts);
1203 	case IFCOUNTER_COLLISIONS:
1204 		return (0);
1205 	case IFCOUNTER_IQDROPS:
1206 		return (adapter->iqdrops);
1207 	case IFCOUNTER_OQDROPS:
1208 		return (0);
1209 	case IFCOUNTER_IERRORS:
1210 		return (adapter->ierrors);
1211 	default:
1212 		return (if_get_counter_default(ifp, cnt));
1213 	}
1214 } /* ixgbe_if_get_counter */
1215 
1216 /************************************************************************
1217  * ixgbe_if_i2c_req
1218  ************************************************************************/
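/*
 * Services SIOCGI2C requests from userland (for example ifconfig's
 * SFP/SFP+ module readout) by reading the requested bytes from the
 * module's I2C EEPROM. The return value of read_i2c_byte() is not
 * checked, so failed reads are not reported to the caller.
 */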
1219 static int
1220 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1221 {
1222 	struct adapter		*adapter = iflib_get_softc(ctx);
1223 	struct ixgbe_hw 	*hw = &adapter->hw;
1224 	int 			i;
1225 
1226 
1227 	if (hw->phy.ops.read_i2c_byte == NULL)
1228 		return (ENXIO);
1229 	for (i = 0; i < req->len; i++)
1230 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1231 		    req->dev_addr, &req->data[i]);
1232 	return (0);
1233 } /* ixgbe_if_i2c_req */
1234 
1235 /************************************************************************
1236  * ixgbe_add_media_types
1237  ************************************************************************/
1238 static void
1239 ixgbe_add_media_types(if_ctx_t ctx)
1240 {
1241 	struct adapter  *adapter = iflib_get_softc(ctx);
1242 	struct ixgbe_hw *hw = &adapter->hw;
1243 	device_t        dev = iflib_get_dev(ctx);
1244 	u64             layer;
1245 
1246 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1247 
1248 	/* Media types with matching FreeBSD media defines */
1249 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1250 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1251 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1252 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1253 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1254 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1255 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1256 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1257 
1258 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1259 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1260 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1261 		    NULL);
1262 
1263 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1264 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1265 		if (hw->phy.multispeed_fiber)
1266 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1267 			    NULL);
1268 	}
1269 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1270 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1271 		if (hw->phy.multispeed_fiber)
1272 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1273 			    NULL);
1274 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1275 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1276 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1277 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1278 
1279 #ifdef IFM_ETH_XTYPE
1280 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1281 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1282 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1283 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1284 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1285 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1286 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1287 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1288 #else
1289 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1290 		device_printf(dev, "Media supported: 10GbaseKR\n");
1291 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1292 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1293 	}
1294 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1295 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1296 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1297 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1298 	}
1299 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1300 		device_printf(dev, "Media supported: 1000baseKX\n");
1301 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1302 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1303 	}
1304 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1305 		device_printf(dev, "Media supported: 2500baseKX\n");
1306 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1307 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1308 	}
1309 #endif
1310 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1311 		device_printf(dev, "Media supported: 1000baseBX\n");
1312 
1313 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1314 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1315 		    0, NULL);
1316 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1317 	}
1318 
1319 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1320 } /* ixgbe_add_media_types */
1321 
1322 /************************************************************************
1323  * ixgbe_is_sfp
1324  ************************************************************************/
1325 static inline bool
1326 ixgbe_is_sfp(struct ixgbe_hw *hw)
1327 {
1328 	switch (hw->mac.type) {
1329 	case ixgbe_mac_82598EB:
1330 		if (hw->phy.type == ixgbe_phy_nl)
1331 			return (TRUE);
1332 		return (FALSE);
1333 	case ixgbe_mac_82599EB:
1334 		switch (hw->mac.ops.get_media_type(hw)) {
1335 		case ixgbe_media_type_fiber:
1336 		case ixgbe_media_type_fiber_qsfp:
1337 			return (TRUE);
1338 		default:
1339 			return (FALSE);
1340 		}
1341 	case ixgbe_mac_X550EM_x:
1342 	case ixgbe_mac_X550EM_a:
1343 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1344 			return (TRUE);
1345 		return (FALSE);
1346 	default:
1347 		return (FALSE);
1348 	}
1349 } /* ixgbe_is_sfp */
1350 
1351 /************************************************************************
1352  * ixgbe_config_link
1353  ************************************************************************/
1354 static void
1355 ixgbe_config_link(if_ctx_t ctx)
1356 {
1357 	struct adapter  *adapter = iflib_get_softc(ctx);
1358 	struct ixgbe_hw *hw = &adapter->hw;
1359 	u32             autoneg, err = 0;
1360 	bool            sfp, negotiate;
1361 
1362 	sfp = ixgbe_is_sfp(hw);
1363 
1364 	if (sfp) {
1365 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1366 		iflib_admin_intr_deferred(ctx);
1367 	} else {
1368 		if (hw->mac.ops.check_link)
1369 			err = ixgbe_check_link(hw, &adapter->link_speed,
1370 			    &adapter->link_up, FALSE);
1371 		if (err)
1372 			return;
1373 		autoneg = hw->phy.autoneg_advertised;
1374 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1375 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1376 			    &negotiate);
1377 		if (err)
1378 			return;
1379 		if (hw->mac.ops.setup_link)
1380 			err = hw->mac.ops.setup_link(hw, autoneg,
1381 			    adapter->link_up);
1382 	}
1383 } /* ixgbe_config_link */
1384 
1385 /************************************************************************
1386  * ixgbe_update_stats_counters - Update board statistics counters.
1387  ************************************************************************/
1388 static void
1389 ixgbe_update_stats_counters(struct adapter *adapter)
1390 {
1391 	struct ixgbe_hw       *hw = &adapter->hw;
1392 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1393 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1394 	u64                   total_missed_rx = 0;
1395 
1396 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1397 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1398 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1399 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1400 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1401 
1402 	for (int i = 0; i < 16; i++) {
1403 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1404 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1405 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1406 	}
1407 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1408 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1409 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1410 
1411 	/* Hardware workaround, gprc counts missed packets */
1412 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1413 	stats->gprc -= missed_rx;
1414 
1415 	if (hw->mac.type != ixgbe_mac_82598EB) {
1416 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1417 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1418 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1419 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1420 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1421 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1422 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1423 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1424 	} else {
1425 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1426 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1427 		/* 82598 only has a counter in the high register */
1428 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1429 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1430 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1431 	}
1432 
1433 	/*
1434 	 * Workaround: mprc hardware is incorrectly counting
1435 	 * broadcasts, so for now we subtract those.
1436 	 */
1437 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1438 	stats->bprc += bprc;
1439 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1440 	if (hw->mac.type == ixgbe_mac_82598EB)
1441 		stats->mprc -= bprc;
1442 
1443 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1444 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1445 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1446 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1447 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1448 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1449 
1450 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1451 	stats->lxontxc += lxon;
1452 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1453 	stats->lxofftxc += lxoff;
1454 	total = lxon + lxoff;
1455 
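	/* Pause frames are counted in the TX totals; back them out below */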
1456 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1457 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1458 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1459 	stats->gptc -= total;
1460 	stats->mptc -= total;
1461 	stats->ptc64 -= total;
1462 	stats->gotc -= total * ETHER_MIN_LEN;
1463 
1464 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1465 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1466 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1467 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1468 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1469 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1470 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1471 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1472 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1473 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1474 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1475 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1476 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1477 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1478 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1479 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1480 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1481 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1482 	/* FCoE registers only exist on 82599 and newer */
1483 	if (hw->mac.type != ixgbe_mac_82598EB) {
1484 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1485 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1486 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1487 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1488 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1489 	}
1490 
1491 	/* Fill out the OS statistics structure */
1492 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1493 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1494 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1495 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1496 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1497 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1498 	IXGBE_SET_COLLISIONS(adapter, 0);
1499 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1500 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1501 } /* ixgbe_update_stats_counters */
1502 
1503 /************************************************************************
1504  * ixgbe_add_hw_stats
1505  *
1506  *   Add sysctl variables, one per statistic, to the system.
1507  ************************************************************************/
1508 static void
1509 ixgbe_add_hw_stats(struct adapter *adapter)
1510 {
1511 	device_t               dev = iflib_get_dev(adapter->ctx);
1512 	struct ix_rx_queue     *rx_que;
1513 	struct ix_tx_queue     *tx_que;
1514 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1515 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1516 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1517 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1518 	struct sysctl_oid      *stat_node, *queue_node;
1519 	struct sysctl_oid_list *stat_list, *queue_list;
1520 	int                    i;
1521 
1522 #define QUEUE_NAME_LEN 32
1523 	char                   namebuf[QUEUE_NAME_LEN];
1524 
1525 	/* Driver Statistics */
1526 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1527 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1528 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1529 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1530 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1531 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1532 
1533 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1534 		struct tx_ring *txr = &tx_que->txr;
1535 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1536 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1537 		    CTLFLAG_RD, NULL, "Queue Name");
1538 		queue_list = SYSCTL_CHILDREN(queue_node);
1539 
1540 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1541 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1542 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1543 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1544 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1545 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1546 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1547 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1548 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1549 		    CTLFLAG_RD, &txr->total_packets,
1550 		    "Queue Packets Transmitted");
1551 	}
1552 
1553 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1554 		struct rx_ring *rxr = &rx_que->rxr;
1555 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1556 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1557 		    CTLFLAG_RD, NULL, "Queue Name");
1558 		queue_list = SYSCTL_CHILDREN(queue_node);
1559 
1560 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1561 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1562 		    sizeof(&adapter->rx_queues[i]),
1563 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1564 		    "Interrupt Rate");
1565 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1566 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1567 		    "irqs on this queue");
1568 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1569 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1570 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1571 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1572 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1573 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1574 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1575 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1576 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1577 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1578 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1579 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1580 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1581 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1582 	}
1583 
1584 	/* MAC stats get their own sub node */
1585 
1586 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1587 	    CTLFLAG_RD, NULL, "MAC Statistics");
1588 	stat_list = SYSCTL_CHILDREN(stat_node);
1589 
1590 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1591 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1592 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1593 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1594 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1595 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1596 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1597 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1598 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1599 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1600 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1601 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1602 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1603 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1604 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1605 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1606 
1607 	/* Flow Control stats */
1608 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1609 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1610 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1611 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1612 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1613 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1614 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1615 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1616 
1617 	/* Packet Reception Stats */
1618 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1619 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1620 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1621 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1622 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1623 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1624 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1625 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1626 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1627 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1628 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1629 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1630 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1631 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1632 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1633 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1634 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1635 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1636 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1637 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1638 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1639 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1640 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1641 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1642 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1643 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1644 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1645 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1646 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1647 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1648 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1649 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1650 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1651 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1652 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1653 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1654 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1655 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1656 
1657 	/* Packet Transmission Stats */
1658 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1659 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1660 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1661 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1662 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1663 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1664 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1665 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1666 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1667 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1668 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1669 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1670 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1671 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1672 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1673 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1674 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1675 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1676 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1677 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1678 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1679 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1680 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1681 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1682 } /* ixgbe_add_hw_stats */
1683 
1684 /************************************************************************
1685  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1686  *
1687  *   Retrieves the TDH value from the hardware
1688  ************************************************************************/
1689 static int
1690 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1691 {
1692 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1693 	int            error;
1694 	unsigned int   val;
1695 
1696 	if (!txr)
1697 		return (0);
1698 
1699 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1700 	error = sysctl_handle_int(oidp, &val, 0, req);
1701 	if (error || !req->newptr)
1702 		return error;
1703 
1704 	return (0);
1705 } /* ixgbe_sysctl_tdh_handler */
1706 
1707 /************************************************************************
1708  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1709  *
1710  *   Retrieves the TDT value from the hardware
1711  ************************************************************************/
1712 static int
1713 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1714 {
1715 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1716 	int            error;
1717 	unsigned int   val;
1718 
1719 	if (!txr)
1720 		return (0);
1721 
1722 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1723 	error = sysctl_handle_int(oidp, &val, 0, req);
1724 	if (error || !req->newptr)
1725 		return error;
1726 
1727 	return (0);
1728 } /* ixgbe_sysctl_tdt_handler */
1729 
1730 /************************************************************************
1731  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1732  *
1733  *   Retrieves the RDH value from the hardware
1734  ************************************************************************/
1735 static int
1736 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1737 {
1738 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1739 	int            error;
1740 	unsigned int   val;
1741 
1742 	if (!rxr)
1743 		return (0);
1744 
1745 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1746 	error = sysctl_handle_int(oidp, &val, 0, req);
1747 	if (error || !req->newptr)
1748 		return error;
1749 
1750 	return (0);
1751 } /* ixgbe_sysctl_rdh_handler */
1752 
1753 /************************************************************************
1754  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1755  *
1756  *   Retrieves the RDT value from the hardware
1757  ************************************************************************/
1758 static int
1759 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1760 {
1761 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1762 	int            error;
1763 	unsigned int   val;
1764 
1765 	if (!rxr)
1766 		return (0);
1767 
1768 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1769 	error = sysctl_handle_int(oidp, &val, 0, req);
1770 	if (error || !req->newptr)
1771 		return error;
1772 
1773 	return (0);
1774 } /* ixgbe_sysctl_rdt_handler */
1775 
1776 /************************************************************************
1777  * ixgbe_if_vlan_register
1778  *
1779  *   Run via vlan config EVENT, it enables us to use the
1780  *   HW Filter table since we can get the vlan id. This
1781  *   just creates the entry in the soft version of the
1782  *   VFTA, init will repopulate the real table.
1783  ************************************************************************/
1784 static void
1785 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1786 {
1787 	struct adapter *adapter = iflib_get_softc(ctx);
1788 	u16            index, bit;
1789 
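	/* The shadow VFTA is a bitmap: word = vtag / 32, bit = vtag % 32 */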
1790 	index = (vtag >> 5) & 0x7F;
1791 	bit = vtag & 0x1F;
1792 	adapter->shadow_vfta[index] |= (1 << bit);
1793 	++adapter->num_vlans;
1794 	ixgbe_setup_vlan_hw_support(ctx);
1795 } /* ixgbe_if_vlan_register */
1796 
1797 /************************************************************************
1798  * ixgbe_if_vlan_unregister
1799  *
1800  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1801  ************************************************************************/
1802 static void
1803 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1804 {
1805 	struct adapter *adapter = iflib_get_softc(ctx);
1806 	u16            index, bit;
1807 
1808 	index = (vtag >> 5) & 0x7F;
1809 	bit = vtag & 0x1F;
1810 	adapter->shadow_vfta[index] &= ~(1 << bit);
1811 	--adapter->num_vlans;
1812 	/* Re-init to load the changes */
1813 	ixgbe_setup_vlan_hw_support(ctx);
1814 } /* ixgbe_if_vlan_unregister */
1815 
1816 /************************************************************************
1817  * ixgbe_setup_vlan_hw_support
1818  ************************************************************************/
1819 static void
1820 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1821 {
1822 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1823 	struct adapter  *adapter = iflib_get_softc(ctx);
1824 	struct ixgbe_hw *hw = &adapter->hw;
1825 	struct rx_ring  *rxr;
1826 	int             i;
1827 	u32             ctrl;
1828 
1829 
1830 	/*
1831 	 * We get here through init, meaning a soft reset
1832 	 * has already cleared the VFTA and other state,
1833 	 * so if no VLANs have been registered there is
1834 	 * nothing to do.
1835 	 */
1836 	if (adapter->num_vlans == 0)
1837 		return;
1838 
1839 	/* Setup the queues for vlans */
1840 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1841 		for (i = 0; i < adapter->num_rx_queues; i++) {
1842 			rxr = &adapter->rx_queues[i].rxr;
1843 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1844 			if (hw->mac.type != ixgbe_mac_82598EB) {
1845 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1846 				ctrl |= IXGBE_RXDCTL_VME;
1847 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1848 			}
1849 			rxr->vtag_strip = TRUE;
1850 		}
1851 	}
1852 
1853 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1854 		return;
1855 	/*
1856 	 * A soft reset zeroes out the VFTA, so
1857 	 * we need to repopulate it now.
1858 	 */
1859 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1860 		if (adapter->shadow_vfta[i] != 0)
1861 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1862 			    adapter->shadow_vfta[i]);
1863 
1864 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1865 	/* Enable the Filter Table if enabled */
1866 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1867 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1868 		ctrl |= IXGBE_VLNCTRL_VFE;
1869 	}
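	/* On 82598 tag stripping is a global VLNCTRL setting, not per-queue */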
1870 	if (hw->mac.type == ixgbe_mac_82598EB)
1871 		ctrl |= IXGBE_VLNCTRL_VME;
1872 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1873 } /* ixgbe_setup_vlan_hw_support */
1874 
1875 /************************************************************************
1876  * ixgbe_get_slot_info
1877  *
1878  *   Get the width and transaction speed of
1879  *   the slot this adapter is plugged into.
1880  ************************************************************************/
1881 static void
1882 ixgbe_get_slot_info(struct adapter *adapter)
1883 {
1884 	device_t        dev = iflib_get_dev(adapter->ctx);
1885 	struct ixgbe_hw *hw = &adapter->hw;
1886 	int             bus_info_valid = TRUE;
1887 	u32             offset;
1888 	u16             link;
1889 
1890 	/* Some devices are behind an internal bridge */
1891 	switch (hw->device_id) {
1892 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1893 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1894 		goto get_parent_info;
1895 	default:
1896 		break;
1897 	}
1898 
1899 	ixgbe_get_bus_info(hw);
1900 
1901 	/*
1902 	 * Some devices don't use PCI-E; for those there is no
1903 	 * need to display "Unknown" for bus speed and width.
1904 	 */
1905 	switch (hw->mac.type) {
1906 	case ixgbe_mac_X550EM_x:
1907 	case ixgbe_mac_X550EM_a:
1908 		return;
1909 	default:
1910 		goto display;
1911 	}
1912 
1913 get_parent_info:
1914 	/*
1915 	 * For the Quad port adapter we need to parse back
1916 	 * up the PCI tree to find the speed of the expansion
1917 	 * slot into which this adapter is plugged. A bit more work.
1918 	 */
1919 	dev = device_get_parent(device_get_parent(dev));
1920 #ifdef IXGBE_DEBUG
1921 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1922 	    pci_get_slot(dev), pci_get_function(dev));
1923 #endif
1924 	dev = device_get_parent(device_get_parent(dev));
1925 #ifdef IXGBE_DEBUG
1926 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1927 	    pci_get_slot(dev), pci_get_function(dev));
1928 #endif
1929 	/* Now get the PCI Express Capabilities offset */
1930 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1931 		/*
1932 		 * Hmm...can't get PCI-Express capabilities.
1933 		 * Falling back to default method.
1934 		 */
1935 		bus_info_valid = FALSE;
1936 		ixgbe_get_bus_info(hw);
1937 		goto display;
1938 	}
1939 	/* ...and read the Link Status Register */
1940 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1941 	ixgbe_set_pci_config_data_generic(hw, link);
1942 
1943 display:
1944 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1945 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1946 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1947 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1948 	     "Unknown"),
1949 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1950 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1951 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1952 	     "Unknown"));
1953 
1954 	if (bus_info_valid) {
1955 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1956 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1957 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1958 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1959 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1960 		}
1961 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1962 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1963 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1964 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1965 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1966 		}
1967 	} else
1968 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
1969 
1970 	return;
1971 } /* ixgbe_get_slot_info */
1972 
1973 /************************************************************************
1974  * ixgbe_if_msix_intr_assign
1975  *
1976  *   Setup MSI-X Interrupt resources and handlers
1977  ************************************************************************/
1978 static int
1979 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1980 {
1981 	struct adapter     *adapter = iflib_get_softc(ctx);
1982 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1983 	struct ix_tx_queue *tx_que;
1984 	int                error, rid, vector = 0;
1985 	int                cpu_id = 0;
1986 	char               buf[16];
1987 
1988 	/* Admin queue is vector 0 */
1989 	rid = vector + 1;
1990 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1991 		rid = vector + 1;
1992 
1993 		snprintf(buf, sizeof(buf), "rxq%d", i);
1994 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1995 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
1996 
1997 		if (error) {
1998 			device_printf(iflib_get_dev(ctx),
1999 			    "Failed to allocate queue interrupt %d, error: %d\n", i, error);
2000 			adapter->num_rx_queues = i + 1;
2001 			goto fail;
2002 		}
2003 
2004 		rx_que->msix = vector;
2005 		adapter->active_queues |= (u64)1 << rx_que->msix;
2006 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2007 			/*
2008 			 * The queue ID is used as the RSS layer bucket ID.
2009 			 * We look up the queue ID -> RSS CPU ID and select
2010 			 * that.
2011 			 */
2012 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2013 		} else {
2014 			/*
2015 			 * Bind the MSI-X vector, and thus the
2016 			 * rings to the corresponding cpu.
2017 			 *
2018 			 * This just happens to match the default RSS
2019 			 * round-robin bucket -> queue -> CPU allocation.
2020 			 */
2021 			if (adapter->num_rx_queues > 1)
2022 				cpu_id = i;
2023 		}
2024 
2025 	}
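	/*
	 * TX queues do not get their own MSI-X vectors; each one is
	 * serviced as a softirq on the matching RX queue's interrupt.
	 */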
2026 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2027 		snprintf(buf, sizeof(buf), "txq%d", i);
2028 		tx_que = &adapter->tx_queues[i];
2029 		tx_que->msix = i % adapter->num_rx_queues;
2030 		iflib_softirq_alloc_generic(ctx,
2031 		    &adapter->rx_queues[tx_que->msix].que_irq,
2032 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2033 	}
2034 	rid = vector + 1;
2035 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2036 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2037 	if (error) {
2038 		device_printf(iflib_get_dev(ctx),
2039 		    "Failed to register admin handler\n");
2040 		return (error);
2041 	}
2042 
2043 	adapter->vector = vector;
2044 
2045 	return (0);
2046 fail:
2047 	iflib_irq_free(ctx, &adapter->irq);
2048 	rx_que = adapter->rx_queues;
2049 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2050 		iflib_irq_free(ctx, &rx_que->que_irq);
2051 
2052 	return (error);
2053 } /* ixgbe_if_msix_intr_assign */
2054 
2055 /*********************************************************************
2056  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2057  **********************************************************************/
2058 static int
2059 ixgbe_msix_que(void *arg)
2060 {
2061 	struct ix_rx_queue *que = arg;
2062 	struct adapter     *adapter = que->adapter;
2063 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2064 
2065 	/* Protect against spurious interrupts */
2066 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2067 		return 0;
2068 
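	/* Mask this queue's interrupt; iflib runs the deferred handler */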
2069 	ixgbe_disable_queue(adapter, que->msix);
2070 	++que->irqs;
2071 
2072 	return (FILTER_SCHEDULE_THREAD);
2073 } /* ixgbe_msix_que */
2074 
2075 /************************************************************************
2076  * ixgbe_if_media_status - Media Ioctl callback
2077  *
2078  *   Called whenever the user queries the status of
2079  *   the interface using ifconfig.
2080  ************************************************************************/
2081 static void
2082 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
2083 {
2084 	struct adapter  *adapter = iflib_get_softc(ctx);
2085 	struct ixgbe_hw *hw = &adapter->hw;
2086 	int             layer;
2087 
2088 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2089 
2090 	ifmr->ifm_status = IFM_AVALID;
2091 	ifmr->ifm_active = IFM_ETHER;
2092 
2093 	if (!adapter->link_active)
2094 		return;
2095 
2096 	ifmr->ifm_status |= IFM_ACTIVE;
2097 	layer = adapter->phy_layer;
2098 
2099 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2100 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2101 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2102 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2103 		switch (adapter->link_speed) {
2104 		case IXGBE_LINK_SPEED_10GB_FULL:
2105 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2106 			break;
2107 		case IXGBE_LINK_SPEED_1GB_FULL:
2108 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2109 			break;
2110 		case IXGBE_LINK_SPEED_100_FULL:
2111 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2112 			break;
2113 		case IXGBE_LINK_SPEED_10_FULL:
2114 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2115 			break;
2116 		}
2117 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2118 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2119 		switch (adapter->link_speed) {
2120 		case IXGBE_LINK_SPEED_10GB_FULL:
2121 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2122 			break;
2123 		}
2124 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2125 		switch (adapter->link_speed) {
2126 		case IXGBE_LINK_SPEED_10GB_FULL:
2127 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2128 			break;
2129 		case IXGBE_LINK_SPEED_1GB_FULL:
2130 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2131 			break;
2132 		}
2133 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2134 		switch (adapter->link_speed) {
2135 		case IXGBE_LINK_SPEED_10GB_FULL:
2136 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2137 			break;
2138 		case IXGBE_LINK_SPEED_1GB_FULL:
2139 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2140 			break;
2141 		}
2142 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2143 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2144 		switch (adapter->link_speed) {
2145 		case IXGBE_LINK_SPEED_10GB_FULL:
2146 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2147 			break;
2148 		case IXGBE_LINK_SPEED_1GB_FULL:
2149 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2150 			break;
2151 		}
2152 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2153 		switch (adapter->link_speed) {
2154 		case IXGBE_LINK_SPEED_10GB_FULL:
2155 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2156 			break;
2157 		}
2158 	/*
2159 	 * XXX: These need to use the proper media types once
2160 	 * they're added.
2161 	 */
2162 #ifndef IFM_ETH_XTYPE
2163 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2164 		switch (adapter->link_speed) {
2165 		case IXGBE_LINK_SPEED_10GB_FULL:
2166 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2167 			break;
2168 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2169 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2170 			break;
2171 		case IXGBE_LINK_SPEED_1GB_FULL:
2172 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2173 			break;
2174 		}
2175 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2176 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2177 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2178 		switch (adapter->link_speed) {
2179 		case IXGBE_LINK_SPEED_10GB_FULL:
2180 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2181 			break;
2182 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2183 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2184 			break;
2185 		case IXGBE_LINK_SPEED_1GB_FULL:
2186 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2187 			break;
2188 		}
2189 #else
2190 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2191 		switch (adapter->link_speed) {
2192 		case IXGBE_LINK_SPEED_10GB_FULL:
2193 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2194 			break;
2195 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2196 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2197 			break;
2198 		case IXGBE_LINK_SPEED_1GB_FULL:
2199 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2200 			break;
2201 		}
2202 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2203 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2204 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2205 		switch (adapter->link_speed) {
2206 		case IXGBE_LINK_SPEED_10GB_FULL:
2207 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2208 			break;
2209 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2210 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2211 			break;
2212 		case IXGBE_LINK_SPEED_1GB_FULL:
2213 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2214 			break;
2215 		}
2216 #endif
2217 
2218 	/* If nothing is recognized... */
2219 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2220 		ifmr->ifm_active |= IFM_UNKNOWN;
2221 
2222 	/* Display current flow control setting used on link */
2223 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2224 	    hw->fc.current_mode == ixgbe_fc_full)
2225 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2226 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2227 	    hw->fc.current_mode == ixgbe_fc_full)
2228 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2229 } /* ixgbe_if_media_status */
2230 
2231 /************************************************************************
2232  * ixgbe_if_media_change - Media Ioctl callback
2233  *
2234  *   Called when the user changes speed/duplex using
2235  *   media/mediaopt option with ifconfig.
2236  ************************************************************************/
2237 static int
2238 ixgbe_if_media_change(if_ctx_t ctx)
2239 {
2240 	struct adapter   *adapter = iflib_get_softc(ctx);
2241 	struct ifmedia   *ifm = iflib_get_media(ctx);
2242 	struct ixgbe_hw  *hw = &adapter->hw;
2243 	ixgbe_link_speed speed = 0;
2244 
2245 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2246 
2247 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2248 		return (EINVAL);
2249 
2250 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2251 		return (EPERM);
2252 
2253 	/*
2254 	 * We don't actually need to check against the supported
2255 	 * media types of the adapter; ifmedia will take care of
2256 	 * that for us.
2257 	 */
2258 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2259 	case IFM_AUTO:
2260 	case IFM_10G_T:
2261 		speed |= IXGBE_LINK_SPEED_100_FULL;
2262 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2263 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2264 		break;
2265 	case IFM_10G_LRM:
2266 	case IFM_10G_LR:
2267 #ifndef IFM_ETH_XTYPE
2268 	case IFM_10G_SR: /* KR, too */
2269 	case IFM_10G_CX4: /* KX4 */
2270 #else
2271 	case IFM_10G_KR:
2272 	case IFM_10G_KX4:
2273 #endif
2274 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2275 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2276 		break;
2277 #ifndef IFM_ETH_XTYPE
2278 	case IFM_1000_CX: /* KX */
2279 #else
2280 	case IFM_1000_KX:
2281 #endif
2282 	case IFM_1000_LX:
2283 	case IFM_1000_SX:
2284 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2285 		break;
2286 	case IFM_1000_T:
2287 		speed |= IXGBE_LINK_SPEED_100_FULL;
2288 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2289 		break;
2290 	case IFM_10G_TWINAX:
2291 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2292 		break;
2293 	case IFM_100_TX:
2294 		speed |= IXGBE_LINK_SPEED_100_FULL;
2295 		break;
2296 	case IFM_10_T:
2297 		speed |= IXGBE_LINK_SPEED_10_FULL;
2298 		break;
2299 	default:
2300 		goto invalid;
2301 	}
2302 
2303 	hw->mac.autotry_restart = TRUE;
2304 	hw->mac.ops.setup_link(hw, speed, TRUE);
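	/* advertise encoding: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M */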
2305 	adapter->advertise =
2306 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2307 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2308 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2309 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2310 
2311 	return (0);
2312 
2313 invalid:
2314 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2315 
2316 	return (EINVAL);
2317 } /* ixgbe_if_media_change */
2318 
2319 /************************************************************************
2320  * ixgbe_if_promisc_set
2321  ************************************************************************/
2322 static int
2323 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2324 {
2325 	struct adapter *adapter = iflib_get_softc(ctx);
2326 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2327 	u32            rctl;
2328 	int            mcnt = 0;
2329 
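	/* FCTRL UPE = unicast promiscuous, MPE = multicast promiscuous */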
2330 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2331 	rctl &= (~IXGBE_FCTRL_UPE);
2332 	if (ifp->if_flags & IFF_ALLMULTI)
2333 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2334 	else {
2335 		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2336 	}
2337 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2338 		rctl &= (~IXGBE_FCTRL_MPE);
2339 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2340 
2341 	if (ifp->if_flags & IFF_PROMISC) {
2342 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2343 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2344 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2345 		rctl |= IXGBE_FCTRL_MPE;
2346 		rctl &= ~IXGBE_FCTRL_UPE;
2347 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2348 	}
2349 	return (0);
2350 } /* ixgbe_if_promisc_set */
2351 
2352 /************************************************************************
2353  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2354  ************************************************************************/
2355 static int
2356 ixgbe_msix_link(void *arg)
2357 {
2358 	struct adapter  *adapter = arg;
2359 	struct ixgbe_hw *hw = &adapter->hw;
2360 	u32             eicr, eicr_mask;
2361 	s32             retval;
2362 
2363 	++adapter->link_irq;
2364 
2365 	/* Pause other interrupts */
2366 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2367 
2368 	/* First get the cause */
2369 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2370 	/* Be sure the queue bits are not cleared */
2371 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2372 	/* Clear interrupt with write */
2373 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2374 
2375 	/* Link status change */
2376 	if (eicr & IXGBE_EICR_LSC) {
2377 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2378 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2379 	}
2380 
2381 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2382 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2383 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2384 			/* This is probably overkill :) */
2385 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2386 				return (FILTER_HANDLED);
2387 			/* Disable the interrupt */
2388 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2389 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2390 		} else if (eicr & IXGBE_EICR_ECC) {
2392 			device_printf(iflib_get_dev(adapter->ctx),
2393 			    "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2394 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2395 		}
2396 
2397 		/* Check for over temp condition */
2398 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2399 			switch (adapter->hw.mac.type) {
2400 			case ixgbe_mac_X550EM_a:
2401 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2402 					break;
2403 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2404 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2405 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2406 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2407 				retval = hw->phy.ops.check_overtemp(hw);
2408 				if (retval != IXGBE_ERR_OVERTEMP)
2409 					break;
2410 				device_printf(iflib_get_dev(adapter->ctx),
2411 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2412 				device_printf(iflib_get_dev(adapter->ctx),
2413 				    "System shutdown required!\n");
2414 				break;
2415 			default:
2416 				if (!(eicr & IXGBE_EICR_TS))
2417 					break;
2418 				retval = hw->phy.ops.check_overtemp(hw);
2419 				if (retval != IXGBE_ERR_OVERTEMP)
2420 					break;
2421 				device_printf(iflib_get_dev(adapter->ctx),
2422 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2423 				device_printf(iflib_get_dev(adapter->ctx),
2424 				    "System shutdown required!\n");
2425 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2426 				break;
2427 			}
2428 		}
2429 
2430 		/* Check for VF message */
2431 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2432 		    (eicr & IXGBE_EICR_MAILBOX))
2433 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2434 	}
2435 
2436 	if (ixgbe_is_sfp(hw)) {
2437 		/* Pluggable optics-related interrupt */
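		/*
		 * X540 and newer use SDP0 for module presence;
		 * older parts use SDP2.
		 */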
2438 		if (hw->mac.type >= ixgbe_mac_X540)
2439 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2440 		else
2441 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2442 
2443 		if (eicr & eicr_mask) {
2444 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2445 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2446 		}
2447 
2448 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2449 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2450 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2451 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2452 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2453 		}
2454 	}
2455 
2456 	/* Check for fan failure */
2457 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2458 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2459 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2460 	}
2461 
2462 	/* External PHY interrupt */
2463 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2464 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2465 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2466 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2467 	}
2468 
2469 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2470 } /* ixgbe_msix_link */
2471 
2472 /************************************************************************
2473  * ixgbe_sysctl_interrupt_rate_handler
2474  ************************************************************************/
2475 static int
2476 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2477 {
2478 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2479 	int                error;
2480 	unsigned int       reg, usec, rate;
2481 
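	/*
	 * EITR bits [11:3] hold the interrupt interval in ~2 usec units,
	 * so an interval of N yields roughly 500000/N interrupts per second.
	 */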
2482 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2483 	usec = ((reg & 0x0FF8) >> 3);
2484 	if (usec > 0)
2485 		rate = 500000 / usec;
2486 	else
2487 		rate = 0;
2488 	error = sysctl_handle_int(oidp, &rate, 0, req);
2489 	if (error || !req->newptr)
2490 		return error;
2491 	reg &= ~0xfff; /* default, no limitation */
2492 	ixgbe_max_interrupt_rate = 0;
2493 	if (rate > 0 && rate < 500000) {
2494 		if (rate < 1000)
2495 			rate = 1000;
2496 		ixgbe_max_interrupt_rate = rate;
2497 		reg |= ((4000000/rate) & 0xff8);
2498 	}
2499 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2500 
2501 	return (0);
2502 } /* ixgbe_sysctl_interrupt_rate_handler */
2503 
2504 /************************************************************************
2505  * ixgbe_add_device_sysctls
2506  ************************************************************************/
2507 static void
2508 ixgbe_add_device_sysctls(if_ctx_t ctx)
2509 {
2510 	struct adapter         *adapter = iflib_get_softc(ctx);
2511 	device_t               dev = iflib_get_dev(ctx);
2512 	struct ixgbe_hw        *hw = &adapter->hw;
2513 	struct sysctl_oid_list *child;
2514 	struct sysctl_ctx_list *ctx_list;
2515 
2516 	ctx_list = device_get_sysctl_ctx(dev);
2517 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2518 
2519 	/* Sysctls for all devices */
2520 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2521 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2522 	    IXGBE_SYSCTL_DESC_SET_FC);
2523 
2524 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2525 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2526 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2527 
2528 #ifdef IXGBE_DEBUG
2529 	/* testing sysctls (for all devices) */
2530 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2531 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2532 	    "I", "PCI Power State");
2533 
2534 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2535 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2536 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2537 #endif
2538 	/* for X550 series devices */
2539 	if (hw->mac.type >= ixgbe_mac_X550)
2540 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2541 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2542 		    "I", "DMA Coalesce");
2543 
2544 	/* for WoL-capable devices */
2545 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2546 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2547 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2548 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2549 
2550 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2551 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2552 		    "I", "Enable/Disable Wake Up Filters");
2553 	}
2554 
2555 	/* for X552/X557-AT devices */
2556 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2557 		struct sysctl_oid *phy_node;
2558 		struct sysctl_oid_list *phy_list;
2559 
2560 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2561 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2562 		phy_list = SYSCTL_CHILDREN(phy_node);
2563 
2564 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2565 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2566 		    "I", "Current External PHY Temperature (Celsius)");
2567 
2568 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2569 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2570 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2571 		    "External PHY High Temperature Event Occurred");
2572 	}
2573 
2574 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2575 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2576 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2577 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2578 	}
2579 } /* ixgbe_add_device_sysctls */
2580 
2581 /************************************************************************
2582  * ixgbe_allocate_pci_resources
2583  ************************************************************************/
2584 static int
2585 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2586 {
2587 	struct adapter *adapter = iflib_get_softc(ctx);
2588 	device_t        dev = iflib_get_dev(ctx);
2589 	int             rid;
2590 
2591 	rid = PCIR_BAR(0);
2592 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2593 	    RF_ACTIVE);
2594 
2595 	if (!(adapter->pci_mem)) {
2596 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2597 		return (ENXIO);
2598 	}
2599 
2600 	/* Save bus_space values for READ/WRITE_REG macros */
2601 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2602 	adapter->osdep.mem_bus_space_handle =
2603 	    rman_get_bushandle(adapter->pci_mem);
2604 	/* Set hw values for shared code */
2605 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2606 
2607 	return (0);
2608 } /* ixgbe_allocate_pci_resources */
2609 
2610 /************************************************************************
2611  * ixgbe_if_detach - Device removal routine
2612  *
2613  *   Called when the driver is being removed.
2614  *   Stops the adapter and deallocates all the resources
2615  *   that were allocated for driver operation.
2616  *
2617  *   return 0 on success, positive on failure
2618  ************************************************************************/
2619 static int
2620 ixgbe_if_detach(if_ctx_t ctx)
2621 {
2622 	struct adapter *adapter = iflib_get_softc(ctx);
2623 	device_t       dev = iflib_get_dev(ctx);
2624 	u32            ctrl_ext;
2625 
2626 	INIT_DEBUGOUT("ixgbe_detach: begin");
2627 
2628 	if (ixgbe_pci_iov_detach(dev) != 0) {
2629 		device_printf(dev, "SR-IOV in use; detach first.\n");
2630 		return (EBUSY);
2631 	}
2632 
2633 	ixgbe_setup_low_power_mode(ctx);
2634 
2635 	/* let hardware know driver is unloading */
2636 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2637 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2638 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2639 
2640 	ixgbe_free_pci_resources(ctx);
2641 	free(adapter->mta, M_IXGBE);
2642 
2643 	return (0);
2644 } /* ixgbe_if_detach */
2645 
2646 /************************************************************************
2647  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2648  *
2649  *   Prepare the adapter/port for LPLU and/or WoL
2650  ************************************************************************/
2651 static int
2652 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2653 {
2654 	struct adapter  *adapter = iflib_get_softc(ctx);
2655 	struct ixgbe_hw *hw = &adapter->hw;
2656 	device_t        dev = iflib_get_dev(ctx);
2657 	s32             error = 0;
2658 
2659 	if (!hw->wol_enabled)
2660 		ixgbe_set_phy_power(hw, FALSE);
2661 
2662 	/* Limit power management flow to X550EM baseT */
2663 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2664 	    hw->phy.ops.enter_lplu) {
2665 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2666 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2667 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2668 
2669 		/*
2670 		 * Clear Wake Up Status register to prevent any previous wakeup
2671 		 * events from waking us up immediately after we suspend.
2672 		 */
2673 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2674 
2675 		/*
2676 		 * Program the Wakeup Filter Control register with user filter
2677 		 * settings
2678 		 */
2679 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2680 
2681 		/* Enable wakeups and power management in Wakeup Control */
2682 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2683 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2684 
2685 		/* X550EM baseT adapters need a special LPLU flow */
2686 		hw->phy.reset_disable = TRUE;
2687 		ixgbe_if_stop(ctx);
2688 		error = hw->phy.ops.enter_lplu(hw);
2689 		if (error)
2690 			device_printf(dev, "Error entering LPLU: %d\n", error);
2691 		hw->phy.reset_disable = FALSE;
2692 	} else {
2693 		/* Just stop for other adapters */
2694 		ixgbe_if_stop(ctx);
2695 	}
2696 
2697 	return error;
2698 } /* ixgbe_setup_low_power_mode */
2699 
2700 /************************************************************************
2701  * ixgbe_shutdown - Shutdown entry point
2702  ************************************************************************/
2703 static int
2704 ixgbe_if_shutdown(if_ctx_t ctx)
2705 {
2706 	int error = 0;
2707 
2708 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2709 
2710 	error = ixgbe_setup_low_power_mode(ctx);
2711 
2712 	return (error);
2713 } /* ixgbe_if_shutdown */
2714 
2715 /************************************************************************
2716  * ixgbe_suspend
2717  *
2718  *   From D0 to D3
2719  ************************************************************************/
2720 static int
2721 ixgbe_if_suspend(if_ctx_t ctx)
2722 {
2723 	int error = 0;
2724 
2725 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2726 
2727 	error = ixgbe_setup_low_power_mode(ctx);
2728 
2729 	return (error);
2730 } /* ixgbe_if_suspend */
2731 
2732 /************************************************************************
2733  * ixgbe_resume
2734  *
2735  *   From D3 to D0
2736  ************************************************************************/
2737 static int
2738 ixgbe_if_resume(if_ctx_t ctx)
2739 {
2740 	struct adapter  *adapter = iflib_get_softc(ctx);
2741 	device_t        dev = iflib_get_dev(ctx);
2742 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2743 	struct ixgbe_hw *hw = &adapter->hw;
2744 	u32             wus;
2745 
2746 	INIT_DEBUGOUT("ixgbe_resume: begin");
2747 
2748 	/* Read & clear WUS register */
2749 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2750 	if (wus)
2751 		device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
2753 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2754 	/* And clear WUFC until next low-power transition */
2755 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2756 
2757 	/*
2758 	 * Required after D3->D0 transition;
2759 	 * will re-advertise all previous advertised speeds
2760 	 */
2761 	if (ifp->if_flags & IFF_UP)
2762 		ixgbe_if_init(ctx);
2763 
2764 	return (0);
2765 } /* ixgbe_if_resume */
2766 
2767 /************************************************************************
2768  * ixgbe_if_mtu_set - Ioctl mtu entry point
2769  *
2770  *   Return 0 on success, EINVAL on failure
2771  ************************************************************************/
2772 static int
2773 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2774 {
2775 	struct adapter *adapter = iflib_get_softc(ctx);
2776 	int error = 0;
2777 
2778 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2779 
2780 	if (mtu > IXGBE_MAX_MTU) {
2781 		error = EINVAL;
2782 	} else {
2783 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2784 	}
2785 
2786 	return error;
2787 } /* ixgbe_if_mtu_set */
2788 
2789 /************************************************************************
2790  * ixgbe_if_crcstrip_set
2791  ************************************************************************/
2792 static void
2793 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2794 {
2795 	struct adapter *sc = iflib_get_softc(ctx);
2796 	struct ixgbe_hw *hw = &sc->hw;
2797 	/* crc stripping is set in two places:
2798 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2799 	 * IXGBE_RDRXCTL (set by the original driver in
2800 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2801 	 *	We disable the setting when netmap is compiled in).
2802 	 * We update the values here, but also in ixgbe.c because
2803 	 * init_locked sometimes is called outside our control.
2804 	 */
2805 	uint32_t hl, rxc;
2806 
2807 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2808 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2809 #ifdef NETMAP
2810 	if (netmap_verbose)
2811 		D("%s read  HLREG 0x%x rxc 0x%x",
2812 			onoff ? "enter" : "exit", hl, rxc);
2813 #endif
2814 	/* hw requirements ... */
2815 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2816 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2817 	if (onoff && !crcstrip) {
2818 		/* keep the crc. Fast rx */
2819 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2820 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2821 	} else {
2822 		/* reset default mode */
2823 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2824 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2825 	}
2826 #ifdef NETMAP
2827 	if (netmap_verbose)
2828 		D("%s write HLREG 0x%x rxc 0x%x",
2829 			onoff ? "enter" : "exit", hl, rxc);
2830 #endif
2831 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2832 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2833 } /* ixgbe_if_crcstrip_set */
2834 
2835 /*********************************************************************
2836  * ixgbe_if_init - Init entry point
2837  *
2838  *   Used in two ways: It is used by the stack as an init
2839  *   entry point in network interface structure. It is also
2840  *   used by the driver as a hw/sw initialization routine to
2841  *   get to a consistent state.
2844  **********************************************************************/
2845 void
2846 ixgbe_if_init(if_ctx_t ctx)
2847 {
2848 	struct adapter     *adapter = iflib_get_softc(ctx);
2849 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2850 	device_t           dev = iflib_get_dev(ctx);
2851 	struct ixgbe_hw *hw = &adapter->hw;
2852 	struct ix_rx_queue *rx_que;
2853 	struct ix_tx_queue *tx_que;
2854 	u32             txdctl, mhadd;
2855 	u32             rxdctl, rxctrl;
2856 	u32             ctrl_ext;
2857 
2858 	int             i, j, err;
2859 
2860 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2861 
2862 	/* Queue indices may change with IOV mode */
2863 	ixgbe_align_all_queue_indices(adapter);
2864 
2865 	/* reprogram the RAR[0] in case user changed it. */
2866 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2867 
2868 	/* Get the latest mac address, User can use a LAA */
2869 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2870 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2871 	hw->addr_ctrl.rar_used_count = 1;
2872 
2873 	ixgbe_init_hw(hw);
2874 
2875 	ixgbe_initialize_iov(adapter);
2876 
2877 	ixgbe_initialize_transmit_units(ctx);
2878 
2879 	/* Setup Multicast table */
2880 	ixgbe_if_multi_set(ctx);
2881 
2882 	/* Determine the correct mbuf pool, based on frame size */
2883 	if (adapter->max_frame_size <= MCLBYTES)
2884 		adapter->rx_mbuf_sz = MCLBYTES;
2885 	else
2886 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
2887 
2888 	/* Configure RX settings */
2889 	ixgbe_initialize_receive_units(ctx);
2890 
2891 	/*
2892 	 * Initialize variable holding task enqueue requests
2893 	 * from MSI-X interrupts
2894 	 */
2895 	adapter->task_requests = 0;
2896 
2897 	/* Enable SDP & MSI-X interrupts based on adapter */
2898 	ixgbe_config_gpie(adapter);
2899 
2900 	/* Set MTU size */
2901 	if (ifp->if_mtu > ETHERMTU) {
2902 		/* aka IXGBE_MAXFRS on 82599 and newer */
2903 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2904 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2905 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2906 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2907 	}
2908 
2909 	/* Now enable all the queues */
2910 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2911 		struct tx_ring *txr = &tx_que->txr;
2912 
2913 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2914 		txdctl |= IXGBE_TXDCTL_ENABLE;
2915 		/* Set WTHRESH to 8, burst writeback */
2916 		txdctl |= (8 << 16);
2917 		/*
2918 		 * When the internal queue falls below PTHRESH (32),
2919 		 * start prefetching as long as there are at least
2920 		 * HTHRESH (1) buffers ready. The values are taken
2921 		 * from the Intel linux driver 3.8.21.
2922 		 * Prefetching enables tx line rate even with 1 queue.
2923 		 */
2924 		txdctl |= (32 << 0) | (1 << 8);
2925 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2926 	}
2927 
2928 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2929 		struct rx_ring *rxr = &rx_que->rxr;
2930 
2931 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2932 		if (hw->mac.type == ixgbe_mac_82598EB) {
2933 			/*
2934 			 * PTHRESH = 21
2935 			 * HTHRESH = 4
2936 			 * WTHRESH = 8
2937 			 */
2938 			rxdctl &= ~0x3FFFFF;
2939 			rxdctl |= 0x080420;
2940 		}
2941 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2942 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
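		/* Poll up to ~10 ms for the RX queue enable bit to latch */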
2943 		for (j = 0; j < 10; j++) {
2944 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2945 			    IXGBE_RXDCTL_ENABLE)
2946 				break;
2947 			else
2948 				msec_delay(1);
2949 		}
2950 		wmb();
2951 	}
2952 
2953 	/* Enable Receive engine */
2954 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2955 	if (hw->mac.type == ixgbe_mac_82598EB)
2956 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2957 	rxctrl |= IXGBE_RXCTRL_RXEN;
2958 	ixgbe_enable_rx_dma(hw, rxctrl);
2959 
2960 	/* Set up MSI/MSI-X routing */
2961 	if (ixgbe_enable_msix)  {
2962 		ixgbe_configure_ivars(adapter);
2963 		/* Set up auto-mask */
2964 		if (hw->mac.type == ixgbe_mac_82598EB)
2965 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2966 		else {
2967 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2968 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2969 		}
2970 	} else {  /* Simple settings for Legacy/MSI */
2971 		ixgbe_set_ivar(adapter, 0, 0, 0);
2972 		ixgbe_set_ivar(adapter, 0, 0, 1);
2973 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2974 	}
2975 
2976 	ixgbe_init_fdir(adapter);
2977 
2978 	/*
2979 	 * Check on any SFP devices that
2980 	 * need to be kick-started
2981 	 */
2982 	if (hw->phy.type == ixgbe_phy_none) {
2983 		err = hw->phy.ops.identify(hw);
2984 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2985 			device_printf(dev,
2986 			    "Unsupported SFP+ module type was detected.\n");
2987 			return;
2988 		}
2989 	}
2990 
2991 	/* Set moderation on the Link interrupt */
2992 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
2993 
2994 	/* Enable power to the phy. */
2995 	ixgbe_set_phy_power(hw, TRUE);
2996 
2997 	/* Config/Enable Link */
2998 	ixgbe_config_link(ctx);
2999 
3000 	/* Hardware Packet Buffer & Flow Control setup */
3001 	ixgbe_config_delay_values(adapter);
3002 
3003 	/* Initialize the FC settings */
3004 	ixgbe_start_hw(hw);
3005 
3006 	/* Set up VLAN support and filter */
3007 	ixgbe_setup_vlan_hw_support(ctx);
3008 
3009 	/* Setup DMA Coalescing */
3010 	ixgbe_config_dmac(adapter);
3011 
3012 	/* And now turn on interrupts */
3013 	ixgbe_if_enable_intr(ctx);
3014 
3015 	/* Enable the use of the mailbox by the VFs */
3016 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3017 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3018 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3019 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3020 	}
3021 
3022 } /* ixgbe_if_init */
3023 
3024 /************************************************************************
3025  * ixgbe_set_ivar
3026  *
3027  *   Setup the correct IVAR register for a particular MSI-X interrupt
3028  *     (yes this is all very magic and confusing :)
3029  *    - entry is the register array entry
3030  *    - vector is the MSI-X vector for this queue
3031  *    - type is RX/TX/MISC
3032  ************************************************************************/
3033 static void
3034 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3035 {
3036 	struct ixgbe_hw *hw = &adapter->hw;
3037 	u32 ivar, index;
3038 
3039 	vector |= IXGBE_IVAR_ALLOC_VAL;
3040 
3041 	switch (hw->mac.type) {
3042 	case ixgbe_mac_82598EB:
3043 		if (type == -1)
3044 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3045 		else
3046 			entry += (type * 64);
3047 		index = (entry >> 2) & 0x1F;
3048 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3049 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3050 		ivar |= (vector << (8 * (entry & 0x3)));
3051 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3052 		break;
3053 	case ixgbe_mac_82599EB:
3054 	case ixgbe_mac_X540:
3055 	case ixgbe_mac_X550:
3056 	case ixgbe_mac_X550EM_x:
3057 	case ixgbe_mac_X550EM_a:
3058 		if (type == -1) { /* MISC IVAR */
3059 			index = (entry & 1) * 8;
3060 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3061 			ivar &= ~(0xFF << index);
3062 			ivar |= (vector << index);
3063 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3064 		} else {          /* RX/TX IVARS */
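			/*
			 * Each IVAR register covers two queues: the low 16
			 * bits hold the RX/TX vectors for the even-numbered
			 * queue and the high 16 bits those for the odd one,
			 * hence entry >> 1 and the byte offset below.
			 */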
3065 			index = (16 * (entry & 1)) + (8 * type);
3066 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3067 			ivar &= ~(0xFF << index);
3068 			ivar |= (vector << index);
3069 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3070 		}
		break;
3071 	default:
3072 		break;
3073 	}
3074 } /* ixgbe_set_ivar */
3075 
3076 /************************************************************************
3077  * ixgbe_configure_ivars
3078  ************************************************************************/
3079 static void
3080 ixgbe_configure_ivars(struct adapter *adapter)
3081 {
3082 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3083 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3084 	u32                newitr;
3085 
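	/*
	 * Convert the requested maximum interrupt rate into an EITR
	 * interval.  The interval field is aligned at bit 3 (hence the
	 * 0x0FF8 mask) and counts in roughly 2 usec units on these MACs,
	 * so 4000000 / rate yields the register value.
	 */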
3086 	if (ixgbe_max_interrupt_rate > 0)
3087 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3088 	else {
3089 		/*
3090 		 * Disable DMA coalescing if interrupt moderation is
3091 		 * disabled.
3092 		 */
3093 		adapter->dmac = 0;
3094 		newitr = 0;
3095 	}
3096 
3097 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3098 		struct rx_ring *rxr = &rx_que->rxr;
3099 
3100 		/* First the RX queue entry */
3101 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3102 
3103 		/* Set an Initial EITR value */
3104 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3105 	}
3106 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3107 		struct tx_ring *txr = &tx_que->txr;
3108 
3109 		/* ... and the TX */
3110 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3111 	}
3112 	/* For the Link interrupt */
3113 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3114 } /* ixgbe_configure_ivars */
3115 
3116 /************************************************************************
3117  * ixgbe_config_gpie
3118  ************************************************************************/
3119 static void
3120 ixgbe_config_gpie(struct adapter *adapter)
3121 {
3122 	struct ixgbe_hw *hw = &adapter->hw;
3123 	u32             gpie;
3124 
3125 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3126 
3127 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3128 		/* Enable Enhanced MSI-X mode */
3129 		gpie |= IXGBE_GPIE_MSIX_MODE
3130 		     |  IXGBE_GPIE_EIAME
3131 		     |  IXGBE_GPIE_PBA_SUPPORT
3132 		     |  IXGBE_GPIE_OCD;
3133 	}
3134 
3135 	/* Fan Failure Interrupt */
3136 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3137 		gpie |= IXGBE_SDP1_GPIEN;
3138 
3139 	/* Thermal Sensor Interrupt */
3140 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3141 		gpie |= IXGBE_SDP0_GPIEN_X540;
3142 
3143 	/* Link detection */
3144 	switch (hw->mac.type) {
3145 	case ixgbe_mac_82599EB:
3146 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3147 		break;
3148 	case ixgbe_mac_X550EM_x:
3149 	case ixgbe_mac_X550EM_a:
3150 		gpie |= IXGBE_SDP0_GPIEN_X540;
3151 		break;
3152 	default:
3153 		break;
3154 	}
3155 
3156 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3157 
3158 } /* ixgbe_config_gpie */
3159 
3160 /************************************************************************
3161  * ixgbe_config_delay_values
3162  *
3163  *   Requires adapter->max_frame_size to be set.
3164  ************************************************************************/
3165 static void
3166 ixgbe_config_delay_values(struct adapter *adapter)
3167 {
3168 	struct ixgbe_hw *hw = &adapter->hw;
3169 	u32             rxpb, frame, size, tmp;
3170 
3171 	frame = adapter->max_frame_size;
3172 
3173 	/* Calculate High Water */
3174 	switch (hw->mac.type) {
3175 	case ixgbe_mac_X540:
3176 	case ixgbe_mac_X550:
3177 	case ixgbe_mac_X550EM_x:
3178 	case ixgbe_mac_X550EM_a:
3179 		tmp = IXGBE_DV_X540(frame, frame);
3180 		break;
3181 	default:
3182 		tmp = IXGBE_DV(frame, frame);
3183 		break;
3184 	}
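	/*
	 * High water mark: the RX packet buffer size (in KB) minus the
	 * delay value computed above, converted from bytes to KB.
	 */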
3185 	size = IXGBE_BT2KB(tmp);
3186 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3187 	hw->fc.high_water[0] = rxpb - size;
3188 
3189 	/* Now calculate Low Water */
3190 	switch (hw->mac.type) {
3191 	case ixgbe_mac_X540:
3192 	case ixgbe_mac_X550:
3193 	case ixgbe_mac_X550EM_x:
3194 	case ixgbe_mac_X550EM_a:
3195 		tmp = IXGBE_LOW_DV_X540(frame);
3196 		break;
3197 	default:
3198 		tmp = IXGBE_LOW_DV(frame);
3199 		break;
3200 	}
3201 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3202 
3203 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3204 	hw->fc.send_xon = TRUE;
3205 } /* ixgbe_config_delay_values */
3206 
3207 /************************************************************************
3208  * ixgbe_mc_filter_apply - Multicast filter iterator
3209  *
3210  *   Copies one multicast address into the MTA table for ixgbe_if_multi_set().
3211  ************************************************************************/
3212 static int
3213 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3214 {
3215 	struct adapter *adapter = arg;
3216 	struct ixgbe_mc_addr *mta = adapter->mta;
3217 
3218 	if (ifma->ifma_addr->sa_family != AF_LINK)
3219 		return (0);
3220 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3221 		return (0);
3222 	bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3223 	    mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3224 	mta[count].vmdq = adapter->pool;
3225 
3226 	return (1);
3227 } /* ixgbe_mc_filter_apply */
3228 
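/************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 *   Called whenever the multicast address list is updated; rebuilds
 *   the MTA table and adjusts the FCTRL promiscuous/allmulti bits.
 ************************************************************************/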
3229 static void
3230 ixgbe_if_multi_set(if_ctx_t ctx)
3231 {
3232 	struct adapter       *adapter = iflib_get_softc(ctx);
3233 	struct ixgbe_mc_addr *mta;
3234 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3235 	u8                   *update_ptr;
3236 	int                  mcnt = 0;
3237 	u32                  fctrl;
3238 
3239 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3240 
3241 	mta = adapter->mta;
3242 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3243 
3244 	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3245 
3246 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3248 	if (ifp->if_flags & IFF_PROMISC)
3249 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3250 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3251 	    ifp->if_flags & IFF_ALLMULTI) {
3252 		fctrl |= IXGBE_FCTRL_MPE;
3253 		fctrl &= ~IXGBE_FCTRL_UPE;
3254 	} else
3255 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3256 
3257 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3258 
3259 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3260 		update_ptr = (u8 *)mta;
3261 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3262 		    ixgbe_mc_array_itr, TRUE);
3263 	}
3264 
3265 } /* ixgbe_if_multi_set */
3266 
3267 /************************************************************************
3268  * ixgbe_mc_array_itr
3269  *
3270  *   An iterator function needed by the multicast shared code.
3271  *   It feeds the shared code routine the addresses in the
3272  *   array built by ixgbe_if_multi_set() one by one.
3273  ************************************************************************/
3274 static u8 *
3275 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3276 {
3277 	struct ixgbe_mc_addr *mta;
3278 
3279 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3280 	*vmdq = mta->vmdq;
3281 
3282 	*update_ptr = (u8*)(mta + 1);
3283 
3284 	return (mta->addr);
3285 } /* ixgbe_mc_array_itr */
3286 
3287 /************************************************************************
3288  * ixgbe_if_timer - Timer routine
3289  *
3290  *   Checks for pluggable optics and link status, then
3291  *   schedules the admin task to update the stack.
3292  ************************************************************************/
3293 static void
3294 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3295 {
3296 	struct adapter *adapter = iflib_get_softc(ctx);
3297 
3298 	if (qid != 0)
3299 		return;
3300 
3301 	/* Check for pluggable optics */
3302 	if (adapter->sfp_probe)
3303 		if (!ixgbe_sfp_probe(ctx))
3304 			return; /* Nothing to do */
3305 
3306 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3307 	    &adapter->link_up, 0);
3308 
3309 	/* Fire off the adminq task */
3310 	iflib_admin_intr_deferred(ctx);
3311 
3312 } /* ixgbe_if_timer */
3313 
3314 /************************************************************************
3315  * ixgbe_sfp_probe
3316  *
3317  *   Determine if a port had optics inserted.
3318  ************************************************************************/
3319 static bool
3320 ixgbe_sfp_probe(if_ctx_t ctx)
3321 {
3322 	struct adapter  *adapter = iflib_get_softc(ctx);
3323 	struct ixgbe_hw *hw = &adapter->hw;
3324 	device_t        dev = iflib_get_dev(ctx);
3325 	bool            result = FALSE;
3326 
3327 	if ((hw->phy.type == ixgbe_phy_nl) &&
3328 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3329 		s32 ret = hw->phy.ops.identify_sfp(hw);
3330 		if (ret)
3331 			goto out;
3332 		ret = hw->phy.ops.reset(hw);
3333 		adapter->sfp_probe = FALSE;
3334 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3335 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3336 			device_printf(dev,
3337 			    "Reload driver with supported module.\n");
3338 			goto out;
3339 		} else
3340 			device_printf(dev, "SFP+ module detected!\n");
3341 		/* We now have supported optics */
3342 		result = TRUE;
3343 	}
3344 out:
3345 
3346 	return (result);
3347 } /* ixgbe_sfp_probe */
3348 
3349 /************************************************************************
3350  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3351  ************************************************************************/
3352 static void
3353 ixgbe_handle_mod(void *context)
3354 {
3355 	if_ctx_t        ctx = context;
3356 	struct adapter  *adapter = iflib_get_softc(ctx);
3357 	struct ixgbe_hw *hw = &adapter->hw;
3358 	device_t        dev = iflib_get_dev(ctx);
3359 	u32             err, cage_full = 0;
3360 
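	/*
	 * On parts that need the crosstalk workaround, module interrupts
	 * can fire spuriously; check the SFP cage presence pin in ESDP
	 * and bail out if the cage is empty.
	 */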
3361 	if (adapter->hw.need_crosstalk_fix) {
3362 		switch (hw->mac.type) {
3363 		case ixgbe_mac_82599EB:
3364 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3365 			    IXGBE_ESDP_SDP2;
3366 			break;
3367 		case ixgbe_mac_X550EM_x:
3368 		case ixgbe_mac_X550EM_a:
3369 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3370 			    IXGBE_ESDP_SDP0;
3371 			break;
3372 		default:
3373 			break;
3374 		}
3375 
3376 		if (!cage_full)
3377 			goto handle_mod_out;
3378 	}
3379 
3380 	err = hw->phy.ops.identify_sfp(hw);
3381 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3382 		device_printf(dev,
3383 		    "Unsupported SFP+ module type was detected.\n");
3384 		goto handle_mod_out;
3385 	}
3386 
3387 	if (hw->mac.type == ixgbe_mac_82598EB)
3388 		err = hw->phy.ops.reset(hw);
3389 	else
3390 		err = hw->mac.ops.setup_sfp(hw);
3391 
3392 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3393 		device_printf(dev,
3394 		    "Setup failure - unsupported SFP+ module type.\n");
3395 		goto handle_mod_out;
3396 	}
3397 	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3398 	return;
3399 
3400 handle_mod_out:
3401 	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3402 } /* ixgbe_handle_mod */
3403 
3404 
3405 /************************************************************************
3406  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3407  ************************************************************************/
3408 static void
3409 ixgbe_handle_msf(void *context)
3410 {
3411 	if_ctx_t        ctx = context;
3412 	struct adapter  *adapter = iflib_get_softc(ctx);
3413 	struct ixgbe_hw *hw = &adapter->hw;
3414 	u32             autoneg;
3415 	bool            negotiate;
3416 
3417 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3418 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3419 
3420 	autoneg = hw->phy.autoneg_advertised;
3421 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3422 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3423 	if (hw->mac.ops.setup_link)
3424 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3425 
3426 	/* Adjust media types shown in ifconfig */
3427 	ifmedia_removeall(adapter->media);
3428 	ixgbe_add_media_types(adapter->ctx);
3429 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3430 } /* ixgbe_handle_msf */
3431 
3432 /************************************************************************
3433  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3434  ************************************************************************/
3435 static void
3436 ixgbe_handle_phy(void *context)
3437 {
3438 	if_ctx_t        ctx = context;
3439 	struct adapter  *adapter = iflib_get_softc(ctx);
3440 	struct ixgbe_hw *hw = &adapter->hw;
3441 	int             error;
3442 
3443 	error = hw->phy.ops.handle_lasi(hw);
3444 	if (error == IXGBE_ERR_OVERTEMP)
3445 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3446 	else if (error)
3447 		device_printf(adapter->dev,
3448 		    "Error handling LASI interrupt: %d\n", error);
3449 } /* ixgbe_handle_phy */
3450 
3451 /************************************************************************
3452  * ixgbe_if_stop - Stop the hardware
3453  *
3454  *   Disables all traffic on the adapter by issuing a
3455  *   global reset on the MAC and deallocates TX/RX buffers.
3456  ************************************************************************/
3457 static void
3458 ixgbe_if_stop(if_ctx_t ctx)
3459 {
3460 	struct adapter  *adapter = iflib_get_softc(ctx);
3461 	struct ixgbe_hw *hw = &adapter->hw;
3462 
3463 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3464 
3465 	ixgbe_reset_hw(hw);
3466 	hw->adapter_stopped = FALSE;
3467 	ixgbe_stop_adapter(hw);
3468 	if (hw->mac.type == ixgbe_mac_82599EB)
3469 		ixgbe_stop_mac_link_on_d3_82599(hw);
3470 	/* Turn off the laser - noop with no optics */
3471 	ixgbe_disable_tx_laser(hw);
3472 
3473 	/* Update the stack */
3474 	adapter->link_up = FALSE;
3475 	ixgbe_if_update_admin_status(ctx);
3476 
3477 	/* reprogram the RAR[0] in case user changed it. */
3478 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3479 
3480 	return;
3481 } /* ixgbe_if_stop */
3482 
3483 /************************************************************************
3484  * ixgbe_if_update_admin_status - Update OS on link state
3485  *
3486  * Note: Only updates the OS on the cached link state.
3487  *       The real check of the hardware only happens with
3488  *       a link interrupt.
3489  ************************************************************************/
3490 static void
3491 ixgbe_if_update_admin_status(if_ctx_t ctx)
3492 {
3493 	struct adapter *adapter = iflib_get_softc(ctx);
3494 	device_t       dev = iflib_get_dev(ctx);
3495 
3496 	if (adapter->link_up) {
3497 		if (adapter->link_active == FALSE) {
3498 			if (bootverbose)
3499 				device_printf(dev, "Link is up %d Gbps %s \n",
3500 				    ((adapter->link_speed == 128) ? 10 : 1),
3501 				    "Full Duplex");
3502 			adapter->link_active = TRUE;
3503 			/* Update any Flow Control changes */
3504 			ixgbe_fc_enable(&adapter->hw);
3505 			/* Update DMA coalescing config */
3506 			ixgbe_config_dmac(adapter);
3507 			/* should actually be negotiated value */
3508 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3509 
3510 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3511 				ixgbe_ping_all_vfs(adapter);
3512 		}
3513 	} else { /* Link down */
3514 		if (adapter->link_active == TRUE) {
3515 			if (bootverbose)
3516 				device_printf(dev, "Link is Down\n");
3517 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3518 			adapter->link_active = FALSE;
3519 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3520 				ixgbe_ping_all_vfs(adapter);
3521 		}
3522 	}
3523 
3524 	/* Handle task requests from msix_link() */
3525 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3526 		ixgbe_handle_mod(ctx);
3527 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3528 		ixgbe_handle_msf(ctx);
3529 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3530 		ixgbe_handle_mbx(ctx);
3531 	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3532 		ixgbe_reinit_fdir(ctx);
3533 	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3534 		ixgbe_handle_phy(ctx);
3535 	adapter->task_requests = 0;
3536 
3537 	ixgbe_update_stats_counters(adapter);
3538 } /* ixgbe_if_update_admin_status */
3539 
3540 /************************************************************************
3541  * ixgbe_config_dmac - Configure DMA Coalescing
3542  ************************************************************************/
3543 static void
3544 ixgbe_config_dmac(struct adapter *adapter)
3545 {
3546 	struct ixgbe_hw          *hw = &adapter->hw;
3547 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3548 
3549 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3550 		return;
3551 
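	/* Reprogram only if the watchdog timer or link speed has changed */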
3552 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3553 	    dcfg->link_speed ^ adapter->link_speed) {
3554 		dcfg->watchdog_timer = adapter->dmac;
3555 		dcfg->fcoe_en = FALSE;
3556 		dcfg->link_speed = adapter->link_speed;
3557 		dcfg->num_tcs = 1;
3558 
3559 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3560 		    dcfg->watchdog_timer, dcfg->link_speed);
3561 
3562 		hw->mac.ops.dmac_config(hw);
3563 	}
3564 } /* ixgbe_config_dmac */
3565 
3566 /************************************************************************
3567  * ixgbe_if_enable_intr
3568  ************************************************************************/
3569 void
3570 ixgbe_if_enable_intr(if_ctx_t ctx)
3571 {
3572 	struct adapter     *adapter = iflib_get_softc(ctx);
3573 	struct ixgbe_hw    *hw = &adapter->hw;
3574 	struct ix_rx_queue *que = adapter->rx_queues;
3575 	u32                mask, fwsm;
3576 
3577 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3578 
3579 	switch (adapter->hw.mac.type) {
3580 	case ixgbe_mac_82599EB:
3581 		mask |= IXGBE_EIMS_ECC;
3582 		/* Temperature sensor on some adapters */
3583 		mask |= IXGBE_EIMS_GPI_SDP0;
3584 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3585 		mask |= IXGBE_EIMS_GPI_SDP1;
3586 		mask |= IXGBE_EIMS_GPI_SDP2;
3587 		break;
3588 	case ixgbe_mac_X540:
3589 		/* Detect if Thermal Sensor is enabled */
3590 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3591 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3592 			mask |= IXGBE_EIMS_TS;
3593 		mask |= IXGBE_EIMS_ECC;
3594 		break;
3595 	case ixgbe_mac_X550:
3596 		/* MAC thermal sensor is automatically enabled */
3597 		mask |= IXGBE_EIMS_TS;
3598 		mask |= IXGBE_EIMS_ECC;
3599 		break;
3600 	case ixgbe_mac_X550EM_x:
3601 	case ixgbe_mac_X550EM_a:
3602 		/* Some devices use SDP0 for important information */
3603 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3604 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3605 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3606 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3607 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3608 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3609 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3610 		mask |= IXGBE_EIMS_ECC;
3611 		break;
3612 	default:
3613 		break;
3614 	}
3615 
3616 	/* Enable Fan Failure detection */
3617 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3618 		mask |= IXGBE_EIMS_GPI_SDP1;
3619 	/* Enable SR-IOV */
3620 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3621 		mask |= IXGBE_EIMS_MAILBOX;
3622 	/* Enable Flow Director */
3623 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3624 		mask |= IXGBE_EIMS_FLOW_DIR;
3625 
3626 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3627 
3628 	/* With MSI-X we use auto clear */
3629 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3630 		mask = IXGBE_EIMS_ENABLE_MASK;
3631 		/* Don't autoclear Link */
3632 		mask &= ~IXGBE_EIMS_OTHER;
3633 		mask &= ~IXGBE_EIMS_LSC;
3634 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3635 			mask &= ~IXGBE_EIMS_MAILBOX;
3636 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3637 	}
3638 
3639 	/*
3640 	 * Now enable all queues, this is done separately to
3641 	 * allow for handling the extended (beyond 32) MSI-X
3642 	 * vectors that can be used by 82599
3643 	 */
3644 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3645 		ixgbe_enable_queue(adapter, que->msix);
3646 
3647 	IXGBE_WRITE_FLUSH(hw);
3648 
3649 } /* ixgbe_if_enable_intr */
3650 
3651 /************************************************************************
3652  * ixgbe_if_disable_intr
3653  ************************************************************************/
3654 static void
3655 ixgbe_if_disable_intr(if_ctx_t ctx)
3656 {
3657 	struct adapter *adapter = iflib_get_softc(ctx);
3658 
3659 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3660 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3661 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3662 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3663 	} else {
3664 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3665 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3666 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3667 	}
3668 	IXGBE_WRITE_FLUSH(&adapter->hw);
3669 
3670 } /* ixgbe_if_disable_intr */
3671 
3672 /************************************************************************
3673  * ixgbe_link_intr_enable
3674  ************************************************************************/
3675 static void
3676 ixgbe_link_intr_enable(if_ctx_t ctx)
3677 {
3678 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3679 
3680 	/* Re-enable other interrupts */
3681 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3682 } /* ixgbe_link_intr_enable */
3683 
3684 /************************************************************************
3685  * ixgbe_if_rx_queue_intr_enable
3686  ************************************************************************/
3687 static int
3688 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3689 {
3690 	struct adapter     *adapter = iflib_get_softc(ctx);
3691 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3692 
3693 	ixgbe_enable_queue(adapter, que->rxr.me);
3694 
3695 	return (0);
3696 } /* ixgbe_if_rx_queue_intr_enable */
3697 
3698 /************************************************************************
3699  * ixgbe_enable_queue
3700  ************************************************************************/
3701 static void
3702 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3703 {
3704 	struct ixgbe_hw *hw = &adapter->hw;
3705 	u64             queue = (u64)1 << vector;
3706 	u32             mask;
3707 
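	/*
	 * 82598 has a single EIMS register; later MACs split the
	 * per-vector enables across EIMS_EX(0) (vectors 0-31) and
	 * EIMS_EX(1) (vectors 32-63).
	 */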
3708 	if (hw->mac.type == ixgbe_mac_82598EB) {
3709 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3710 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3711 	} else {
3712 		mask = (queue & 0xFFFFFFFF);
3713 		if (mask)
3714 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3715 		mask = (queue >> 32);
3716 		if (mask)
3717 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3718 	}
3719 } /* ixgbe_enable_queue */
3720 
3721 /************************************************************************
3722  * ixgbe_disable_queue
3723  ************************************************************************/
3724 static void
3725 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3726 {
3727 	struct ixgbe_hw *hw = &adapter->hw;
3728 	u64             queue = (u64)1 << vector;
3729 	u32             mask;
3730 
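	/* Mirror of ixgbe_enable_queue(): clear the same bit via EIMC/EIMC_EX */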
3731 	if (hw->mac.type == ixgbe_mac_82598EB) {
3732 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3733 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3734 	} else {
3735 		mask = (queue & 0xFFFFFFFF);
3736 		if (mask)
3737 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3738 		mask = (queue >> 32);
3739 		if (mask)
3740 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3741 	}
3742 } /* ixgbe_disable_queue */
3743 
3744 /************************************************************************
3745  * ixgbe_intr - Legacy Interrupt Service Routine
3746  ************************************************************************/
3747 int
3748 ixgbe_intr(void *arg)
3749 {
3750 	struct adapter     *adapter = arg;
3751 	struct ix_rx_queue *que = adapter->rx_queues;
3752 	struct ixgbe_hw    *hw = &adapter->hw;
3753 	if_ctx_t           ctx = adapter->ctx;
3754 	u32                eicr, eicr_mask;
3755 
3756 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3757 
3758 	++que->irqs;
3759 	if (eicr == 0) {
3760 		ixgbe_if_enable_intr(ctx);
3761 		return (FILTER_HANDLED);
3762 	}
3763 
3764 	/* Check for fan failure */
3765 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3766 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3767 		device_printf(adapter->dev,
3768 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3769 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3770 	}
3771 
3772 	/* Link status change */
3773 	if (eicr & IXGBE_EICR_LSC) {
3774 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3775 		iflib_admin_intr_deferred(ctx);
3776 	}
3777 
3778 	if (ixgbe_is_sfp(hw)) {
3779 		/* Pluggable optics-related interrupt */
3780 		if (hw->mac.type >= ixgbe_mac_X540)
3781 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3782 		else
3783 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3784 
3785 		if (eicr & eicr_mask) {
3786 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3787 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3788 		}
3789 
3790 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3791 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3792 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3793 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3794 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3795 		}
3796 	}
3797 
3798 	/* External PHY interrupt */
3799 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3800 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3801 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3802 
3803 	return (FILTER_SCHEDULE_THREAD);
3804 } /* ixgbe_intr */
3805 
3806 /************************************************************************
3807  * ixgbe_free_pci_resources
3808  ************************************************************************/
3809 static void
3810 ixgbe_free_pci_resources(if_ctx_t ctx)
3811 {
3812 	struct adapter *adapter = iflib_get_softc(ctx);
3813 	struct         ix_rx_queue *que = adapter->rx_queues;
3814 	device_t       dev = iflib_get_dev(ctx);
3815 
3816 	/* Release all MSI-X queue resources */
3817 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3818 		iflib_irq_free(ctx, &adapter->irq);
3819 
3820 	if (que != NULL) {
3821 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3822 			iflib_irq_free(ctx, &que->que_irq);
3823 		}
3824 	}
3825 
3826 	if (adapter->pci_mem != NULL)
3827 		bus_release_resource(dev, SYS_RES_MEMORY,
3828 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3829 } /* ixgbe_free_pci_resources */
3830 
3831 /************************************************************************
3832  * ixgbe_sysctl_flowcntl
3833  *
3834  *   SYSCTL wrapper around setting Flow Control
3835  ************************************************************************/
3836 static int
3837 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3838 {
3839 	struct adapter *adapter;
3840 	int            error, fc;
3841 
3842 	adapter = (struct adapter *)arg1;
3843 	fc = adapter->hw.fc.current_mode;
3844 
3845 	error = sysctl_handle_int(oidp, &fc, 0, req);
3846 	if ((error) || (req->newptr == NULL))
3847 		return (error);
3848 
3849 	/* Don't bother if it's not changed */
3850 	if (fc == adapter->hw.fc.current_mode)
3851 		return (0);
3852 
3853 	return ixgbe_set_flowcntl(adapter, fc);
3854 } /* ixgbe_sysctl_flowcntl */
3855 
3856 /************************************************************************
3857  * ixgbe_set_flowcntl - Set flow control
3858  *
3859  *   Flow control values:
3860  *     0 - off
3861  *     1 - rx pause
3862  *     2 - tx pause
3863  *     3 - full
3864  ************************************************************************/
3865 static int
3866 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3867 {
3868 	switch (fc) {
3869 	case ixgbe_fc_rx_pause:
3870 	case ixgbe_fc_tx_pause:
3871 	case ixgbe_fc_full:
3872 		adapter->hw.fc.requested_mode = fc;
3873 		if (adapter->num_rx_queues > 1)
3874 			ixgbe_disable_rx_drop(adapter);
3875 		break;
3876 	case ixgbe_fc_none:
3877 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3878 		if (adapter->num_rx_queues > 1)
3879 			ixgbe_enable_rx_drop(adapter);
3880 		break;
3881 	default:
3882 		return (EINVAL);
3883 	}
3884 
3885 	/* Don't autoneg if forcing a value */
3886 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3887 	ixgbe_fc_enable(&adapter->hw);
3888 
3889 	return (0);
3890 } /* ixgbe_set_flowcntl */
3891 
3892 /************************************************************************
3893  * ixgbe_enable_rx_drop
3894  *
3895  *   Enable the hardware to drop packets when the buffer is
3896  *   full. This is useful with multiqueue, so that no single
3897  *   queue being full stalls the entire RX engine. We only
3898  *   enable this when Multiqueue is enabled AND Flow Control
3899  *   is disabled.
3900  ************************************************************************/
3901 static void
3902 ixgbe_enable_rx_drop(struct adapter *adapter)
3903 {
3904 	struct ixgbe_hw *hw = &adapter->hw;
3905 	struct rx_ring  *rxr;
3906 	u32             srrctl;
3907 
3908 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3909 		rxr = &adapter->rx_queues[i].rxr;
3910 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3911 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3912 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3913 	}
3914 
3915 	/* enable drop for each vf */
3916 	for (int i = 0; i < adapter->num_vfs; i++) {
3917 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3918 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3919 		                IXGBE_QDE_ENABLE));
3920 	}
3921 } /* ixgbe_enable_rx_drop */
3922 
3923 /************************************************************************
3924  * ixgbe_disable_rx_drop
3925  ************************************************************************/
3926 static void
3927 ixgbe_disable_rx_drop(struct adapter *adapter)
3928 {
3929 	struct ixgbe_hw *hw = &adapter->hw;
3930 	struct rx_ring  *rxr;
3931 	u32             srrctl;
3932 
3933 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3934 		rxr = &adapter->rx_queues[i].rxr;
3935 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3936 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3937 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3938 	}
3939 
3940 	/* disable drop for each vf */
3941 	for (int i = 0; i < adapter->num_vfs; i++) {
3942 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3943 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3944 	}
3945 } /* ixgbe_disable_rx_drop */
3946 
3947 /************************************************************************
3948  * ixgbe_sysctl_advertise
3949  *
3950  *   SYSCTL wrapper around setting advertised speed
3951  ************************************************************************/
3952 static int
3953 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3954 {
3955 	struct adapter *adapter;
3956 	int            error, advertise;
3957 
3958 	adapter = (struct adapter *)arg1;
3959 	advertise = adapter->advertise;
3960 
3961 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3962 	if ((error) || (req->newptr == NULL))
3963 		return (error);
3964 
3965 	return ixgbe_set_advertise(adapter, advertise);
3966 } /* ixgbe_sysctl_advertise */
3967 
3968 /************************************************************************
3969  * ixgbe_set_advertise - Control advertised link speed
3970  *
3971  *   Flags:
3972  *     0x1 - advertise 100 Mb
3973  *     0x2 - advertise 1G
3974  *     0x4 - advertise 10G
3975  *     0x8 - advertise 10 Mb (yes, Mb)
3976  ************************************************************************/
3977 static int
3978 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3979 {
3980 	device_t         dev = iflib_get_dev(adapter->ctx);
3981 	struct ixgbe_hw  *hw;
3982 	ixgbe_link_speed speed = 0;
3983 	ixgbe_link_speed link_caps = 0;
3984 	s32              err = IXGBE_NOT_IMPLEMENTED;
3985 	bool             negotiate = FALSE;
3986 
3987 	/* Checks to validate new value */
3988 	if (adapter->advertise == advertise) /* no change */
3989 		return (0);
3990 
3991 	hw = &adapter->hw;
3992 
3993 	/* No speed changes for backplane media */
3994 	if (hw->phy.media_type == ixgbe_media_type_backplane)
3995 		return (ENODEV);
3996 
3997 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
3998 	      (hw->phy.multispeed_fiber))) {
3999 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4000 		return (EINVAL);
4001 	}
4002 
4003 	if (advertise < 0x1 || advertise > 0xF) {
4004 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4005 		return (EINVAL);
4006 	}
4007 
4008 	if (hw->mac.ops.get_link_capabilities) {
4009 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4010 		    &negotiate);
4011 		if (err != IXGBE_SUCCESS) {
4012 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4013 			return (ENODEV);
4014 		}
4015 	}
4016 
4017 	/* Set new value and report new advertised mode */
4018 	if (advertise & 0x1) {
4019 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4020 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4021 			return (EINVAL);
4022 		}
4023 		speed |= IXGBE_LINK_SPEED_100_FULL;
4024 	}
4025 	if (advertise & 0x2) {
4026 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4027 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4028 			return (EINVAL);
4029 		}
4030 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4031 	}
4032 	if (advertise & 0x4) {
4033 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4034 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4035 			return (EINVAL);
4036 		}
4037 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4038 	}
4039 	if (advertise & 0x8) {
4040 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4041 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4042 			return (EINVAL);
4043 		}
4044 		speed |= IXGBE_LINK_SPEED_10_FULL;
4045 	}
4046 
4047 	hw->mac.autotry_restart = TRUE;
4048 	hw->mac.ops.setup_link(hw, speed, TRUE);
4049 	adapter->advertise = advertise;
4050 
4051 	return (0);
4052 } /* ixgbe_set_advertise */
4053 
4054 /************************************************************************
4055  * ixgbe_get_advertise - Get current advertised speed settings
4056  *
4057  *   Formatted for sysctl usage.
4058  *   Flags:
4059  *     0x1 - advertise 100 Mb
4060  *     0x2 - advertise 1G
4061  *     0x4 - advertise 10G
4062  *     0x8 - advertise 10 Mb (yes, Mb)
4063  ************************************************************************/
4064 static int
4065 ixgbe_get_advertise(struct adapter *adapter)
4066 {
4067 	struct ixgbe_hw  *hw = &adapter->hw;
4068 	int              speed;
4069 	ixgbe_link_speed link_caps = 0;
4070 	s32              err;
4071 	bool             negotiate = FALSE;
4072 
4073 	/*
4074 	 * Advertised speed means nothing unless it's copper or
4075 	 * multi-speed fiber
4076 	 */
4077 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4078 	    !(hw->phy.multispeed_fiber))
4079 		return (0);
4080 
4081 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4082 	if (err != IXGBE_SUCCESS)
4083 		return (0);
4084 
4085 	speed =
4086 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4087 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4088 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4089 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4090 
4091 	return speed;
4092 } /* ixgbe_get_advertise */
4093 
4094 /************************************************************************
4095  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4096  *
4097  *   Control values:
4098  *     0/1 - off / on (use default value of 1000)
4099  *
4100  *     Legal timer values are:
4101  *     50,100,250,500,1000,2000,5000,10000
4102  *
4103  *     Turning off interrupt moderation will also turn this off.
4104  ************************************************************************/
4105 static int
4106 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4107 {
4108 	struct adapter *adapter = (struct adapter *)arg1;
4109 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4110 	int            error;
4111 	u16            newval;
4112 
4113 	newval = adapter->dmac;
4114 	error = sysctl_handle_16(oidp, &newval, 0, req);
4115 	if ((error) || (req->newptr == NULL))
4116 		return (error);
4117 
4118 	switch (newval) {
4119 	case 0:
4120 		/* Disabled */
4121 		adapter->dmac = 0;
4122 		break;
4123 	case 1:
4124 		/* Enable and use default */
4125 		adapter->dmac = 1000;
4126 		break;
4127 	case 50:
4128 	case 100:
4129 	case 250:
4130 	case 500:
4131 	case 1000:
4132 	case 2000:
4133 	case 5000:
4134 	case 10000:
4135 		/* Legal values - allow */
4136 		adapter->dmac = newval;
4137 		break;
4138 	default:
4139 		/* Do nothing, illegal value */
4140 		return (EINVAL);
4141 	}
4142 
4143 	/* Re-initialize hardware if it's already running */
4144 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4145 		ifp->if_init(ifp);
4146 
4147 	return (0);
4148 } /* ixgbe_sysctl_dmac */
4149 
4150 #ifdef IXGBE_DEBUG
4151 /************************************************************************
4152  * ixgbe_sysctl_power_state
4153  *
4154  *   Sysctl to test power states
4155  *   Values:
4156  *     0      - set device to D0
4157  *     3      - set device to D3
4158  *     (none) - get current device power state
4159  ************************************************************************/
4160 static int
4161 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4162 {
4163 	struct adapter *adapter = (struct adapter *)arg1;
4164 	device_t       dev = adapter->dev;
4165 	int            curr_ps, new_ps, error = 0;
4166 
4167 	curr_ps = new_ps = pci_get_powerstate(dev);
4168 
4169 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4170 	if ((error) || (req->newptr == NULL))
4171 		return (error);
4172 
4173 	if (new_ps == curr_ps)
4174 		return (0);
4175 
4176 	if (new_ps == 3 && curr_ps == 0)
4177 		error = DEVICE_SUSPEND(dev);
4178 	else if (new_ps == 0 && curr_ps == 3)
4179 		error = DEVICE_RESUME(dev);
4180 	else
4181 		return (EINVAL);
4182 
4183 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4184 
4185 	return (error);
4186 } /* ixgbe_sysctl_power_state */
4187 #endif
4188 
4189 /************************************************************************
4190  * ixgbe_sysctl_wol_enable
4191  *
4192  *   Sysctl to enable/disable the WoL capability,
4193  *   if supported by the adapter.
4194  *
4195  *   Values:
4196  *     0 - disabled
4197  *     1 - enabled
4198  ************************************************************************/
4199 static int
4200 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4201 {
4202 	struct adapter  *adapter = (struct adapter *)arg1;
4203 	struct ixgbe_hw *hw = &adapter->hw;
4204 	int             new_wol_enabled;
4205 	int             error = 0;
4206 
4207 	new_wol_enabled = hw->wol_enabled;
4208 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4209 	if ((error) || (req->newptr == NULL))
4210 		return (error);
4211 	new_wol_enabled = !!(new_wol_enabled);
4212 	if (new_wol_enabled == hw->wol_enabled)
4213 		return (0);
4214 
4215 	if (new_wol_enabled > 0 && !adapter->wol_support)
4216 		return (ENODEV);
4217 	else
4218 		hw->wol_enabled = new_wol_enabled;
4219 
4220 	return (0);
4221 } /* ixgbe_sysctl_wol_enable */
4222 
4223 /************************************************************************
4224  * ixgbe_sysctl_wufc - Wake Up Filter Control
4225  *
4226  *   Sysctl to enable/disable the types of packets that the
4227  *   adapter will wake up on upon receipt.
4228  *   Flags:
4229  *     0x1  - Link Status Change
4230  *     0x2  - Magic Packet
4231  *     0x4  - Direct Exact
4232  *     0x8  - Directed Multicast
4233  *     0x10 - Broadcast
4234  *     0x20 - ARP/IPv4 Request Packet
4235  *     0x40 - Direct IPv4 Packet
4236  *     0x80 - Direct IPv6 Packet
4237  *
4238  *   Settings not listed above will cause the sysctl to return an error.
4239  ************************************************************************/
4240 static int
4241 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4242 {
4243 	struct adapter *adapter = (struct adapter *)arg1;
4244 	int            error = 0;
4245 	u32            new_wufc;
4246 
4247 	new_wufc = adapter->wufc;
4248 
4249 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4250 	if ((error) || (req->newptr == NULL))
4251 		return (error);
4252 	if (new_wufc == adapter->wufc)
4253 		return (0);
4254 
4255 	if (new_wufc & 0xffffff00)
4256 		return (EINVAL);
4257 
4258 	new_wufc &= 0xff;
4259 	new_wufc |= (0xffffff & adapter->wufc);
4260 	adapter->wufc = new_wufc;
4261 
4262 	return (0);
4263 } /* ixgbe_sysctl_wufc */
4264 
4265 #ifdef IXGBE_DEBUG
4266 /************************************************************************
4267  * ixgbe_sysctl_print_rss_config
4268  ************************************************************************/
4269 static int
4270 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4271 {
4272 	struct adapter  *adapter = (struct adapter *)arg1;
4273 	struct ixgbe_hw *hw = &adapter->hw;
4274 	device_t        dev = adapter->dev;
4275 	struct sbuf     *buf;
4276 	int             error = 0, reta_size;
4277 	u32             reg;
4278 
4279 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4280 	if (!buf) {
4281 		device_printf(dev, "Could not allocate sbuf for output.\n");
4282 		return (ENOMEM);
4283 	}
4284 
4285 	// TODO: use sbufs to make a string to print out
4286 	/* Set multiplier for RETA setup and table size based on MAC */
4287 	switch (adapter->hw.mac.type) {
4288 	case ixgbe_mac_X550:
4289 	case ixgbe_mac_X550EM_x:
4290 	case ixgbe_mac_X550EM_a:
4291 		reta_size = 128;
4292 		break;
4293 	default:
4294 		reta_size = 32;
4295 		break;
4296 	}
4297 
4298 	/* Print out the redirection table */
4299 	sbuf_cat(buf, "\n");
4300 	for (int i = 0; i < reta_size; i++) {
4301 		if (i < 32) {
4302 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4303 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4304 		} else {
4305 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4306 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4307 		}
4308 	}
4309 
4310 	// TODO: print more config
4311 
4312 	error = sbuf_finish(buf);
4313 	if (error)
4314 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4315 
4316 	sbuf_delete(buf);
4317 
4318 	return (0);
4319 } /* ixgbe_sysctl_print_rss_config */
4320 #endif /* IXGBE_DEBUG */
4321 
4322 /************************************************************************
4323  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4324  *
4325  *   For X552/X557-AT devices using an external PHY
4326  ************************************************************************/
4327 static int
4328 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4329 {
4330 	struct adapter  *adapter = (struct adapter *)arg1;
4331 	struct ixgbe_hw *hw = &adapter->hw;
4332 	u16             reg;
4333 
4334 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4335 		device_printf(iflib_get_dev(adapter->ctx),
4336 		    "Device has no supported external thermal sensor.\n");
4337 		return (ENODEV);
4338 	}
4339 
4340 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4341 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4342 		device_printf(iflib_get_dev(adapter->ctx),
4343 		    "Error reading from PHY's current temperature register\n");
4344 		return (EAGAIN);
4345 	}
4346 
4347 	/* Shift temp for output */
4348 	reg = reg >> 8;
4349 
4350 	return (sysctl_handle_16(oidp, NULL, reg, req));
4351 } /* ixgbe_sysctl_phy_temp */
4352 
4353 /************************************************************************
4354  * ixgbe_sysctl_phy_overtemp_occurred
4355  *
4356  *   Reports (directly from the PHY) whether the current PHY
4357  *   temperature is over the overtemp threshold.
4358  ************************************************************************/
4359 static int
4360 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4361 {
4362 	struct adapter  *adapter = (struct adapter *)arg1;
4363 	struct ixgbe_hw *hw = &adapter->hw;
4364 	u16             reg;
4365 
4366 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4367 		device_printf(iflib_get_dev(adapter->ctx),
4368 		    "Device has no supported external thermal sensor.\n");
4369 		return (ENODEV);
4370 	}
4371 
4372 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4373 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4374 		device_printf(iflib_get_dev(adapter->ctx),
4375 		    "Error reading from PHY's temperature status register\n");
4376 		return (EAGAIN);
4377 	}
4378 
4379 	/* Get occurrence bit */
4380 	reg = !!(reg & 0x4000);
4381 
4382 	return (sysctl_handle_16(oidp, 0, reg, req));
4383 } /* ixgbe_sysctl_phy_overtemp_occurred */
4384 
4385 /************************************************************************
4386  * ixgbe_sysctl_eee_state
4387  *
4388  *   Sysctl to set EEE power saving feature
4389  *   Values:
4390  *     0      - disable EEE
4391  *     1      - enable EEE
4392  *     (none) - get current device EEE state
4393  ************************************************************************/
4394 static int
4395 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4396 {
4397 	struct adapter *adapter = (struct adapter *)arg1;
4398 	device_t       dev = adapter->dev;
4399 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4400 	int            curr_eee, new_eee, error = 0;
4401 	s32            retval;
4402 
4403 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4404 
4405 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4406 	if ((error) || (req->newptr == NULL))
4407 		return (error);
4408 
4409 	/* Nothing to do */
4410 	if (new_eee == curr_eee)
4411 		return (0);
4412 
4413 	/* Not supported */
4414 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4415 		return (EINVAL);
4416 
4417 	/* Bounds checking */
4418 	if ((new_eee < 0) || (new_eee > 1))
4419 		return (EINVAL);
4420 
4421 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4422 	if (retval) {
4423 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4424 		return (EINVAL);
4425 	}
4426 
4427 	/* Restart auto-neg */
4428 	ifp->if_init(ifp);
4429 
4430 	device_printf(dev, "New EEE state: %d\n", new_eee);
4431 
4432 	/* Cache new value */
4433 	if (new_eee)
4434 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4435 	else
4436 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4437 
4438 	return (error);
4439 } /* ixgbe_sysctl_eee_state */
4440 
4441 /************************************************************************
4442  * ixgbe_init_device_features
4443  ************************************************************************/
4444 static void
4445 ixgbe_init_device_features(struct adapter *adapter)
4446 {
4447 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4448 	                  | IXGBE_FEATURE_RSS
4449 	                  | IXGBE_FEATURE_MSI
4450 	                  | IXGBE_FEATURE_MSIX
4451 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4452 
4453 	/* Set capabilities first... */
4454 	switch (adapter->hw.mac.type) {
4455 	case ixgbe_mac_82598EB:
4456 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4457 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4458 		break;
4459 	case ixgbe_mac_X540:
4460 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4461 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4462 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4463 		    (adapter->hw.bus.func == 0))
4464 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4465 		break;
4466 	case ixgbe_mac_X550:
4467 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4468 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4469 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4470 		break;
4471 	case ixgbe_mac_X550EM_x:
4472 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4473 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4474 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4475 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4476 		break;
4477 	case ixgbe_mac_X550EM_a:
4478 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4479 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4480 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4481 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4482 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4483 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4484 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4485 		}
4486 		break;
4487 	case ixgbe_mac_82599EB:
4488 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4489 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4490 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4491 		    (adapter->hw.bus.func == 0))
4492 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4493 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4494 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4495 		break;
4496 	default:
4497 		break;
4498 	}
4499 
4500 	/* Enabled by default... */
4501 	/* Fan failure detection */
4502 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4503 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4504 	/* Netmap */
4505 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4506 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4507 	/* EEE */
4508 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4509 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4510 	/* Thermal Sensor */
4511 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4512 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4513 
4514 	/* Enabled via global sysctl... */
4515 	/* Flow Director */
4516 	if (ixgbe_enable_fdir) {
4517 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4518 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4519 		else
4520 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4521 	}
4522 	/*
4523 	 * Message Signaled Interrupts - Extended (MSI-X)
4524 	 * Normal MSI is only enabled if MSI-X calls fail.
4525 	 */
4526 	if (!ixgbe_enable_msix)
4527 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4528 	/* Receive-Side Scaling (RSS) */
4529 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4530 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4531 
4532 	/* Disable features with unmet dependencies... */
4533 	/* No MSI-X */
4534 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4535 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4536 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4537 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4538 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4539 	}
4540 } /* ixgbe_init_device_features */
4541 
4542 /************************************************************************
4543  * ixgbe_check_fan_failure
4544  ************************************************************************/
4545 static void
4546 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4547 {
4548 	u32 mask;
4549 
4550 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4551 	    IXGBE_ESDP_SDP1;
4552 
4553 	if (reg & mask)
4554 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4555 } /* ixgbe_check_fan_failure */
4556