xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 31d62a73c2e6ac0ff413a7a17700ffc7dce254ef)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select devices to load on
57  *   The last field is the device description string
58  *   Last entry must be PVID_END (all 0s)
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
124 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
125 static int  ixgbe_if_media_change(if_ctx_t ctx);
126 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
127 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
128 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
129 static void ixgbe_if_multi_set(if_ctx_t ctx);
130 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
131 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
132                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
133 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
134                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
135 static void ixgbe_if_queues_free(if_ctx_t ctx);
136 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
137 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
138 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
139 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
140 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
141 int ixgbe_intr(void *arg);
142 
143 /************************************************************************
144  * Function prototypes
145  ************************************************************************/
146 #if __FreeBSD_version >= 1100036
147 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
148 #endif
149 
150 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
151 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
153 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
154 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
155 
156 static void ixgbe_config_dmac(struct adapter *adapter);
157 static void ixgbe_configure_ivars(struct adapter *adapter);
158 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
159                            s8 type);
160 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
161 static bool ixgbe_sfp_probe(if_ctx_t ctx);
162 
163 static void ixgbe_free_pci_resources(if_ctx_t ctx);
164 
165 static int  ixgbe_msix_link(void *arg);
166 static int  ixgbe_msix_que(void *arg);
167 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
168 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
169 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
170 
171 static int  ixgbe_setup_interface(if_ctx_t ctx);
172 static void ixgbe_init_device_features(struct adapter *adapter);
173 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
174 static void ixgbe_add_media_types(if_ctx_t ctx);
175 static void ixgbe_update_stats_counters(struct adapter *adapter);
176 static void ixgbe_config_link(struct adapter *adapter);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static void ixgbe_check_wol_support(struct adapter *adapter);
179 static void ixgbe_enable_rx_drop(struct adapter *);
180 static void ixgbe_disable_rx_drop(struct adapter *);
181 
182 static void ixgbe_add_hw_stats(struct adapter *adapter);
183 static int  ixgbe_set_flowcntl(struct adapter *, int);
184 static int  ixgbe_set_advertise(struct adapter *, int);
185 static int  ixgbe_get_advertise(struct adapter *);
186 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
187 static void ixgbe_config_gpie(struct adapter *adapter);
188 static void ixgbe_config_delay_values(struct adapter *adapter);
189 
190 /* Sysctl handlers */
191 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
192 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
197 #ifdef IXGBE_DEBUG
198 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
199 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
200 #endif
201 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
202 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
208 
209 /* Deferred interrupt tasklets */
210 static void ixgbe_handle_msf(void *);
211 static void ixgbe_handle_mod(void *);
212 static void ixgbe_handle_phy(void *);
213 
214 /************************************************************************
215  *  FreeBSD Device Interface Entry Points
216  ************************************************************************/
217 static device_method_t ix_methods[] = {
218 	/* Device interface */
219 	DEVMETHOD(device_register, ixgbe_register),
220 	DEVMETHOD(device_probe, iflib_device_probe),
221 	DEVMETHOD(device_attach, iflib_device_attach),
222 	DEVMETHOD(device_detach, iflib_device_detach),
223 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
224 	DEVMETHOD(device_suspend, iflib_device_suspend),
225 	DEVMETHOD(device_resume, iflib_device_resume),
226 #ifdef PCI_IOV
227 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
228 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
229 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
230 #endif /* PCI_IOV */
231 	DEVMETHOD_END
232 };
233 
234 static driver_t ix_driver = {
235 	"ix", ix_methods, sizeof(struct adapter),
236 };
237 
238 devclass_t ix_devclass;
239 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
240 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
241 MODULE_DEPEND(ix, pci, 1, 1, 1);
242 MODULE_DEPEND(ix, ether, 1, 1, 1);
243 MODULE_DEPEND(ix, iflib, 1, 1, 1);
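/*
 * When built as a module, this driver is typically installed as if_ix.ko;
 * for example, it can be loaded at boot by adding if_ix_load="YES" to
 * /boot/loader.conf, or at runtime with "kldload if_ix".
 */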
244 
245 static device_method_t ixgbe_if_methods[] = {
246 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
247 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
248 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
249 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
250 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
251 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
252 	DEVMETHOD(ifdi_init, ixgbe_if_init),
253 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
254 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
255 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
256 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
257 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
259 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
260 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
261 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
262 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
263 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
264 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
265 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
266 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
267 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
268 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
269 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
270 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
271 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
272 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
273 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
274 #ifdef PCI_IOV
275 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
276 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
277 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
278 #endif /* PCI_IOV */
279 	DEVMETHOD_END
280 };
281 
282 /*
283  * TUNEABLE PARAMETERS:
284  */
285 
286 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
287 static driver_t ixgbe_if_driver = {
288   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
289 };
290 
291 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
292 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
293     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
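/*
 * This tunable is used to derive the initial EITR (interrupt throttle)
 * value programmed for each queue vector; the per-queue interrupt_rate
 * sysctl handler below exposes the same mechanism at runtime.
 */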
294 
295 /* Flow control setting, default to full */
296 static int ixgbe_flow_control = ixgbe_fc_full;
297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
299 
300 /* Advertise Speed, default to 0 (auto) */
301 static int ixgbe_advertise_speed = 0;
302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
304 
305 /*
306  * Smart speed setting, default to on.
307  * This only works as a compile option
308  * right now, since it is applied during
309  * attach; set this to 'ixgbe_smart_speed_off'
310  * to disable.
311  */
312 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
313 
314 /*
315  * MSI-X should be the default for best performance,
316  * but this allows it to be forced off for testing.
317  */
318 static int ixgbe_enable_msix = 1;
319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320     "Enable MSI-X interrupts");
321 
322 /*
323  * Setting this allows the use of unsupported
324  * SFP+ modules; note that in doing so you
325  * are on your own :)
326  */
327 static int allow_unsupported_sfp = FALSE;
328 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
329     &allow_unsupported_sfp, 0,
330     "Allow unsupported SFP modules...use at your own risk");
331 
332 /*
333  * Not sure if Flow Director is fully baked,
334  * so we'll default to turning it off.
335  */
336 static int ixgbe_enable_fdir = 0;
337 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
338     "Enable Flow Director");
339 
340 /* Receive-Side Scaling */
341 static int ixgbe_enable_rss = 1;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
343     "Enable Receive-Side Scaling (RSS)");
344 
345 #if 0
346 /* Keep running tab on them for sanity check */
347 static int ixgbe_total_ports;
348 #endif
349 
350 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
351 
352 /*
353  * For Flow Director: this is the rate at which we sample TX packets
354  * for the filter pool; at the default of 20, every 20th packet is probed.
355  *
356  * This feature can be disabled by setting this to 0.
357  */
358 static int atr_sample_rate = 20;
359 
360 extern struct if_txrx ixgbe_txrx;
361 
362 static struct if_shared_ctx ixgbe_sctx_init = {
363 	.isc_magic = IFLIB_MAGIC,
364 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
365 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
366 	.isc_tx_maxsegsize = PAGE_SIZE,
367 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
368 	.isc_tso_maxsegsize = PAGE_SIZE,
369 	.isc_rx_maxsize = PAGE_SIZE*4,
370 	.isc_rx_nsegments = 1,
371 	.isc_rx_maxsegsize = PAGE_SIZE*4,
372 	.isc_nfl = 1,
373 	.isc_ntxqs = 1,
374 	.isc_nrxqs = 1,
375 
376 	.isc_admin_intrcnt = 1,
377 	.isc_vendor_info = ixgbe_vendor_info_array,
378 	.isc_driver_version = ixgbe_driver_version,
379 	.isc_driver = &ixgbe_if_driver,
380 
381 	.isc_nrxd_min = {MIN_RXD},
382 	.isc_ntxd_min = {MIN_TXD},
383 	.isc_nrxd_max = {MAX_RXD},
384 	.isc_ntxd_max = {MAX_TXD},
385 	.isc_nrxd_default = {DEFAULT_RXD},
386 	.isc_ntxd_default = {DEFAULT_TXD},
387 };
388 
389 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
390 
391 /************************************************************************
392  * ixgbe_if_tx_queues_alloc
393  ************************************************************************/
394 static int
395 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
396                          int ntxqs, int ntxqsets)
397 {
398 	struct adapter     *adapter = iflib_get_softc(ctx);
399 	if_softc_ctx_t     scctx = adapter->shared;
400 	struct ix_tx_queue *que;
401 	int                i, j, error;
402 
403 	MPASS(adapter->num_tx_queues > 0);
404 	MPASS(adapter->num_tx_queues == ntxqsets);
405 	MPASS(ntxqs == 1);
406 
407 	/* Allocate queue structure memory */
408 	adapter->tx_queues =
409 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
410 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
411 	if (!adapter->tx_queues) {
412 		device_printf(iflib_get_dev(ctx),
413 		    "Unable to allocate TX ring memory\n");
414 		return (ENOMEM);
415 	}
416 
417 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
418 		struct tx_ring *txr = &que->txr;
419 
420 		/* In case SR-IOV is enabled, align the index properly */
421 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
422 		    i);
423 
424 		txr->adapter = que->adapter = adapter;
425 		adapter->active_queues |= (u64)1 << txr->me;
426 
427 		/* Allocate report status array */
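		/*
		 * tx_rsq records, in submission order, the descriptor
		 * indices at which a Report Status write-back was requested;
		 * the transmit completion path consumes these entries to
		 * determine how far the hardware has advanced.
		 */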
428 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
429 		if (txr->tx_rsq == NULL) {
430 			error = ENOMEM;
431 			goto fail;
432 		}
433 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
434 			txr->tx_rsq[j] = QIDX_INVALID;
435 		/* get the virtual and physical address of the hardware queues */
436 		txr->tail = IXGBE_TDT(txr->me);
437 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
438 		txr->tx_paddr = paddrs[i];
439 
440 		txr->bytes = 0;
441 		txr->total_packets = 0;
442 
443 		/* Set the rate at which we sample packets */
444 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
445 			txr->atr_sample = atr_sample_rate;
446 
447 	}
448 
449 	iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod,
450 	    "mod_task");
451 	iflib_config_gtask_init(ctx, &adapter->msf_task, ixgbe_handle_msf,
452 	    "msf_task");
453 	iflib_config_gtask_init(ctx, &adapter->phy_task, ixgbe_handle_phy,
454 	    "phy_task");
455 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
456 		iflib_config_gtask_init(ctx, &adapter->mbx_task,
457 		    ixgbe_handle_mbx, "mbx_task");
458 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
459 		iflib_config_gtask_init(ctx, &adapter->fdir_task,
460 		    ixgbe_reinit_fdir, "fdir_task");
461 
462 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
463 	    adapter->num_tx_queues);
464 
465 	return (0);
466 
467 fail:
468 	ixgbe_if_queues_free(ctx);
469 
470 	return (error);
471 } /* ixgbe_if_tx_queues_alloc */
472 
473 /************************************************************************
474  * ixgbe_if_rx_queues_alloc
475  ************************************************************************/
476 static int
477 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
478                          int nrxqs, int nrxqsets)
479 {
480 	struct adapter     *adapter = iflib_get_softc(ctx);
481 	struct ix_rx_queue *que;
482 	int                i;
483 
484 	MPASS(adapter->num_rx_queues > 0);
485 	MPASS(adapter->num_rx_queues == nrxqsets);
486 	MPASS(nrxqs == 1);
487 
488 	/* Allocate queue structure memory */
489 	adapter->rx_queues =
490 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
491 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
492 	if (!adapter->rx_queues) {
493 		device_printf(iflib_get_dev(ctx),
494 		    "Unable to allocate RX ring memory\n");
495 		return (ENOMEM);
496 	}
497 
498 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
499 		struct rx_ring *rxr = &que->rxr;
500 
501 		/* In case SR-IOV is enabled, align the index properly */
502 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
503 		    i);
504 
505 		rxr->adapter = que->adapter = adapter;
506 
507 		/* get the virtual and physical address of the hw queues */
508 		rxr->tail = IXGBE_RDT(rxr->me);
509 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
510 		rxr->rx_paddr = paddrs[i];
511 		rxr->bytes = 0;
512 		rxr->que = que;
513 	}
514 
515 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
516 	    adapter->num_rx_queues);
517 
518 	return (0);
519 } /* ixgbe_if_rx_queues_alloc */
520 
521 /************************************************************************
522  * ixgbe_if_queues_free
523  ************************************************************************/
524 static void
525 ixgbe_if_queues_free(if_ctx_t ctx)
526 {
527 	struct adapter     *adapter = iflib_get_softc(ctx);
528 	struct ix_tx_queue *tx_que = adapter->tx_queues;
529 	struct ix_rx_queue *rx_que = adapter->rx_queues;
530 	int                i;
531 
532 	if (tx_que != NULL) {
533 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
534 			struct tx_ring *txr = &tx_que->txr;
535 			if (txr->tx_rsq == NULL)
536 				break;
537 
538 			free(txr->tx_rsq, M_IXGBE);
539 			txr->tx_rsq = NULL;
540 		}
541 
542 		free(adapter->tx_queues, M_IXGBE);
543 		adapter->tx_queues = NULL;
544 	}
545 	if (rx_que != NULL) {
546 		free(adapter->rx_queues, M_IXGBE);
547 		adapter->rx_queues = NULL;
548 	}
549 } /* ixgbe_if_queues_free */
550 
551 /************************************************************************
552  * ixgbe_initialize_rss_mapping
553  ************************************************************************/
554 static void
555 ixgbe_initialize_rss_mapping(struct adapter *adapter)
556 {
557 	struct ixgbe_hw *hw = &adapter->hw;
558 	u32             reta = 0, mrqc, rss_key[10];
559 	int             queue_id, table_size, index_mult;
560 	int             i, j;
561 	u32             rss_hash_config;
562 
563 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
564 		/* Fetch the configured RSS key */
565 		rss_getkey((uint8_t *)&rss_key);
566 	} else {
567 		/* set up random bits */
568 		arc4rand(&rss_key, sizeof(rss_key), 0);
569 	}
570 
571 	/* Set multiplier for RETA setup and table size based on MAC */
572 	index_mult = 0x1;
573 	table_size = 128;
574 	switch (adapter->hw.mac.type) {
575 	case ixgbe_mac_82598EB:
576 		index_mult = 0x11;
577 		break;
578 	case ixgbe_mac_X550:
579 	case ixgbe_mac_X550EM_x:
580 	case ixgbe_mac_X550EM_a:
581 		table_size = 512;
582 		break;
583 	default:
584 		break;
585 	}
586 
587 	/* Set up the redirection table */
588 	for (i = 0, j = 0; i < table_size; i++, j++) {
589 		if (j == adapter->num_rx_queues)
590 			j = 0;
591 
592 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
593 			/*
594 			 * Fetch the RSS bucket id for the given indirection
595 			 * entry. Cap it at the number of configured buckets
596 			 * (which is num_rx_queues.)
597 			 */
598 			queue_id = rss_get_indirection_to_bucket(i);
599 			queue_id = queue_id % adapter->num_rx_queues;
600 		} else
601 			queue_id = (j * index_mult);
602 
603 		/*
604 		 * The low 8 bits are for hash value (n+0);
605 		 * The next 8 bits are for hash value (n+1), etc.
606 		 */
607 		reta = reta >> 8;
608 		reta = reta | (((uint32_t)queue_id) << 24);
609 		if ((i & 3) == 3) {
610 			if (i < 128)
611 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
612 			else
613 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
614 				    reta);
615 			reta = 0;
616 		}
617 	}
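	/*
	 * Worked example of the packing above: each 32-bit RETA register
	 * holds four 8-bit queue indices, low byte first.  With four RX
	 * queues, RSS disabled and index_mult == 1, the first register
	 * written is 0x03020100, i.e. table entries 0-3 map to queues
	 * 0, 1, 2 and 3.  Entries 128 and up (X550 family only) land in
	 * the ERETA registers instead.
	 */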
618 
619 	/* Now fill our hash function seeds */
620 	for (i = 0; i < 10; i++)
621 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
622 
623 	/* Perform hash on these packet types */
624 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
625 		rss_hash_config = rss_gethashconfig();
626 	else {
627 		/*
628 		 * Disable UDP - IP fragments aren't currently being handled
629 		 * and so we end up with a mix of 2-tuple and 4-tuple
630 		 * traffic.
631 		 */
632 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
633 		                | RSS_HASHTYPE_RSS_TCP_IPV4
634 		                | RSS_HASHTYPE_RSS_IPV6
635 		                | RSS_HASHTYPE_RSS_TCP_IPV6
636 		                | RSS_HASHTYPE_RSS_IPV6_EX
637 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
638 	}
639 
640 	mrqc = IXGBE_MRQC_RSSEN;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
649 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
650 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
651 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
652 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
653 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
654 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
655 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
656 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
657 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
658 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
659 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
660 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
661 } /* ixgbe_initialize_rss_mapping */
662 
663 /************************************************************************
664  * ixgbe_initialize_receive_units - Setup receive registers and features.
665  ************************************************************************/
666 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
667 
668 static void
669 ixgbe_initialize_receive_units(if_ctx_t ctx)
670 {
671 	struct adapter     *adapter = iflib_get_softc(ctx);
672 	if_softc_ctx_t     scctx = adapter->shared;
673 	struct ixgbe_hw    *hw = &adapter->hw;
674 	struct ifnet       *ifp = iflib_get_ifp(ctx);
675 	struct ix_rx_queue *que;
676 	int                i, j;
677 	u32                bufsz, fctrl, srrctl, rxcsum;
678 	u32                hlreg;
679 
680 	/*
681 	 * Make sure receives are disabled while
682 	 * setting up the descriptor ring
683 	 */
684 	ixgbe_disable_rx(hw);
685 
686 	/* Enable broadcasts */
687 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
688 	fctrl |= IXGBE_FCTRL_BAM;
689 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
690 		fctrl |= IXGBE_FCTRL_DPF;
691 		fctrl |= IXGBE_FCTRL_PMCF;
692 	}
693 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
694 
695 	/* Set for Jumbo Frames? */
696 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
697 	if (ifp->if_mtu > ETHERMTU)
698 		hlreg |= IXGBE_HLREG0_JUMBOEN;
699 	else
700 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
701 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
702 
703 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
704 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
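	/*
	 * Example: with the common 2 KB receive clusters (rx_mbuf_sz == 2048)
	 * and IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10, this works out to bufsz == 2,
	 * i.e. SRRCTL expresses the packet buffer size in 1 KB units.
	 */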
705 
706 	/* Setup the Base and Length of the Rx Descriptor Ring */
707 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
708 		struct rx_ring *rxr = &que->rxr;
709 		u64            rdba = rxr->rx_paddr;
710 
711 		j = rxr->me;
712 
713 		/* Setup the Base and Length of the Rx Descriptor Ring */
714 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
715 		    (rdba & 0x00000000ffffffffULL));
716 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
717 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
718 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
719 
720 		/* Set up the SRRCTL register */
721 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
722 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
723 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
724 		srrctl |= bufsz;
725 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
726 
727 		/*
728 		 * Set DROP_EN iff we have no flow control and >1 queue.
729 		 * Note that srrctl was cleared shortly before during reset,
730 		 * so we do not need to clear the bit, but do it just in case
731 		 * this code is moved elsewhere.
732 		 */
733 		if (adapter->num_rx_queues > 1 &&
734 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
735 			srrctl |= IXGBE_SRRCTL_DROP_EN;
736 		} else {
737 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
738 		}
739 
740 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
741 
742 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
743 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
744 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
745 
746 		/* Set the driver rx tail address */
747 		rxr->tail = IXGBE_RDT(rxr->me);
748 	}
749 
750 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
751 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
752 		            | IXGBE_PSRTYPE_UDPHDR
753 		            | IXGBE_PSRTYPE_IPV4HDR
754 		            | IXGBE_PSRTYPE_IPV6HDR;
755 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
756 	}
757 
758 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
759 
760 	ixgbe_initialize_rss_mapping(adapter);
761 
762 	if (adapter->num_rx_queues > 1) {
763 		/* RSS and RX IPP Checksum are mutually exclusive */
764 		rxcsum |= IXGBE_RXCSUM_PCSD;
765 	}
766 
767 	if (ifp->if_capenable & IFCAP_RXCSUM)
768 		rxcsum |= IXGBE_RXCSUM_PCSD;
769 
770 	/* This is useful for calculating UDP/IP fragment checksums */
771 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
772 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
773 
774 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
775 
776 } /* ixgbe_initialize_receive_units */
777 
778 /************************************************************************
779  * ixgbe_initialize_transmit_units - Enable transmit units.
780  ************************************************************************/
781 static void
782 ixgbe_initialize_transmit_units(if_ctx_t ctx)
783 {
784 	struct adapter     *adapter = iflib_get_softc(ctx);
785 	struct ixgbe_hw    *hw = &adapter->hw;
786 	if_softc_ctx_t     scctx = adapter->shared;
787 	struct ix_tx_queue *que;
788 	int i;
789 
790 	/* Setup the Base and Length of the Tx Descriptor Ring */
791 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
792 	    i++, que++) {
793 		struct tx_ring	   *txr = &que->txr;
794 		u64 tdba = txr->tx_paddr;
795 		u32 txctrl = 0;
796 		int j = txr->me;
797 
798 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
799 		    (tdba & 0x00000000ffffffffULL));
800 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
801 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
802 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
803 
804 		/* Setup the HW Tx Head and Tail descriptor pointers */
805 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
806 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
807 
808 		/* Reset the ring indices and report-status tracking */
809 		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
810 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
811 			txr->tx_rsq[k] = QIDX_INVALID;
812 
813 		/* Disable Head Writeback */
814 		/*
815 		 * Note: for X550 series devices, these registers are actually
816 		 * prefixed with TPH_ instead of DCA_, but the addresses and
817 		 * fields remain the same.
818 		 */
819 		switch (hw->mac.type) {
820 		case ixgbe_mac_82598EB:
821 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
822 			break;
823 		default:
824 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
825 			break;
826 		}
827 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
828 		switch (hw->mac.type) {
829 		case ixgbe_mac_82598EB:
830 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
831 			break;
832 		default:
833 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
834 			break;
835 		}
836 
837 	}
838 
839 	if (hw->mac.type != ixgbe_mac_82598EB) {
840 		u32 dmatxctl, rttdcs;
841 
842 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
843 		dmatxctl |= IXGBE_DMATXCTL_TE;
844 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
845 		/* Disable arbiter to set MTQC */
846 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
847 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
848 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
849 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
850 		    ixgbe_get_mtqc(adapter->iov_mode));
851 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
852 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
853 	}
854 
855 } /* ixgbe_initialize_transmit_units */
856 
857 /************************************************************************
858  * ixgbe_register
859  ************************************************************************/
860 static void *
861 ixgbe_register(device_t dev)
862 {
863 	return (ixgbe_sctx);
864 } /* ixgbe_register */
865 
866 /************************************************************************
867  * ixgbe_if_attach_pre - Device initialization routine, part 1
868  *
869  *   Called when the driver is being loaded.
870  *   Identifies the type of hardware, initializes the hardware,
871  *   and initializes iflib structures.
872  *
873  *   return 0 on success, positive on failure
874  ************************************************************************/
875 static int
876 ixgbe_if_attach_pre(if_ctx_t ctx)
877 {
878 	struct adapter  *adapter;
879 	device_t        dev;
880 	if_softc_ctx_t  scctx;
881 	struct ixgbe_hw *hw;
882 	int             error = 0;
883 	u32             ctrl_ext;
884 
885 	INIT_DEBUGOUT("ixgbe_attach: begin");
886 
887 	/* Allocate, clear, and link in our adapter structure */
888 	dev = iflib_get_dev(ctx);
889 	adapter = iflib_get_softc(ctx);
890 	adapter->hw.back = adapter;
891 	adapter->ctx = ctx;
892 	adapter->dev = dev;
893 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
894 	adapter->media = iflib_get_media(ctx);
895 	hw = &adapter->hw;
896 
897 	/* Determine hardware revision */
898 	hw->vendor_id = pci_get_vendor(dev);
899 	hw->device_id = pci_get_device(dev);
900 	hw->revision_id = pci_get_revid(dev);
901 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
902 	hw->subsystem_device_id = pci_get_subdevice(dev);
903 
904 	/* Do base PCI setup - map BAR0 */
905 	if (ixgbe_allocate_pci_resources(ctx)) {
906 		device_printf(dev, "Allocation of PCI resources failed\n");
907 		return (ENXIO);
908 	}
909 
910 	/* let hardware know driver is loaded */
911 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
912 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
913 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
914 
915 	/*
916 	 * Initialize the shared code
917 	 */
918 	if (ixgbe_init_shared_code(hw) != 0) {
919 		device_printf(dev, "Unable to initialize the shared code\n");
920 		error = ENXIO;
921 		goto err_pci;
922 	}
923 
924 	if (hw->mbx.ops.init_params)
925 		hw->mbx.ops.init_params(hw);
926 
927 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
928 
929 	if (hw->mac.type != ixgbe_mac_82598EB)
930 		hw->phy.smart_speed = ixgbe_smart_speed;
931 
932 	ixgbe_init_device_features(adapter);
933 
934 	/* Enable WoL (if supported) */
935 	ixgbe_check_wol_support(adapter);
936 
937 	/* Verify adapter fan is still functional (if applicable) */
938 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
939 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
940 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
941 	}
942 
943 	/* Ensure SW/FW semaphore is free */
944 	ixgbe_init_swfw_semaphore(hw);
945 
946 	/* Set an initial default flow control value */
947 	hw->fc.requested_mode = ixgbe_flow_control;
948 
949 	hw->phy.reset_if_overtemp = TRUE;
950 	error = ixgbe_reset_hw(hw);
951 	hw->phy.reset_if_overtemp = FALSE;
952 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
953 		/*
954 		 * No optics in this port, set up
955 		 * so the timer routine will probe
956 		 * for later insertion.
957 		 */
958 		adapter->sfp_probe = TRUE;
959 		error = 0;
960 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
961 		device_printf(dev, "Unsupported SFP+ module detected!\n");
962 		error = EIO;
963 		goto err_pci;
964 	} else if (error) {
965 		device_printf(dev, "Hardware initialization failed\n");
966 		error = EIO;
967 		goto err_pci;
968 	}
969 
970 	/* Make sure we have a good EEPROM before we read from it */
971 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
972 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
973 		error = EIO;
974 		goto err_pci;
975 	}
976 
977 	error = ixgbe_start_hw(hw);
978 	switch (error) {
979 	case IXGBE_ERR_EEPROM_VERSION:
980 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
981 		break;
982 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
983 		device_printf(dev, "Unsupported SFP+ Module\n");
984 		error = EIO;
985 		goto err_pci;
986 	case IXGBE_ERR_SFP_NOT_PRESENT:
987 		device_printf(dev, "No SFP+ Module found\n");
988 		/* falls thru */
989 	default:
990 		break;
991 	}
992 
993 	/* Most of the iflib initialization... */
994 
995 	iflib_set_mac(ctx, hw->mac.addr);
996 	switch (adapter->hw.mac.type) {
997 	case ixgbe_mac_X550:
998 	case ixgbe_mac_X550EM_x:
999 	case ixgbe_mac_X550EM_a:
1000 		scctx->isc_rss_table_size = 512;
1001 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1002 		break;
1003 	default:
1004 		scctx->isc_rss_table_size = 128;
1005 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1006 	}
1007 
1008 	/* Allow legacy interrupts */
1009 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1010 
1011 	scctx->isc_txqsizes[0] =
1012 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1013 	    sizeof(u32), DBA_ALIGN);
1014 	scctx->isc_rxqsizes[0] =
1015 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1016 	    DBA_ALIGN);
1017 
1018 	/* XXX */
1019 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1020 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1021 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1022 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1023 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
1024 	} else {
1025 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1026 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1027 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
1028 	}
1029 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1030 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1031 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1032 
1033 	scctx->isc_txrx = &ixgbe_txrx;
1034 
1035 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1036 
1037 	return (0);
1038 
1039 err_pci:
1040 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1041 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1042 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1043 	ixgbe_free_pci_resources(ctx);
1044 
1045 	return (error);
1046 } /* ixgbe_if_attach_pre */
1047 
1048  /*********************************************************************
1049  * ixgbe_if_attach_post - Device initialization routine, part 2
1050  *
1051  *   Called during driver load, but after interrupts and
1052  *   resources have been allocated and configured.
1053  *   Sets up some data structures not relevant to iflib.
1054  *
1055  *   return 0 on success, positive on failure
1056  *********************************************************************/
1057 static int
1058 ixgbe_if_attach_post(if_ctx_t ctx)
1059 {
1060 	device_t dev;
1061 	struct adapter  *adapter;
1062 	struct ixgbe_hw *hw;
1063 	int             error = 0;
1064 
1065 	dev = iflib_get_dev(ctx);
1066 	adapter = iflib_get_softc(ctx);
1067 	hw = &adapter->hw;
1068 
1069 
1070 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1071 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1072 		device_printf(dev, "Device does not support legacy interrupts\n");
1073 		error = ENXIO;
1074 		goto err;
1075 	}
1076 
1077 	/* Allocate multicast array memory. */
1078 	adapter->mta = malloc(sizeof(*adapter->mta) *
1079 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1080 	if (adapter->mta == NULL) {
1081 		device_printf(dev, "Can not allocate multicast setup array\n");
1082 		error = ENOMEM;
1083 		goto err;
1084 	}
1085 
1086 	/* hw.ix defaults init */
1087 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1088 
1089 	/* Enable the optics for 82599 SFP+ fiber */
1090 	ixgbe_enable_tx_laser(hw);
1091 
1092 	/* Enable power to the phy. */
1093 	ixgbe_set_phy_power(hw, TRUE);
1094 
1095 	ixgbe_initialize_iov(adapter);
1096 
1097 	error = ixgbe_setup_interface(ctx);
1098 	if (error) {
1099 		device_printf(dev, "Interface setup failed: %d\n", error);
1100 		goto err;
1101 	}
1102 
1103 	ixgbe_if_update_admin_status(ctx);
1104 
1105 	/* Initialize statistics */
1106 	ixgbe_update_stats_counters(adapter);
1107 	ixgbe_add_hw_stats(adapter);
1108 
1109 	/* Check PCIE slot type/speed/width */
1110 	ixgbe_get_slot_info(adapter);
1111 
1112 	/*
1113 	 * Do time init and sysctl init here, but
1114 	 * only on the first port of a bypass adapter.
1115 	 */
1116 	ixgbe_bypass_init(adapter);
1117 
1118 	/* Set an initial dmac value */
1119 	adapter->dmac = 0;
1120 	/* Set initial advertised speeds (if applicable) */
1121 	adapter->advertise = ixgbe_get_advertise(adapter);
1122 
1123 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1124 		ixgbe_define_iov_schemas(dev, &error);
1125 
1126 	/* Add sysctls */
1127 	ixgbe_add_device_sysctls(ctx);
1128 
1129 	return (0);
1130 err:
1131 	return (error);
1132 } /* ixgbe_if_attach_post */
1133 
1134 /************************************************************************
1135  * ixgbe_check_wol_support
1136  *
1137  *   Checks whether the adapter's ports are capable of
1138  *   Wake On LAN by reading the adapter's NVM.
1139  *
1140  *   Sets each port's hw->wol_enabled value depending
1141  *   on the value read here.
1142  ************************************************************************/
1143 static void
1144 ixgbe_check_wol_support(struct adapter *adapter)
1145 {
1146 	struct ixgbe_hw *hw = &adapter->hw;
1147 	u16             dev_caps = 0;
1148 
1149 	/* Find out WoL support for port */
1150 	adapter->wol_support = hw->wol_enabled = 0;
1151 	ixgbe_get_device_caps(hw, &dev_caps);
1152 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1153 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1154 	     hw->bus.func == 0))
1155 		adapter->wol_support = hw->wol_enabled = 1;
1156 
1157 	/* Save initial wake up filter configuration */
1158 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1159 
1160 	return;
1161 } /* ixgbe_check_wol_support */
1162 
1163 /************************************************************************
1164  * ixgbe_setup_interface
1165  *
1166  *   Setup networking device structure and register an interface.
1167  ************************************************************************/
1168 static int
1169 ixgbe_setup_interface(if_ctx_t ctx)
1170 {
1171 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1172 	struct adapter *adapter = iflib_get_softc(ctx);
1173 
1174 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1175 
1176 	if_setbaudrate(ifp, IF_Gbps(10));
1177 
1178 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1179 
1180 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1181 
1182 	ixgbe_add_media_types(ctx);
1183 
1184 	/* Autoselect media by default */
1185 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1186 
1187 	return (0);
1188 } /* ixgbe_setup_interface */
1189 
1190 /************************************************************************
1191  * ixgbe_if_get_counter
1192  ************************************************************************/
1193 static uint64_t
1194 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1195 {
1196 	struct adapter *adapter = iflib_get_softc(ctx);
1197 	if_t           ifp = iflib_get_ifp(ctx);
1198 
1199 	switch (cnt) {
1200 	case IFCOUNTER_IPACKETS:
1201 		return (adapter->ipackets);
1202 	case IFCOUNTER_OPACKETS:
1203 		return (adapter->opackets);
1204 	case IFCOUNTER_IBYTES:
1205 		return (adapter->ibytes);
1206 	case IFCOUNTER_OBYTES:
1207 		return (adapter->obytes);
1208 	case IFCOUNTER_IMCASTS:
1209 		return (adapter->imcasts);
1210 	case IFCOUNTER_OMCASTS:
1211 		return (adapter->omcasts);
1212 	case IFCOUNTER_COLLISIONS:
1213 		return (0);
1214 	case IFCOUNTER_IQDROPS:
1215 		return (adapter->iqdrops);
1216 	case IFCOUNTER_OQDROPS:
1217 		return (0);
1218 	case IFCOUNTER_IERRORS:
1219 		return (adapter->ierrors);
1220 	default:
1221 		return (if_get_counter_default(ifp, cnt));
1222 	}
1223 } /* ixgbe_if_get_counter */
1224 
1225 /************************************************************************
1226  * ixgbe_if_i2c_req
1227  ************************************************************************/
1228 static int
1229 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1230 {
1231 	struct adapter		*adapter = iflib_get_softc(ctx);
1232 	struct ixgbe_hw 	*hw = &adapter->hw;
1233 	int 			i;
1234 
1235 
1236 	if (hw->phy.ops.read_i2c_byte == NULL)
1237 		return (ENXIO);
1238 	for (i = 0; i < req->len; i++)
1239 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1240 		    req->dev_addr, &req->data[i]);
1241 	return (0);
1242 } /* ixgbe_if_i2c_req */
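/*
 * This handler backs the SIOCGI2C ioctl, which userland uses to read the
 * SFP/SFP+ module EEPROM; for example, "ifconfig -v ix0" reports the
 * plugged transceiver's identification data through this path.
 */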
1243 
1244 /************************************************************************
1245  * ixgbe_add_media_types
1246  ************************************************************************/
1247 static void
1248 ixgbe_add_media_types(if_ctx_t ctx)
1249 {
1250 	struct adapter  *adapter = iflib_get_softc(ctx);
1251 	struct ixgbe_hw *hw = &adapter->hw;
1252 	device_t        dev = iflib_get_dev(ctx);
1253 	u64             layer;
1254 
1255 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1256 
1257 	/* Media types with matching FreeBSD media defines */
1258 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1259 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1260 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1261 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1262 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1263 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1264 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1265 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1266 
1267 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1268 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1269 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1270 		    NULL);
1271 
1272 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1273 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1274 		if (hw->phy.multispeed_fiber)
1275 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1276 			    NULL);
1277 	}
1278 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1279 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1280 		if (hw->phy.multispeed_fiber)
1281 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1282 			    NULL);
1283 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1284 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1285 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1286 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1287 
1288 #ifdef IFM_ETH_XTYPE
1289 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1290 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1291 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1292 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1293 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1294 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1295 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1296 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1297 #else
1298 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1299 		device_printf(dev, "Media supported: 10GbaseKR\n");
1300 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1301 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1302 	}
1303 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1304 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1305 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1306 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1307 	}
1308 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1309 		device_printf(dev, "Media supported: 1000baseKX\n");
1310 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1311 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1312 	}
1313 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1314 		device_printf(dev, "Media supported: 2500baseKX\n");
1315 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1316 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1317 	}
1318 #endif
1319 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1320 		device_printf(dev, "Media supported: 1000baseBX\n");
1321 
1322 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1323 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1324 		    0, NULL);
1325 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1326 	}
1327 
1328 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1329 } /* ixgbe_add_media_types */
1330 
1331 /************************************************************************
1332  * ixgbe_is_sfp
1333  ************************************************************************/
1334 static inline bool
1335 ixgbe_is_sfp(struct ixgbe_hw *hw)
1336 {
1337 	switch (hw->mac.type) {
1338 	case ixgbe_mac_82598EB:
1339 		if (hw->phy.type == ixgbe_phy_nl)
1340 			return (TRUE);
1341 		return (FALSE);
1342 	case ixgbe_mac_82599EB:
1343 		switch (hw->mac.ops.get_media_type(hw)) {
1344 		case ixgbe_media_type_fiber:
1345 		case ixgbe_media_type_fiber_qsfp:
1346 			return (TRUE);
1347 		default:
1348 			return (FALSE);
1349 		}
1350 	case ixgbe_mac_X550EM_x:
1351 	case ixgbe_mac_X550EM_a:
1352 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1353 			return (TRUE);
1354 		return (FALSE);
1355 	default:
1356 		return (FALSE);
1357 	}
1358 } /* ixgbe_is_sfp */
1359 
1360 /************************************************************************
1361  * ixgbe_config_link
1362  ************************************************************************/
1363 static void
1364 ixgbe_config_link(struct adapter *adapter)
1365 {
1366 	struct ixgbe_hw *hw = &adapter->hw;
1367 	u32             autoneg, err = 0;
1368 	bool            sfp, negotiate;
1369 
1370 	sfp = ixgbe_is_sfp(hw);
1371 
1372 	if (sfp) {
1373 		GROUPTASK_ENQUEUE(&adapter->mod_task);
1374 	} else {
1375 		if (hw->mac.ops.check_link)
1376 			err = ixgbe_check_link(hw, &adapter->link_speed,
1377 			    &adapter->link_up, FALSE);
1378 		if (err)
1379 			return;
1380 		autoneg = hw->phy.autoneg_advertised;
1381 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1382 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1383 			    &negotiate);
1384 		if (err)
1385 			return;
1386 		if (hw->mac.ops.setup_link)
1387 			err = hw->mac.ops.setup_link(hw, autoneg,
1388 			    adapter->link_up);
1389 	}
1390 
1391 } /* ixgbe_config_link */
1392 
1393 /************************************************************************
1394  * ixgbe_update_stats_counters - Update board statistics counters.
1395  ************************************************************************/
1396 static void
1397 ixgbe_update_stats_counters(struct adapter *adapter)
1398 {
1399 	struct ixgbe_hw       *hw = &adapter->hw;
1400 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1401 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1402 	u64                   total_missed_rx = 0;
1403 
1404 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1405 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1406 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1407 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1408 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1409 
1410 	for (int i = 0; i < 16; i++) {
1411 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1412 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1413 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1414 	}
1415 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1416 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1417 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1418 
1419 	/* Hardware workaround, gprc counts missed packets */
1420 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1421 	stats->gprc -= missed_rx;
1422 
1423 	if (hw->mac.type != ixgbe_mac_82598EB) {
1424 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1425 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1426 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1427 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1428 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1429 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1430 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1431 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1432 	} else {
1433 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1434 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1435 		/* 82598 only has a counter in the high register */
1436 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1437 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1438 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1439 	}
1440 
1441 	/*
1442 	 * Workaround: mprc hardware is incorrectly counting
1443 	 * broadcasts, so for now we subtract those.
1444 	 */
1445 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1446 	stats->bprc += bprc;
1447 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1448 	if (hw->mac.type == ixgbe_mac_82598EB)
1449 		stats->mprc -= bprc;
1450 
1451 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1452 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1453 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1454 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1455 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1456 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1457 
1458 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1459 	stats->lxontxc += lxon;
1460 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1461 	stats->lxofftxc += lxoff;
1462 	total = lxon + lxoff;
1463 
1464 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1465 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1466 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
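	/*
	 * XON/XOFF pause frames are included in the good packet/octet
	 * counters read above, so back them out; ETHER_MIN_LEN is the
	 * on-wire size of a pause frame.
	 */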
1467 	stats->gptc -= total;
1468 	stats->mptc -= total;
1469 	stats->ptc64 -= total;
1470 	stats->gotc -= total * ETHER_MIN_LEN;
1471 
1472 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1473 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1474 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1475 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1476 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1477 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1478 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1479 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1480 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1481 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1482 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1483 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1484 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1485 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1486 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1487 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1488 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1489 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1490 	/* FCoE counters only exist on 82599 and later */
1491 	if (hw->mac.type != ixgbe_mac_82598EB) {
1492 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1493 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1494 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1495 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1496 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1497 	}
1498 
1499 	/* Fill out the OS statistics structure */
1500 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1501 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1502 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1503 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1504 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1505 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1506 	IXGBE_SET_COLLISIONS(adapter, 0);
1507 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1508 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1509 } /* ixgbe_update_stats_counters */
1510 
1511 /************************************************************************
1512  * ixgbe_add_hw_stats
1513  *
1514  *   Add sysctl variables, one per statistic, to the system.
1515  ************************************************************************/
1516 static void
1517 ixgbe_add_hw_stats(struct adapter *adapter)
1518 {
1519 	device_t               dev = iflib_get_dev(adapter->ctx);
1520 	struct ix_rx_queue     *rx_que;
1521 	struct ix_tx_queue     *tx_que;
1522 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1523 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1524 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1525 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1526 	struct sysctl_oid      *stat_node, *queue_node;
1527 	struct sysctl_oid_list *stat_list, *queue_list;
1528 	int                    i;
1529 
1530 #define QUEUE_NAME_LEN 32
1531 	char                   namebuf[QUEUE_NAME_LEN];
1532 
1533 	/* Driver Statistics */
1534 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1535 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1536 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1537 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1538 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1539 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1540 
1541 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1542 		struct tx_ring *txr = &tx_que->txr;
1543 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1544 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1545 		    CTLFLAG_RD, NULL, "Queue Name");
1546 		queue_list = SYSCTL_CHILDREN(queue_node);
1547 
1548 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1549 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1550 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1551 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1552 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1553 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1554 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1555 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1556 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1557 		    CTLFLAG_RD, &txr->total_packets,
1558 		    "Queue Packets Transmitted");
1559 	}
1560 
1561 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1562 		struct rx_ring *rxr = &rx_que->rxr;
1563 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1564 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1565 		    CTLFLAG_RD, NULL, "Queue Name");
1566 		queue_list = SYSCTL_CHILDREN(queue_node);
1567 
1568 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1569 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1570 		    sizeof(&adapter->rx_queues[i]),
1571 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1572 		    "Interrupt Rate");
1573 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1574 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1575 		    "irqs on this queue");
1576 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1577 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1578 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1579 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1580 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1581 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1582 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1583 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1584 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1585 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1586 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1587 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1588 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1589 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1590 	}
1591 
1592 	/* MAC stats get their own sub node */
1593 
1594 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1595 	    CTLFLAG_RD, NULL, "MAC Statistics");
1596 	stat_list = SYSCTL_CHILDREN(stat_node);
1597 
1598 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1599 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1600 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1601 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1602 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1603 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1604 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1605 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1606 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1607 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1608 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1609 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1610 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1611 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1612 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1613 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1614 
1615 	/* Flow Control stats */
1616 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1617 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1618 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1619 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1620 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1621 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1622 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1623 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1624 
1625 	/* Packet Reception Stats */
1626 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1627 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1628 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1629 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1630 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1631 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1632 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1633 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1634 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1635 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1636 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1637 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1638 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1639 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1640 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1641 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1642 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1643 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1644 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1645 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1646 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1647 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1648 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1649 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1650 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1651 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1652 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1653 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1654 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1655 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1656 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1657 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1658 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1659 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1660 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1661 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1662 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1663 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1664 
1665 	/* Packet Transmission Stats */
1666 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1667 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1668 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1669 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1670 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1671 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1672 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1673 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1674 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1675 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1676 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1677 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1678 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1679 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1680 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1681 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1682 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1683 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1684 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1685 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1686 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1687 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1688 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1689 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1690 } /* ixgbe_add_hw_stats */
1691 
1692 /************************************************************************
1693  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1694  *
1695  *   Retrieves the TDH value from the hardware
1696  ************************************************************************/
1697 static int
1698 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1699 {
1700 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1701 	int            error;
1702 	unsigned int   val;
1703 
1704 	if (!txr)
1705 		return (0);
1706 
1707 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1708 	error = sysctl_handle_int(oidp, &val, 0, req);
1709 	if (error || !req->newptr)
1710 		return (error);
1711 
1712 	return (0);
1713 } /* ixgbe_sysctl_tdh_handler */
1714 
1715 /************************************************************************
1716  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1717  *
1718  *   Retrieves the TDT value from the hardware
1719  ************************************************************************/
1720 static int
1721 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1722 {
1723 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1724 	int            error;
1725 	unsigned int   val;
1726 
1727 	if (!txr)
1728 		return (0);
1729 
1730 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1731 	error = sysctl_handle_int(oidp, &val, 0, req);
1732 	if (error || !req->newptr)
1733 		return (error);
1734 
1735 	return (0);
1736 } /* ixgbe_sysctl_tdt_handler */
1737 
1738 /************************************************************************
1739  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1740  *
1741  *   Retrieves the RDH value from the hardware
1742  ************************************************************************/
1743 static int
1744 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1745 {
1746 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1747 	int            error;
1748 	unsigned int   val;
1749 
1750 	if (!rxr)
1751 		return (0);
1752 
1753 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1754 	error = sysctl_handle_int(oidp, &val, 0, req);
1755 	if (error || !req->newptr)
1756 		return (error);
1757 
1758 	return (0);
1759 } /* ixgbe_sysctl_rdh_handler */
1760 
1761 /************************************************************************
1762  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1763  *
1764  *   Retrieves the RDT value from the hardware
1765  ************************************************************************/
1766 static int
1767 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1768 {
1769 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1770 	int            error;
1771 	unsigned int   val;
1772 
1773 	if (!rxr)
1774 		return (0);
1775 
1776 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1777 	error = sysctl_handle_int(oidp, &val, 0, req);
1778 	if (error || !req->newptr)
1779 		return (error);
1780 
1781 	return (0);
1782 } /* ixgbe_sysctl_rdt_handler */
1783 
1784 /************************************************************************
1785  * ixgbe_if_vlan_register
1786  *
1787  *   Run via vlan config EVENT, it enables us to use the
1788  *   HW Filter table since we can get the vlan id. This
1789  *   just creates the entry in the soft version of the
1790  *   VFTA, init will repopulate the real table.
1791  ************************************************************************/
1792 static void
1793 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1794 {
1795 	struct adapter *adapter = iflib_get_softc(ctx);
1796 	u16            index, bit;
1797 
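	/*
	 * The shadow VFTA mirrors the 4096-bit hardware table as 128
	 * 32-bit words: bits 11:5 of the VLAN ID select the word and
	 * bits 4:0 select the bit (e.g. VLAN 100 -> word 3, bit 4).
	 */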
1798 	index = (vtag >> 5) & 0x7F;
1799 	bit = vtag & 0x1F;
1800 	adapter->shadow_vfta[index] |= (1 << bit);
1801 	++adapter->num_vlans;
1802 	ixgbe_setup_vlan_hw_support(ctx);
1803 } /* ixgbe_if_vlan_register */
1804 
1805 /************************************************************************
1806  * ixgbe_if_vlan_unregister
1807  *
1808  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1809  ************************************************************************/
1810 static void
1811 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1812 {
1813 	struct adapter *adapter = iflib_get_softc(ctx);
1814 	u16            index, bit;
1815 
1816 	index = (vtag >> 5) & 0x7F;
1817 	bit = vtag & 0x1F;
1818 	adapter->shadow_vfta[index] &= ~(1 << bit);
1819 	--adapter->num_vlans;
1820 	/* Re-init to load the changes */
1821 	ixgbe_setup_vlan_hw_support(ctx);
1822 } /* ixgbe_if_vlan_unregister */
1823 
1824 /************************************************************************
1825  * ixgbe_setup_vlan_hw_support
1826  ************************************************************************/
1827 static void
1828 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1829 {
1830 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1831 	struct adapter  *adapter = iflib_get_softc(ctx);
1832 	struct ixgbe_hw *hw = &adapter->hw;
1833 	struct rx_ring  *rxr;
1834 	int             i;
1835 	u32             ctrl;
1836 
1838 	/*
1839 	 * We get here via init (a soft reset), which has already
1840 	 * cleared the VFTA and other related state, so if no VLANs
1841 	 * have been registered there is nothing to do.
1842 	 */
1844 	if (adapter->num_vlans == 0)
1845 		return;
1846 
1847 	/* Setup the queues for vlans */
1848 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1849 		for (i = 0; i < adapter->num_rx_queues; i++) {
1850 			rxr = &adapter->rx_queues[i].rxr;
1851 			/* On 82599 and later the VLAN strip enable is per-queue in RXDCTL */
1852 			if (hw->mac.type != ixgbe_mac_82598EB) {
1853 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1854 				ctrl |= IXGBE_RXDCTL_VME;
1855 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1856 			}
1857 			rxr->vtag_strip = TRUE;
1858 		}
1859 	}
1860 
1861 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1862 		return;
1863 	/*
1864 	 * A soft reset zeroes out the VFTA, so
1865 	 * we need to repopulate it now.
1866 	 */
1867 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1868 		if (adapter->shadow_vfta[i] != 0)
1869 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1870 			    adapter->shadow_vfta[i]);
1871 
1872 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1873 	/* Enable the VLAN filter table if the capability is enabled */
1874 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1875 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1876 		ctrl |= IXGBE_VLNCTRL_VFE;
1877 	}
1878 	if (hw->mac.type == ixgbe_mac_82598EB)
1879 		ctrl |= IXGBE_VLNCTRL_VME;
1880 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1881 } /* ixgbe_setup_vlan_hw_support */
1882 
1883 /************************************************************************
1884  * ixgbe_get_slot_info
1885  *
1886  *   Get the width and transaction speed of
1887  *   the slot this adapter is plugged into.
1888  ************************************************************************/
1889 static void
1890 ixgbe_get_slot_info(struct adapter *adapter)
1891 {
1892 	device_t        dev = iflib_get_dev(adapter->ctx);
1893 	struct ixgbe_hw *hw = &adapter->hw;
1894 	int             bus_info_valid = TRUE;
1895 	u32             offset;
1896 	u16             link;
1897 
1898 	/* Some devices are behind an internal bridge */
1899 	switch (hw->device_id) {
1900 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1901 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1902 		goto get_parent_info;
1903 	default:
1904 		break;
1905 	}
1906 
1907 	ixgbe_get_bus_info(hw);
1908 
1909 	/*
1910 	 * Some devices don't use PCI-E; for those there is no point in
1911 	 * displaying "Unknown" for bus speed and width, so just return.
1912 	 */
1913 	switch (hw->mac.type) {
1914 	case ixgbe_mac_X550EM_x:
1915 	case ixgbe_mac_X550EM_a:
1916 		return;
1917 	default:
1918 		goto display;
1919 	}
1920 
1921 get_parent_info:
1922 	/*
1923 	 * For the Quad port adapter we need to parse back
1924 	 * up the PCI tree to find the speed of the expansion
1925 	 * slot into which this adapter is plugged. A bit more work.
1926 	 */
1927 	dev = device_get_parent(device_get_parent(dev));
1928 #ifdef IXGBE_DEBUG
1929 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1930 	    pci_get_slot(dev), pci_get_function(dev));
1931 #endif
1932 	dev = device_get_parent(device_get_parent(dev));
1933 #ifdef IXGBE_DEBUG
1934 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1935 	    pci_get_slot(dev), pci_get_function(dev));
1936 #endif
1937 	/* Now get the PCI Express Capabilities offset */
1938 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1939 		/*
1940 		 * Hmm...can't get PCI-Express capabilities.
1941 		 * Falling back to default method.
1942 		 */
1943 		bus_info_valid = FALSE;
1944 		ixgbe_get_bus_info(hw);
1945 		goto display;
1946 	}
1947 	/* ...and read the Link Status Register */
1948 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1949 	ixgbe_set_pci_config_data_generic(hw, link);
1950 
1951 display:
1952 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1953 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1954 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1955 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1956 	     "Unknown"),
1957 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1958 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1959 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1960 	     "Unknown"));
1961 
1962 	if (bus_info_valid) {
1963 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1964 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1965 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1966 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1967 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1968 		}
1969 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1970 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1971 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1972 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1973 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1974 		}
1975 	} else
1976 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1977 
1978 	return;
1979 } /* ixgbe_get_slot_info */
1980 
1981 /************************************************************************
1982  * ixgbe_if_msix_intr_assign
1983  *
1984  *   Setup MSI-X Interrupt resources and handlers
1985  ************************************************************************/
1986 static int
1987 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1988 {
1989 	struct adapter     *adapter = iflib_get_softc(ctx);
1990 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1991 	struct ix_tx_queue *tx_que;
1992 	int                error, rid, vector = 0;
1993 	int                cpu_id = 0;
1994 	char               buf[16];
1995 
1996 	/* Admin queue is vector 0 */
1997 	rid = vector + 1;
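	/* MSI-X bus resource IDs are 1-based, so vector n maps to rid n + 1 */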
1998 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1999 		rid = vector + 1;
2000 
2001 		snprintf(buf, sizeof(buf), "rxq%d", i);
2002 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2003 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2004 
2005 		if (error) {
2006 			device_printf(iflib_get_dev(ctx),
2007 			    "Failed to allocate queue interrupt %d, error: %d\n", i, error);
2008 			adapter->num_rx_queues = i + 1;
2009 			goto fail;
2010 		}
2011 
2012 		rx_que->msix = vector;
2013 		adapter->active_queues |= ((u64)1 << rx_que->msix);
2014 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2015 			/*
2016 			 * The queue ID is used as the RSS layer bucket ID.
2017 			 * We look up the queue ID -> RSS CPU ID and select
2018 			 * that.
2019 			 */
2020 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2021 		} else {
2022 			/*
2023 			 * Bind the msix vector, and thus the
2024 			 * rings to the corresponding cpu.
2025 			 *
2026 			 * This just happens to match the default RSS
2027 			 * round-robin bucket -> queue -> CPU allocation.
2028 			 */
2029 			if (adapter->num_rx_queues > 1)
2030 				cpu_id = i;
2031 		}
2032 
2033 	}
2034 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2035 		snprintf(buf, sizeof(buf), "txq%d", i);
2036 		tx_que = &adapter->tx_queues[i];
2037 		tx_que->msix = i % adapter->num_rx_queues;
2038 		iflib_softirq_alloc_generic(ctx,
2039 		    &adapter->rx_queues[tx_que->msix].que_irq,
2040 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2041 	}
2042 	rid = vector + 1;
2043 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2044 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2045 	if (error) {
2046 		device_printf(iflib_get_dev(ctx),
2047 		    "Failed to register admin handler\n");
2048 		return (error);
2049 	}
2050 
2051 	adapter->vector = vector;
2052 
2053 	return (0);
2054 fail:
2055 	iflib_irq_free(ctx, &adapter->irq);
2056 	rx_que = adapter->rx_queues;
2057 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2058 		iflib_irq_free(ctx, &rx_que->que_irq);
2059 
2060 	return (error);
2061 } /* ixgbe_if_msix_intr_assign */
2062 
2063 /*********************************************************************
2064  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2065  **********************************************************************/
2066 static int
2067 ixgbe_msix_que(void *arg)
2068 {
2069 	struct ix_rx_queue *que = arg;
2070 	struct adapter     *adapter = que->adapter;
2071 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2072 
2073 	/* Protect against spurious interrupts */
2074 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2075 		return (0);
2076 
2077 	ixgbe_disable_queue(adapter, que->msix);
2078 	++que->irqs;
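	/*
	 * Leave this queue's interrupt masked; iflib runs the queue task
	 * and re-enables it afterwards through the driver's
	 * rx_queue_intr_enable method.
	 */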
2079 
2080 	return (FILTER_SCHEDULE_THREAD);
2081 } /* ixgbe_msix_que */
2082 
2083 /************************************************************************
2084  * ixgbe_if_media_status - Media Ioctl callback
2085  *
2086  *   Called whenever the user queries the status of
2087  *   the interface using ifconfig.
2088  ************************************************************************/
2089 static void
2090 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2091 {
2092 	struct adapter  *adapter = iflib_get_softc(ctx);
2093 	struct ixgbe_hw *hw = &adapter->hw;
2094 	int             layer;
2095 
2096 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2097 
2098 	iflib_admin_intr_deferred(ctx);
2099 
2100 	ifmr->ifm_status = IFM_AVALID;
2101 	ifmr->ifm_active = IFM_ETHER;
2102 
2103 	if (!adapter->link_active)
2104 		return;
2105 
2106 	ifmr->ifm_status |= IFM_ACTIVE;
2107 	layer = adapter->phy_layer;
2108 
2109 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2110 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2111 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2112 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2113 		switch (adapter->link_speed) {
2114 		case IXGBE_LINK_SPEED_10GB_FULL:
2115 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2116 			break;
2117 		case IXGBE_LINK_SPEED_1GB_FULL:
2118 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2119 			break;
2120 		case IXGBE_LINK_SPEED_100_FULL:
2121 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2122 			break;
2123 		case IXGBE_LINK_SPEED_10_FULL:
2124 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2125 			break;
2126 		}
2127 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2128 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2129 		switch (adapter->link_speed) {
2130 		case IXGBE_LINK_SPEED_10GB_FULL:
2131 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2132 			break;
2133 		}
2134 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2135 		switch (adapter->link_speed) {
2136 		case IXGBE_LINK_SPEED_10GB_FULL:
2137 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2138 			break;
2139 		case IXGBE_LINK_SPEED_1GB_FULL:
2140 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2141 			break;
2142 		}
2143 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2144 		switch (adapter->link_speed) {
2145 		case IXGBE_LINK_SPEED_10GB_FULL:
2146 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2147 			break;
2148 		case IXGBE_LINK_SPEED_1GB_FULL:
2149 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2150 			break;
2151 		}
2152 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2153 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2154 		switch (adapter->link_speed) {
2155 		case IXGBE_LINK_SPEED_10GB_FULL:
2156 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2157 			break;
2158 		case IXGBE_LINK_SPEED_1GB_FULL:
2159 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2160 			break;
2161 		}
2162 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2163 		switch (adapter->link_speed) {
2164 		case IXGBE_LINK_SPEED_10GB_FULL:
2165 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2166 			break;
2167 		}
2168 	/*
2169 	 * XXX: These need to use the proper media types once
2170 	 * they're added.
2171 	 */
2172 #ifndef IFM_ETH_XTYPE
2173 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2174 		switch (adapter->link_speed) {
2175 		case IXGBE_LINK_SPEED_10GB_FULL:
2176 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2177 			break;
2178 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2179 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2180 			break;
2181 		case IXGBE_LINK_SPEED_1GB_FULL:
2182 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2183 			break;
2184 		}
2185 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2186 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2187 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2188 		switch (adapter->link_speed) {
2189 		case IXGBE_LINK_SPEED_10GB_FULL:
2190 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2191 			break;
2192 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2193 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2194 			break;
2195 		case IXGBE_LINK_SPEED_1GB_FULL:
2196 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2197 			break;
2198 		}
2199 #else
2200 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2201 		switch (adapter->link_speed) {
2202 		case IXGBE_LINK_SPEED_10GB_FULL:
2203 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2204 			break;
2205 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2206 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2207 			break;
2208 		case IXGBE_LINK_SPEED_1GB_FULL:
2209 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2210 			break;
2211 		}
2212 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2213 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2214 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2215 		switch (adapter->link_speed) {
2216 		case IXGBE_LINK_SPEED_10GB_FULL:
2217 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2218 			break;
2219 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2220 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2221 			break;
2222 		case IXGBE_LINK_SPEED_1GB_FULL:
2223 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2224 			break;
2225 		}
2226 #endif
2227 
2228 	/* If nothing is recognized... */
2229 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2230 		ifmr->ifm_active |= IFM_UNKNOWN;
2231 
2232 	/* Display current flow control setting used on link */
2233 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2234 	    hw->fc.current_mode == ixgbe_fc_full)
2235 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2236 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2237 	    hw->fc.current_mode == ixgbe_fc_full)
2238 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2239 } /* ixgbe_if_media_status */
2240 
2241 /************************************************************************
2242  * ixgbe_if_media_change - Media Ioctl callback
2243  *
2244  *   Called when the user changes speed/duplex using
2245  *   media/mediaopt option with ifconfig.
2246  ************************************************************************/
2247 static int
2248 ixgbe_if_media_change(if_ctx_t ctx)
2249 {
2250 	struct adapter   *adapter = iflib_get_softc(ctx);
2251 	struct ifmedia   *ifm = iflib_get_media(ctx);
2252 	struct ixgbe_hw  *hw = &adapter->hw;
2253 	ixgbe_link_speed speed = 0;
2254 
2255 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2256 
2257 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2258 		return (EINVAL);
2259 
2260 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2261 		return (EPERM);
2262 
2263 	/*
2264 	 * We don't actually need to check against the supported
2265 	 * media types of the adapter; ifmedia will take care of
2266 	 * that for us.
2267 	 */
2268 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2269 	case IFM_AUTO:
2270 	case IFM_10G_T:
2271 		speed |= IXGBE_LINK_SPEED_100_FULL;
2272 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2273 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2274 		break;
2275 	case IFM_10G_LRM:
2276 	case IFM_10G_LR:
2277 #ifndef IFM_ETH_XTYPE
2278 	case IFM_10G_SR: /* KR, too */
2279 	case IFM_10G_CX4: /* KX4 */
2280 #else
2281 	case IFM_10G_KR:
2282 	case IFM_10G_KX4:
2283 #endif
2284 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2285 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2286 		break;
2287 #ifndef IFM_ETH_XTYPE
2288 	case IFM_1000_CX: /* KX */
2289 #else
2290 	case IFM_1000_KX:
2291 #endif
2292 	case IFM_1000_LX:
2293 	case IFM_1000_SX:
2294 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2295 		break;
2296 	case IFM_1000_T:
2297 		speed |= IXGBE_LINK_SPEED_100_FULL;
2298 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2299 		break;
2300 	case IFM_10G_TWINAX:
2301 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2302 		break;
2303 	case IFM_100_TX:
2304 		speed |= IXGBE_LINK_SPEED_100_FULL;
2305 		break;
2306 	case IFM_10_T:
2307 		speed |= IXGBE_LINK_SPEED_10_FULL;
2308 		break;
2309 	default:
2310 		goto invalid;
2311 	}
2312 
2313 	hw->mac.autotry_restart = TRUE;
2314 	hw->mac.ops.setup_link(hw, speed, TRUE);
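	/*
	 * Record the request in the advertise_speed sysctl encoding:
	 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M.
	 */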
2315 	adapter->advertise =
2316 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2317 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2318 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2319 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2320 
2321 	return (0);
2322 
2323 invalid:
2324 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2325 
2326 	return (EINVAL);
2327 } /* ixgbe_if_media_change */
2328 
2329 /************************************************************************
2330  * ixgbe_if_promisc_set
2331  ************************************************************************/
2332 static int
2333 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2334 {
2335 	struct adapter *adapter = iflib_get_softc(ctx);
2336 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2337 	u32            rctl;
2338 	int            mcnt = 0;
2339 
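	/*
	 * FCTRL.UPE and FCTRL.MPE are the unicast and multicast
	 * promiscuous enables; start with UPE cleared and only turn on
	 * what the interface flags request.
	 */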
2340 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2341 	rctl &= (~IXGBE_FCTRL_UPE);
2342 	if (ifp->if_flags & IFF_ALLMULTI)
2343 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2344 	else
2345 		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2347 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2348 		rctl &= (~IXGBE_FCTRL_MPE);
2349 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2350 
2351 	if (ifp->if_flags & IFF_PROMISC) {
2352 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2353 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2354 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2355 		rctl |= IXGBE_FCTRL_MPE;
2356 		rctl &= ~IXGBE_FCTRL_UPE;
2357 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2358 	}
2359 	return (0);
2360 } /* ixgbe_if_promisc_set */
2361 
2362 /************************************************************************
2363  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2364  ************************************************************************/
2365 static int
2366 ixgbe_msix_link(void *arg)
2367 {
2368 	struct adapter  *adapter = arg;
2369 	struct ixgbe_hw *hw = &adapter->hw;
2370 	u32             eicr, eicr_mask;
2371 	s32             retval;
2372 
2373 	++adapter->link_irq;
2374 
2375 	/* Pause other interrupts */
2376 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2377 
2378 	/* First get the cause */
2379 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2380 	/* Be sure the queue bits are not cleared */
2381 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2382 	/* Clear interrupt with write */
2383 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2384 
2385 	/* Link status change */
2386 	if (eicr & IXGBE_EICR_LSC) {
2387 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2388 		iflib_admin_intr_deferred(adapter->ctx);
2389 	}
2390 
2391 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2392 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2393 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2394 			/* This is probably overkill :) */
2395 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2396 				return (FILTER_HANDLED);
2397 			/* Disable the interrupt */
2398 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2399 			GROUPTASK_ENQUEUE(&adapter->fdir_task);
2400 		} else if (eicr & IXGBE_EICR_ECC) {
2402 			device_printf(iflib_get_dev(adapter->ctx),
2403 			    "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2404 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2405 		}
2406 
2407 		/* Check for over temp condition */
2408 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2409 			switch (adapter->hw.mac.type) {
2410 			case ixgbe_mac_X550EM_a:
2411 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2412 					break;
2413 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2414 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2415 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2416 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2417 				retval = hw->phy.ops.check_overtemp(hw);
2418 				if (retval != IXGBE_ERR_OVERTEMP)
2419 					break;
2420 				device_printf(iflib_get_dev(adapter->ctx),
2421 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2422 				device_printf(iflib_get_dev(adapter->ctx),
2423 				    "System shutdown required!\n");
2424 				break;
2425 			default:
2426 				if (!(eicr & IXGBE_EICR_TS))
2427 					break;
2428 				retval = hw->phy.ops.check_overtemp(hw);
2429 				if (retval != IXGBE_ERR_OVERTEMP)
2430 					break;
2431 				device_printf(iflib_get_dev(adapter->ctx),
2432 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2433 				device_printf(iflib_get_dev(adapter->ctx),
2434 				    "System shutdown required!\n");
2435 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2436 				break;
2437 			}
2438 		}
2439 
2440 		/* Check for VF message */
2441 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2442 		    (eicr & IXGBE_EICR_MAILBOX))
2443 			GROUPTASK_ENQUEUE(&adapter->mbx_task);
2444 	}
2445 
2446 	if (ixgbe_is_sfp(hw)) {
2447 		/* Pluggable optics-related interrupt */
2448 		if (hw->mac.type >= ixgbe_mac_X540)
2449 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2450 		else
2451 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
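		/*
		 * The module-presence interrupt kicks the mod task to
		 * re-identify the optics; the SDP1 case below instead kicks
		 * the multispeed-fiber (msf) task to redo link speed setup.
		 */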
2452 
2453 		if (eicr & eicr_mask) {
2454 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2455 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2456 				GROUPTASK_ENQUEUE(&adapter->mod_task);
2457 		}
2458 
2459 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2460 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2461 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2462 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2463 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2464 				GROUPTASK_ENQUEUE(&adapter->msf_task);
2465 		}
2466 	}
2467 
2468 	/* Check for fan failure */
2469 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2470 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2471 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2472 	}
2473 
2474 	/* External PHY interrupt */
2475 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2476 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2477 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2478 		GROUPTASK_ENQUEUE(&adapter->phy_task);
2479 	}
2480 
2481 	/* Re-enable other interrupts */
2482 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2483 
2484 	return (FILTER_HANDLED);
2485 } /* ixgbe_msix_link */
2486 
2487 /************************************************************************
2488  * ixgbe_sysctl_interrupt_rate_handler
2489  ************************************************************************/
2490 static int
2491 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2492 {
2493 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2494 	int                error;
2495 	unsigned int       reg, usec, rate;
2496 
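	/*
	 * The throttle interval sits in bits 11:3 of EITR; the 500000
	 * constant below implies it counts 2-usec units, giving
	 * interrupts/sec = 1000000 / (2 * field).  The write path
	 * reverses this: 4000000 / rate == (500000 / rate) << 3, which
	 * lands the field back in bits 11:3.
	 */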
2497 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2498 	usec = ((reg & 0x0FF8) >> 3);
2499 	if (usec > 0)
2500 		rate = 500000 / usec;
2501 	else
2502 		rate = 0;
2503 	error = sysctl_handle_int(oidp, &rate, 0, req);
2504 	if (error || !req->newptr)
2505 		return (error);
2506 	reg &= ~0xfff; /* default, no limitation */
2507 	ixgbe_max_interrupt_rate = 0;
2508 	if (rate > 0 && rate < 500000) {
2509 		if (rate < 1000)
2510 			rate = 1000;
2511 		ixgbe_max_interrupt_rate = rate;
2512 		reg |= ((4000000/rate) & 0xff8);
2513 	}
2514 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2515 
2516 	return (0);
2517 } /* ixgbe_sysctl_interrupt_rate_handler */
2518 
2519 /************************************************************************
2520  * ixgbe_add_device_sysctls
2521  ************************************************************************/
2522 static void
2523 ixgbe_add_device_sysctls(if_ctx_t ctx)
2524 {
2525 	struct adapter         *adapter = iflib_get_softc(ctx);
2526 	device_t               dev = iflib_get_dev(ctx);
2527 	struct ixgbe_hw        *hw = &adapter->hw;
2528 	struct sysctl_oid_list *child;
2529 	struct sysctl_ctx_list *ctx_list;
2530 
2531 	ctx_list = device_get_sysctl_ctx(dev);
2532 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2533 
2534 	/* Sysctls for all devices */
2535 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2536 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2537 	    IXGBE_SYSCTL_DESC_SET_FC);
2538 
2539 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2540 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2541 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2542 
2543 #ifdef IXGBE_DEBUG
2544 	/* testing sysctls (for all devices) */
2545 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2546 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2547 	    "I", "PCI Power State");
2548 
2549 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2550 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2551 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2552 #endif
2553 	/* for X550 series devices */
2554 	if (hw->mac.type >= ixgbe_mac_X550)
2555 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2556 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2557 		    "I", "DMA Coalesce");
2558 
2559 	/* for WoL-capable devices */
2560 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2561 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2562 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2563 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2564 
2565 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2566 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2567 		    "I", "Enable/Disable Wake Up Filters");
2568 	}
2569 
2570 	/* for X552/X557-AT devices */
2571 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2572 		struct sysctl_oid *phy_node;
2573 		struct sysctl_oid_list *phy_list;
2574 
2575 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2576 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2577 		phy_list = SYSCTL_CHILDREN(phy_node);
2578 
2579 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2580 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2581 		    "I", "Current External PHY Temperature (Celsius)");
2582 
2583 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2584 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2585 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2586 		    "External PHY High Temperature Event Occurred");
2587 	}
2588 
2589 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2590 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2591 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2592 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2593 	}
2594 } /* ixgbe_add_device_sysctls */
2595 
2596 /************************************************************************
2597  * ixgbe_allocate_pci_resources
2598  ************************************************************************/
2599 static int
2600 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2601 {
2602 	struct adapter *adapter = iflib_get_softc(ctx);
2603 	device_t        dev = iflib_get_dev(ctx);
2604 	int             rid;
2605 
2606 	rid = PCIR_BAR(0);
2607 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2608 	    RF_ACTIVE);
2609 
2610 	if (!(adapter->pci_mem)) {
2611 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2612 		return (ENXIO);
2613 	}
2614 
2615 	/* Save bus_space values for READ/WRITE_REG macros */
2616 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2617 	adapter->osdep.mem_bus_space_handle =
2618 	    rman_get_bushandle(adapter->pci_mem);
2619 	/* Set hw values for shared code */
2620 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2621 
2622 	return (0);
2623 } /* ixgbe_allocate_pci_resources */
2624 
2625 /************************************************************************
2626  * ixgbe_if_detach - Device removal routine
2627  *
2628  *   Called when the driver is being removed.
2629  *   Stops the adapter and deallocates all the resources
2630  *   that were allocated for driver operation.
2631  *
2632  *   return 0 on success, positive on failure
2633  ************************************************************************/
2634 static int
2635 ixgbe_if_detach(if_ctx_t ctx)
2636 {
2637 	struct adapter *adapter = iflib_get_softc(ctx);
2638 	device_t       dev = iflib_get_dev(ctx);
2639 	u32            ctrl_ext;
2640 
2641 	INIT_DEBUGOUT("ixgbe_detach: begin");
2642 
2643 	if (ixgbe_pci_iov_detach(dev) != 0) {
2644 		device_printf(dev, "SR-IOV in use; detach first.\n");
2645 		return (EBUSY);
2646 	}
2647 
2648 	iflib_config_gtask_deinit(&adapter->mod_task);
2649 	iflib_config_gtask_deinit(&adapter->msf_task);
2650 	iflib_config_gtask_deinit(&adapter->phy_task);
2651 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2652 		iflib_config_gtask_deinit(&adapter->mbx_task);
2653 
2654 	ixgbe_setup_low_power_mode(ctx);
2655 
2656 	/* let hardware know driver is unloading */
2657 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2658 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2659 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2660 
2661 	ixgbe_free_pci_resources(ctx);
2662 	free(adapter->mta, M_IXGBE);
2663 
2664 	return (0);
2665 } /* ixgbe_if_detach */
2666 
2667 /************************************************************************
2668  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2669  *
2670  *   Prepare the adapter/port for LPLU and/or WoL
2671  ************************************************************************/
2672 static int
2673 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2674 {
2675 	struct adapter  *adapter = iflib_get_softc(ctx);
2676 	struct ixgbe_hw *hw = &adapter->hw;
2677 	device_t        dev = iflib_get_dev(ctx);
2678 	s32             error = 0;
2679 
2680 	if (!hw->wol_enabled)
2681 		ixgbe_set_phy_power(hw, FALSE);
2682 
2683 	/* Limit power management flow to X550EM baseT */
2684 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2685 	    hw->phy.ops.enter_lplu) {
2686 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2687 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2688 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2689 
2690 		/*
2691 		 * Clear Wake Up Status register to prevent any previous wakeup
2692 		 * events from waking us up immediately after we suspend.
2693 		 */
2694 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2695 
2696 		/*
2697 		 * Program the Wakeup Filter Control register with user filter
2698 		 * settings
2699 		 */
2700 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2701 
2702 		/* Enable wakeups and power management in Wakeup Control */
2703 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2704 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2705 
2706 		/* X550EM baseT adapters need a special LPLU flow */
2707 		hw->phy.reset_disable = TRUE;
2708 		ixgbe_if_stop(ctx);
2709 		error = hw->phy.ops.enter_lplu(hw);
2710 		if (error)
2711 			device_printf(dev, "Error entering LPLU: %d\n", error);
2712 		hw->phy.reset_disable = FALSE;
2713 	} else {
2714 		/* Just stop for other adapters */
2715 		ixgbe_if_stop(ctx);
2716 	}
2717 
2718 	return (error);
2719 } /* ixgbe_setup_low_power_mode */
2720 
2721 /************************************************************************
2722  * ixgbe_if_shutdown - Shutdown entry point
2723  ************************************************************************/
2724 static int
2725 ixgbe_if_shutdown(if_ctx_t ctx)
2726 {
2727 	int error = 0;
2728 
2729 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2730 
2731 	error = ixgbe_setup_low_power_mode(ctx);
2732 
2733 	return (error);
2734 } /* ixgbe_if_shutdown */
2735 
2736 /************************************************************************
2737  * ixgbe_if_suspend
2738  *
2739  *   From D0 to D3
2740  ************************************************************************/
2741 static int
2742 ixgbe_if_suspend(if_ctx_t ctx)
2743 {
2744 	int error = 0;
2745 
2746 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2747 
2748 	error = ixgbe_setup_low_power_mode(ctx);
2749 
2750 	return (error);
2751 } /* ixgbe_if_suspend */
2752 
2753 /************************************************************************
2754  * ixgbe_if_resume
2755  *
2756  *   From D3 to D0
2757  ************************************************************************/
2758 static int
2759 ixgbe_if_resume(if_ctx_t ctx)
2760 {
2761 	struct adapter  *adapter = iflib_get_softc(ctx);
2762 	device_t        dev = iflib_get_dev(ctx);
2763 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2764 	struct ixgbe_hw *hw = &adapter->hw;
2765 	u32             wus;
2766 
2767 	INIT_DEBUGOUT("ixgbe_resume: begin");
2768 
2769 	/* Read & clear WUS register */
2770 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2771 	if (wus)
2772 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2773 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2774 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2775 	/* And clear WUFC until next low-power transition */
2776 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2777 
2778 	/*
2779 	 * Required after D3->D0 transition;
2780 	 * will re-advertise all previous advertised speeds
2781 	 */
2782 	if (ifp->if_flags & IFF_UP)
2783 		ixgbe_if_init(ctx);
2784 
2785 	return (0);
2786 } /* ixgbe_if_resume */
2787 
2788 /************************************************************************
2789  * ixgbe_if_mtu_set - Ioctl mtu entry point
2790  *
2791  *   Return 0 on success, EINVAL on failure
2792  ************************************************************************/
2793 static int
2794 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2795 {
2796 	struct adapter *adapter = iflib_get_softc(ctx);
2797 	int error = 0;
2798 
2799 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2800 
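	/* max_frame_size is the on-wire frame: the MTU plus the L2 header overhead (IXGBE_MTU_HDR) */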
2801 	if (mtu > IXGBE_MAX_MTU) {
2802 		error = EINVAL;
2803 	} else {
2804 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2805 	}
2806 
2807 	return (error);
2808 } /* ixgbe_if_mtu_set */
2809 
2810 /************************************************************************
2811  * ixgbe_if_crcstrip_set
2812  ************************************************************************/
2813 static void
2814 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2815 {
2816 	struct adapter *sc = iflib_get_softc(ctx);
2817 	struct ixgbe_hw *hw = &sc->hw;
2818 	/* crc stripping is set in two places:
2819 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2820 	 * IXGBE_RDRXCTL (set by the original driver in
2821 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2822 	 *	We disable the setting when netmap is compiled in).
2823 	 * We update the values here, but also in ixgbe.c because
2824 	 * init_locked sometimes is called outside our control.
2825 	 */
2826 	uint32_t hl, rxc;
2827 
2828 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2829 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2830 #ifdef NETMAP
2831 	if (netmap_verbose)
2832 		D("%s read  HLREG 0x%x rxc 0x%x",
2833 			onoff ? "enter" : "exit", hl, rxc);
2834 #endif
2835 	/* hw requirements ... */
2836 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2837 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2838 	if (onoff && !crcstrip) {
2839 		/* keep the crc. Fast rx */
2840 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2841 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2842 	} else {
2843 		/* reset default mode */
2844 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2845 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2846 	}
2847 #ifdef NETMAP
2848 	if (netmap_verbose)
2849 		D("%s write HLREG 0x%x rxc 0x%x",
2850 			onoff ? "enter" : "exit", hl, rxc);
2851 #endif
2852 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2853 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2854 } /* ixgbe_if_crcstrip_set */
2855 
2856 /*********************************************************************
2857  * ixgbe_if_init - Init entry point
2858  *
2859  *   Used in two ways: as the init entry point for the network
2860  *   stack, and as a hw/sw initialization routine the driver uses
2861  *   to bring the hardware to a consistent state.
2865  **********************************************************************/
2866 void
2867 ixgbe_if_init(if_ctx_t ctx)
2868 {
2869 	struct adapter     *adapter = iflib_get_softc(ctx);
2870 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2871 	device_t           dev = iflib_get_dev(ctx);
2872 	struct ixgbe_hw    *hw = &adapter->hw;
2873 	struct ix_rx_queue *rx_que;
2874 	struct ix_tx_queue *tx_que;
2875 	u32                txdctl, mhadd;
2876 	u32                rxdctl, rxctrl;
2877 	u32                ctrl_ext;
2879 	int                i, j, err;
2880 
2881 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2882 
2883 	/* Queue indices may change with IOV mode */
2884 	ixgbe_align_all_queue_indices(adapter);
2885 
2886 	/* reprogram the RAR[0] in case user changed it. */
2887 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2888 
2889 	/* Get the latest MAC address; the user may have set a LAA */
2890 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2891 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2892 	hw->addr_ctrl.rar_used_count = 1;
2893 
2894 	ixgbe_init_hw(hw);
2895 
2896 	ixgbe_initialize_iov(adapter);
2897 
2898 	ixgbe_initialize_transmit_units(ctx);
2899 
2900 	/* Setup Multicast table */
2901 	ixgbe_if_multi_set(ctx);
2902 
2903 	/* Determine the correct mbuf pool, based on frame size */
2904 	if (adapter->max_frame_size <= MCLBYTES)
2905 		adapter->rx_mbuf_sz = MCLBYTES;
2906 	else
2907 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
2908 
2909 	/* Configure RX settings */
2910 	ixgbe_initialize_receive_units(ctx);
2911 
2912 	/* Enable SDP & MSI-X interrupts based on adapter */
2913 	ixgbe_config_gpie(adapter);
2914 
2915 	/* Set MTU size */
2916 	if (ifp->if_mtu > ETHERMTU) {
2917 		/* aka IXGBE_MAXFRS on 82599 and newer */
2918 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2919 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2920 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2921 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2922 	}
2923 
2924 	/* Now enable all the queues */
2925 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2926 		struct tx_ring *txr = &tx_que->txr;
2927 
2928 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2929 		txdctl |= IXGBE_TXDCTL_ENABLE;
2930 		/* Set WTHRESH to 8, burst writeback */
2931 		txdctl |= (8 << 16);
2932 		/*
2933 		 * When the internal queue falls below PTHRESH (32),
2934 		 * start prefetching as long as there are at least
2935 		 * HTHRESH (1) buffers ready. The values are taken
2936 		 * from the Intel linux driver 3.8.21.
2937 		 * Prefetching enables tx line rate even with 1 queue.
2938 		 */
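		/* Together with the WTHRESH bits set above this yields 0x00080120 */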
2939 		txdctl |= (32 << 0) | (1 << 8);
2940 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2941 	}
2942 
2943 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2944 		struct rx_ring *rxr = &rx_que->rxr;
2945 
2946 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2947 		if (hw->mac.type == ixgbe_mac_82598EB) {
2948 			/*
2949 			 * PTHRESH = 21
2950 			 * HTHRESH = 4
2951 			 * WTHRESH = 8
2952 			 */
2953 			rxdctl &= ~0x3FFFFF;
2954 			rxdctl |= 0x080420;
2955 		}
2956 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2957 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2958 		for (j = 0; j < 10; j++) {
2959 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2960 			    IXGBE_RXDCTL_ENABLE)
2961 				break;
2962 			else
2963 				msec_delay(1);
2964 		}
2965 		wmb();
2966 	}
2967 
2968 	/* Enable Receive engine */
2969 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2970 	if (hw->mac.type == ixgbe_mac_82598EB)
2971 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2972 	rxctrl |= IXGBE_RXCTRL_RXEN;
2973 	ixgbe_enable_rx_dma(hw, rxctrl);
2974 
2975 	/* Set up MSI/MSI-X routing */
2976 	if (ixgbe_enable_msix)  {
2977 		ixgbe_configure_ivars(adapter);
2978 		/* Set up auto-mask */
2979 		if (hw->mac.type == ixgbe_mac_82598EB)
2980 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2981 		else {
2982 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2983 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2984 		}
2985 	} else {  /* Simple settings for Legacy/MSI */
2986 		ixgbe_set_ivar(adapter, 0, 0, 0);
2987 		ixgbe_set_ivar(adapter, 0, 0, 1);
2988 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2989 	}
2990 
2991 	ixgbe_init_fdir(adapter);
2992 
2993 	/*
2994 	 * Check on any SFP devices that
2995 	 * need to be kick-started
2996 	 */
2997 	if (hw->phy.type == ixgbe_phy_none) {
2998 		err = hw->phy.ops.identify(hw);
2999 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3000 			device_printf(dev,
3001 			    "Unsupported SFP+ module type was detected.\n");
3002 			return;
3003 		}
3004 	}
3005 
3006 	/* Set moderation on the Link interrupt */
3007 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3008 
3009 	/* Enable power to the phy. */
3010 	ixgbe_set_phy_power(hw, TRUE);
3011 
3012 	/* Config/Enable Link */
3013 	ixgbe_config_link(adapter);
3014 
3015 	/* Hardware Packet Buffer & Flow Control setup */
3016 	ixgbe_config_delay_values(adapter);
3017 
3018 	/* Initialize the FC settings */
3019 	ixgbe_start_hw(hw);
3020 
3021 	/* Set up VLAN support and filter */
3022 	ixgbe_setup_vlan_hw_support(ctx);
3023 
3024 	/* Setup DMA Coalescing */
3025 	ixgbe_config_dmac(adapter);
3026 
3027 	/* And now turn on interrupts */
3028 	ixgbe_if_enable_intr(ctx);
3029 
3030 	/* Enable the use of the MBX by the VF's */
3031 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3032 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3033 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3034 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3035 	}
3036 
3037 } /* ixgbe_if_init */
3038 
3039 /************************************************************************
3040  * ixgbe_set_ivar
3041  *
3042  *   Setup the correct IVAR register for a particular MSI-X interrupt
3043  *     (yes this is all very magic and confusing :)
3044  *    - entry is the register array entry
3045  *    - vector is the MSI-X vector for this queue
3046  *    - type is RX/TX/MISC
3047  ************************************************************************/
3048 static void
3049 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3050 {
3051 	struct ixgbe_hw *hw = &adapter->hw;
3052 	u32 ivar, index;
3053 
3054 	vector |= IXGBE_IVAR_ALLOC_VAL;
3055 
3056 	switch (hw->mac.type) {
3057 	case ixgbe_mac_82598EB:
3058 		if (type == -1)
3059 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3060 		else
3061 			entry += (type * 64);
3062 		index = (entry >> 2) & 0x1F;
3063 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3064 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3065 		ivar |= (vector << (8 * (entry & 0x3)));
3066 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3067 		break;
3068 	case ixgbe_mac_82599EB:
3069 	case ixgbe_mac_X540:
3070 	case ixgbe_mac_X550:
3071 	case ixgbe_mac_X550EM_x:
3072 	case ixgbe_mac_X550EM_a:
3073 		if (type == -1) { /* MISC IVAR */
3074 			index = (entry & 1) * 8;
3075 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3076 			ivar &= ~(0xFF << index);
3077 			ivar |= (vector << index);
3078 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3079 		} else {          /* RX/TX IVARS */
3080 			index = (16 * (entry & 1)) + (8 * type);
3081 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3082 			ivar &= ~(0xFF << index);
3083 			ivar |= (vector << index);
3084 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3085 		}
		break;
3086 	default:
3087 		break;
3088 	}
3089 } /* ixgbe_set_ivar */
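
/*
 * Worked example (illustrative only): on an 82599-class MAC, mapping
 * RX queue 5 (entry = 5, type = 0) to MSI-X vector 3 gives
 *   index = (16 * (5 & 1)) + (8 * 0) = 16
 * so bits 23:16 of IVAR(5 >> 1) = IVAR(2) receive
 * 3 | IXGBE_IVAR_ALLOC_VAL.
 */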
3090 
3091 /************************************************************************
3092  * ixgbe_configure_ivars
3093  ************************************************************************/
3094 static void
3095 ixgbe_configure_ivars(struct adapter *adapter)
3096 {
3097 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3098 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3099 	u32                newitr;
3100 
3101 	if (ixgbe_max_interrupt_rate > 0)
3102 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3103 	else {
3104 		/*
3105 		 * Disable DMA coalescing if interrupt moderation is
3106 		 * disabled.
3107 		 */
3108 		adapter->dmac = 0;
3109 		newitr = 0;
3110 	}
3111 
3112 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3113 		struct rx_ring *rxr = &rx_que->rxr;
3114 
3115 		/* First the RX queue entry */
3116 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3117 
3118 		/* Set an Initial EITR value */
3119 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3120 	}
3121 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3122 		struct tx_ring *txr = &tx_que->txr;
3123 
3124 		/* ... and the TX */
3125 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3126 	}
3127 	/* For the Link interrupt */
3128 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3129 } /* ixgbe_configure_ivars */
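
/*
 * Example of the EITR math above (numbers are illustrative): a
 * requested ceiling of 8000 interrupts/sec gives 4000000 / 8000 = 500,
 * which the 0x0FF8 mask rounds down to 0x1F0 (496), the value written
 * to each RX queue's EITR register.
 */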
3130 
3131 /************************************************************************
3132  * ixgbe_config_gpie
3133  ************************************************************************/
3134 static void
3135 ixgbe_config_gpie(struct adapter *adapter)
3136 {
3137 	struct ixgbe_hw *hw = &adapter->hw;
3138 	u32             gpie;
3139 
3140 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3141 
3142 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3143 		/* Enable Enhanced MSI-X mode */
3144 		gpie |= IXGBE_GPIE_MSIX_MODE
3145 		     |  IXGBE_GPIE_EIAME
3146 		     |  IXGBE_GPIE_PBA_SUPPORT
3147 		     |  IXGBE_GPIE_OCD;
3148 	}
3149 
3150 	/* Fan Failure Interrupt */
3151 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3152 		gpie |= IXGBE_SDP1_GPIEN;
3153 
3154 	/* Thermal Sensor Interrupt */
3155 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3156 		gpie |= IXGBE_SDP0_GPIEN_X540;
3157 
3158 	/* Link detection */
3159 	switch (hw->mac.type) {
3160 	case ixgbe_mac_82599EB:
3161 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3162 		break;
3163 	case ixgbe_mac_X550EM_x:
3164 	case ixgbe_mac_X550EM_a:
3165 		gpie |= IXGBE_SDP0_GPIEN_X540;
3166 		break;
3167 	default:
3168 		break;
3169 	}
3170 
3171 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3172 
3173 } /* ixgbe_config_gpie */
3174 
3175 /************************************************************************
3176  * ixgbe_config_delay_values
3177  *
3178  *   Requires adapter->max_frame_size to be set.
3179  ************************************************************************/
3180 static void
3181 ixgbe_config_delay_values(struct adapter *adapter)
3182 {
3183 	struct ixgbe_hw *hw = &adapter->hw;
3184 	u32             rxpb, frame, size, tmp;
3185 
3186 	frame = adapter->max_frame_size;
3187 
3188 	/* Calculate High Water */
3189 	switch (hw->mac.type) {
3190 	case ixgbe_mac_X540:
3191 	case ixgbe_mac_X550:
3192 	case ixgbe_mac_X550EM_x:
3193 	case ixgbe_mac_X550EM_a:
3194 		tmp = IXGBE_DV_X540(frame, frame);
3195 		break;
3196 	default:
3197 		tmp = IXGBE_DV(frame, frame);
3198 		break;
3199 	}
3200 	size = IXGBE_BT2KB(tmp);
3201 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3202 	hw->fc.high_water[0] = rxpb - size;
3203 
3204 	/* Now calculate Low Water */
3205 	switch (hw->mac.type) {
3206 	case ixgbe_mac_X540:
3207 	case ixgbe_mac_X550:
3208 	case ixgbe_mac_X550EM_x:
3209 	case ixgbe_mac_X550EM_a:
3210 		tmp = IXGBE_LOW_DV_X540(frame);
3211 		break;
3212 	default:
3213 		tmp = IXGBE_LOW_DV(frame);
3214 		break;
3215 	}
3216 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3217 
3218 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3219 	hw->fc.send_xon = TRUE;
3220 } /* ixgbe_config_delay_values */
3221 
3222 /************************************************************************
3223  * ixgbe_if_multi_set - Multicast Update
3224  *
3225  *   Called whenever the multicast address list is updated.
3226  *   ixgbe_mc_filter_apply() is the per-address helper it uses.
3226  ************************************************************************/
3227 static int
3228 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3229 {
3230 	struct adapter *adapter = arg;
3231 	struct ixgbe_mc_addr *mta = adapter->mta;
3232 
3233 	if (ifma->ifma_addr->sa_family != AF_LINK)
3234 		return (0);
3235 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3236 		return (0);
3237 	bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3238 	    mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3239 	mta[count].vmdq = adapter->pool;
3240 
3241 	return (1);
3242 } /* ixgbe_mc_filter_apply */
3243 
3244 static void
3245 ixgbe_if_multi_set(if_ctx_t ctx)
3246 {
3247 	struct adapter       *adapter = iflib_get_softc(ctx);
3248 	struct ixgbe_mc_addr *mta;
3249 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3250 	u8                   *update_ptr;
3251 	int                  mcnt = 0;
3252 	u32                  fctrl;
3253 
3254 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3255 
3256 	mta = adapter->mta;
3257 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3258 
3259 	mcnt = if_multi_apply(ifp, ixgbe_mc_filter_apply, adapter);
3260 
3261 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3263 	if (ifp->if_flags & IFF_PROMISC)
3264 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3265 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3266 	    ifp->if_flags & IFF_ALLMULTI) {
3267 		fctrl |= IXGBE_FCTRL_MPE;
3268 		fctrl &= ~IXGBE_FCTRL_UPE;
3269 	} else
3270 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3271 
3272 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3273 
3274 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3275 		update_ptr = (u8 *)mta;
3276 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3277 		    ixgbe_mc_array_itr, TRUE);
3278 	}
3279 
3280 } /* ixgbe_if_multi_set */
3281 
3282 /************************************************************************
3283  * ixgbe_mc_array_itr
3284  *
3285  *   An iterator function needed by the multicast shared code.
3286  *   It feeds the shared code routine the addresses in the
3287  *   array of ixgbe_if_multi_set() one by one.
3288  ************************************************************************/
3289 static u8 *
3290 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3291 {
3292 	struct ixgbe_mc_addr *mta;
3293 
3294 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3295 	*vmdq = mta->vmdq;
3296 
3297 	*update_ptr = (u8*)(mta + 1);
3298 
3299 	return (mta->addr);
3300 } /* ixgbe_mc_array_itr */
3301 
3302 /************************************************************************
3303  * ixgbe_if_timer - Timer routine
3304  *
3305  *   Checks for link status, updates statistics,
3306  *   and runs the watchdog check.
3307  ************************************************************************/
3308 static void
3309 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3310 {
3311 	struct adapter *adapter = iflib_get_softc(ctx);
3312 
3313 	if (qid != 0)
3314 		return;
3315 
3316 	/* Check for pluggable optics */
3317 	if (adapter->sfp_probe)
3318 		if (!ixgbe_sfp_probe(ctx))
3319 			return; /* Nothing to do */
3320 
3321 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3322 	    &adapter->link_up, 0);
3323 
3324 	/* Fire off the adminq task */
3325 	iflib_admin_intr_deferred(ctx);
3326 
3327 } /* ixgbe_if_timer */
3328 
3329 /************************************************************************
3330  * ixgbe_sfp_probe
3331  *
3332  *   Determine if a port had optics inserted.
3333  ************************************************************************/
3334 static bool
3335 ixgbe_sfp_probe(if_ctx_t ctx)
3336 {
3337 	struct adapter  *adapter = iflib_get_softc(ctx);
3338 	struct ixgbe_hw *hw = &adapter->hw;
3339 	device_t        dev = iflib_get_dev(ctx);
3340 	bool            result = FALSE;
3341 
3342 	if ((hw->phy.type == ixgbe_phy_nl) &&
3343 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3344 		s32 ret = hw->phy.ops.identify_sfp(hw);
3345 		if (ret)
3346 			goto out;
3347 		ret = hw->phy.ops.reset(hw);
3348 		adapter->sfp_probe = FALSE;
3349 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3350 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3351 			device_printf(dev,
3352 			    "Reload driver with supported module.\n");
3353 			goto out;
3354 		} else
3355 			device_printf(dev, "SFP+ module detected!\n");
3356 		/* We now have supported optics */
3357 		result = TRUE;
3358 	}
3359 out:
3360 
3361 	return (result);
3362 } /* ixgbe_sfp_probe */
3363 
3364 /************************************************************************
3365  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3366  ************************************************************************/
3367 static void
3368 ixgbe_handle_mod(void *context)
3369 {
3370 	if_ctx_t        ctx = context;
3371 	struct adapter  *adapter = iflib_get_softc(ctx);
3372 	struct ixgbe_hw *hw = &adapter->hw;
3373 	device_t        dev = iflib_get_dev(ctx);
3374 	u32             err, cage_full = 0;
3375 
3376 	adapter->sfp_reinit = 1;
3377 	if (adapter->hw.need_crosstalk_fix) {
3378 		switch (hw->mac.type) {
3379 		case ixgbe_mac_82599EB:
3380 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3381 			    IXGBE_ESDP_SDP2;
3382 			break;
3383 		case ixgbe_mac_X550EM_x:
3384 		case ixgbe_mac_X550EM_a:
3385 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3386 			    IXGBE_ESDP_SDP0;
3387 			break;
3388 		default:
3389 			break;
3390 		}
3391 
3392 		if (!cage_full)
3393 			goto handle_mod_out;
3394 	}
3395 
3396 	err = hw->phy.ops.identify_sfp(hw);
3397 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3398 		device_printf(dev,
3399 		    "Unsupported SFP+ module type was detected.\n");
3400 		goto handle_mod_out;
3401 	}
3402 
3403 	if (hw->mac.type == ixgbe_mac_82598EB)
3404 		err = hw->phy.ops.reset(hw);
3405 	else
3406 		err = hw->mac.ops.setup_sfp(hw);
3407 
3408 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3409 		device_printf(dev,
3410 		    "Setup failure - unsupported SFP+ module type.\n");
3411 		goto handle_mod_out;
3412 	}
3413 	GROUPTASK_ENQUEUE(&adapter->msf_task);
3414 	return;
3415 
3416 handle_mod_out:
3417 	adapter->sfp_reinit = 0;
3418 } /* ixgbe_handle_mod */
3419 
3420 
3421 /************************************************************************
3422  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3423  ************************************************************************/
3424 static void
3425 ixgbe_handle_msf(void *context)
3426 {
3427 	if_ctx_t        ctx = context;
3428 	struct adapter  *adapter = iflib_get_softc(ctx);
3429 	struct ixgbe_hw *hw = &adapter->hw;
3430 	u32             autoneg;
3431 	bool            negotiate;
3432 
3433 	if (adapter->sfp_reinit != 1)
3434 		return;
3435 
3436 	/* ixgbe_get_supported_physical_layer() will call hw->phy.ops.identify_sfp() */
3437 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3438 
3439 	autoneg = hw->phy.autoneg_advertised;
3440 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3441 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3442 	if (hw->mac.ops.setup_link)
3443 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3444 
3445 	/* Adjust media types shown in ifconfig */
3446 	ifmedia_removeall(adapter->media);
3447 	ixgbe_add_media_types(adapter->ctx);
3448 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3449 
3450 	adapter->sfp_reinit = 0;
3451 } /* ixgbe_handle_msf */
3452 
3453 /************************************************************************
3454  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3455  ************************************************************************/
3456 static void
3457 ixgbe_handle_phy(void *context)
3458 {
3459 	if_ctx_t        ctx = context;
3460 	struct adapter  *adapter = iflib_get_softc(ctx);
3461 	struct ixgbe_hw *hw = &adapter->hw;
3462 	int             error;
3463 
3464 	error = hw->phy.ops.handle_lasi(hw);
3465 	if (error == IXGBE_ERR_OVERTEMP)
3466 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3467 	else if (error)
3468 		device_printf(adapter->dev,
3469 		    "Error handling LASI interrupt: %d\n", error);
3470 } /* ixgbe_handle_phy */
3471 
3472 /************************************************************************
3473  * ixgbe_if_stop - Stop the hardware
3474  *
3475  *   Disables all traffic on the adapter by issuing a
3476  *   global reset on the MAC and deallocates TX/RX buffers.
3477  ************************************************************************/
3478 static void
3479 ixgbe_if_stop(if_ctx_t ctx)
3480 {
3481 	struct adapter  *adapter = iflib_get_softc(ctx);
3482 	struct ixgbe_hw *hw = &adapter->hw;
3483 
3484 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3485 
3486 	ixgbe_reset_hw(hw);
3487 	hw->adapter_stopped = FALSE;
3488 	ixgbe_stop_adapter(hw);
3489 	if (hw->mac.type == ixgbe_mac_82599EB)
3490 		ixgbe_stop_mac_link_on_d3_82599(hw);
3491 	/* Turn off the laser - noop with no optics */
3492 	ixgbe_disable_tx_laser(hw);
3493 
3494 	/* Update the stack */
3495 	adapter->link_up = FALSE;
3496 	ixgbe_if_update_admin_status(ctx);
3497 
3498 	/* reprogram the RAR[0] in case user changed it. */
3499 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3500 
3501 	return;
3502 } /* ixgbe_if_stop */
3503 
3504 /************************************************************************
3505  * ixgbe_if_update_admin_status - Update OS on link state
3506  *
3507  * Note: Only updates the OS on the cached link state.
3508  *       The real check of the hardware only happens with
3509  *       a link interrupt.
3510  ************************************************************************/
3511 static void
3512 ixgbe_if_update_admin_status(if_ctx_t ctx)
3513 {
3514 	struct adapter *adapter = iflib_get_softc(ctx);
3515 	device_t       dev = iflib_get_dev(ctx);
3516 
3517 	if (adapter->link_up) {
3518 		if (adapter->link_active == FALSE) {
3519 			if (bootverbose)
3520 				device_printf(dev, "Link is up %d Gbps %s\n",
3521 				    /* 128 == IXGBE_LINK_SPEED_10GB_FULL */
3522 				    ((adapter->link_speed == 128) ? 10 : 1),
3523 				    "Full Duplex");
3523 			adapter->link_active = TRUE;
3524 			/* Update any Flow Control changes */
3525 			ixgbe_fc_enable(&adapter->hw);
3526 			/* Update DMA coalescing config */
3527 			ixgbe_config_dmac(adapter);
3528 			/* should actually be negotiated value */
3529 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3530 
3531 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3532 				ixgbe_ping_all_vfs(adapter);
3533 		}
3534 	} else { /* Link down */
3535 		if (adapter->link_active == TRUE) {
3536 			if (bootverbose)
3537 				device_printf(dev, "Link is Down\n");
3538 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3539 			adapter->link_active = FALSE;
3540 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3541 				ixgbe_ping_all_vfs(adapter);
3542 		}
3543 	}
3544 
3545 	ixgbe_update_stats_counters(adapter);
3546 
3547 	/* Re-enable link interrupts */
3548 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3549 } /* ixgbe_if_update_admin_status */
3550 
3551 /************************************************************************
3552  * ixgbe_config_dmac - Configure DMA Coalescing
3553  ************************************************************************/
3554 static void
3555 ixgbe_config_dmac(struct adapter *adapter)
3556 {
3557 	struct ixgbe_hw          *hw = &adapter->hw;
3558 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3559 
3560 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3561 		return;
3562 
3563 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3564 	    dcfg->link_speed ^ adapter->link_speed) {
3565 		dcfg->watchdog_timer = adapter->dmac;
3566 		dcfg->fcoe_en = FALSE;
3567 		dcfg->link_speed = adapter->link_speed;
3568 		dcfg->num_tcs = 1;
3569 
3570 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3571 		    dcfg->watchdog_timer, dcfg->link_speed);
3572 
3573 		hw->mac.ops.dmac_config(hw);
3574 	}
3575 } /* ixgbe_config_dmac */
3576 
3577 /************************************************************************
3578  * ixgbe_if_enable_intr
3579  ************************************************************************/
3580 void
3581 ixgbe_if_enable_intr(if_ctx_t ctx)
3582 {
3583 	struct adapter     *adapter = iflib_get_softc(ctx);
3584 	struct ixgbe_hw    *hw = &adapter->hw;
3585 	struct ix_rx_queue *que = adapter->rx_queues;
3586 	u32                mask, fwsm;
3587 
3588 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3589 
3590 	switch (adapter->hw.mac.type) {
3591 	case ixgbe_mac_82599EB:
3592 		mask |= IXGBE_EIMS_ECC;
3593 		/* Temperature sensor on some adapters */
3594 		mask |= IXGBE_EIMS_GPI_SDP0;
3595 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3596 		mask |= IXGBE_EIMS_GPI_SDP1;
3597 		mask |= IXGBE_EIMS_GPI_SDP2;
3598 		break;
3599 	case ixgbe_mac_X540:
3600 		/* Detect if Thermal Sensor is enabled */
3601 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3602 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3603 			mask |= IXGBE_EIMS_TS;
3604 		mask |= IXGBE_EIMS_ECC;
3605 		break;
3606 	case ixgbe_mac_X550:
3607 		/* MAC thermal sensor is automatically enabled */
3608 		mask |= IXGBE_EIMS_TS;
3609 		mask |= IXGBE_EIMS_ECC;
3610 		break;
3611 	case ixgbe_mac_X550EM_x:
3612 	case ixgbe_mac_X550EM_a:
3613 		/* Some devices use SDP0 for important information */
3614 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3615 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3616 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3617 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3618 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3619 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3620 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3621 		mask |= IXGBE_EIMS_ECC;
3622 		break;
3623 	default:
3624 		break;
3625 	}
3626 
3627 	/* Enable Fan Failure detection */
3628 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3629 		mask |= IXGBE_EIMS_GPI_SDP1;
3630 	/* Enable SR-IOV */
3631 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3632 		mask |= IXGBE_EIMS_MAILBOX;
3633 	/* Enable Flow Director */
3634 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3635 		mask |= IXGBE_EIMS_FLOW_DIR;
3636 
3637 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3638 
3639 	/* With MSI-X we use auto clear */
3640 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3641 		mask = IXGBE_EIMS_ENABLE_MASK;
3642 		/* Don't autoclear Link */
3643 		mask &= ~IXGBE_EIMS_OTHER;
3644 		mask &= ~IXGBE_EIMS_LSC;
3645 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3646 			mask &= ~IXGBE_EIMS_MAILBOX;
3647 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3648 	}
3649 
3650 	/*
3651 	 * Now enable all queues, this is done separately to
3652 	 * allow for handling the extended (beyond 32) MSI-X
3653 	 * vectors that can be used by 82599
3654 	 */
3655 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3656 		ixgbe_enable_queue(adapter, que->msix);
3657 
3658 	IXGBE_WRITE_FLUSH(hw);
3659 
3660 } /* ixgbe_if_enable_intr */
3661 
3662 /************************************************************************
3663  * ixgbe_if_disable_intr
3664  ************************************************************************/
3665 static void
3666 ixgbe_if_disable_intr(if_ctx_t ctx)
3667 {
3668 	struct adapter *adapter = iflib_get_softc(ctx);
3669 
3670 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3671 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3672 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3673 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3674 	} else {
3675 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3676 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3677 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3678 	}
3679 	IXGBE_WRITE_FLUSH(&adapter->hw);
3680 
3681 } /* ixgbe_if_disable_intr */
3682 
3683 /************************************************************************
3684  * ixgbe_if_rx_queue_intr_enable
3685  ************************************************************************/
3686 static int
3687 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3688 {
3689 	struct adapter     *adapter = iflib_get_softc(ctx);
3690 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3691 
3692 	ixgbe_enable_queue(adapter, que->rxr.me);
3693 
3694 	return (0);
3695 } /* ixgbe_if_rx_queue_intr_enable */
3696 
3697 /************************************************************************
3698  * ixgbe_enable_queue
3699  ************************************************************************/
3700 static void
3701 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3702 {
3703 	struct ixgbe_hw *hw = &adapter->hw;
3704 	u64             queue = (u64)1 << vector; /* avoid 32-bit shift overflow */
3705 	u32             mask;
3706 
3707 	if (hw->mac.type == ixgbe_mac_82598EB) {
3708 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3709 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3710 	} else {
3711 		mask = (queue & 0xFFFFFFFF);
3712 		if (mask)
3713 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3714 		mask = (queue >> 32);
3715 		if (mask)
3716 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3717 	}
3718 } /* ixgbe_enable_queue */
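
/*
 * Example (illustrative): enabling MSI-X vector 35 on a non-82598 MAC
 * sets queue bit 35, so (queue >> 32) == 0x8 and bit 3 of EIMS_EX(1)
 * is written; vectors 0-31 land in EIMS_EX(0) instead.
 */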
3719 
3720 /************************************************************************
3721  * ixgbe_disable_queue
3722  ************************************************************************/
3723 static void
3724 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3725 {
3726 	struct ixgbe_hw *hw = &adapter->hw;
3727 	u64             queue = (u64)1 << vector; /* avoid 32-bit shift overflow */
3728 	u32             mask;
3729 
3730 	if (hw->mac.type == ixgbe_mac_82598EB) {
3731 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3732 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3733 	} else {
3734 		mask = (queue & 0xFFFFFFFF);
3735 		if (mask)
3736 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3737 		mask = (queue >> 32);
3738 		if (mask)
3739 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3740 	}
3741 } /* ixgbe_disable_queue */
3742 
3743 /************************************************************************
3744  * ixgbe_intr - Legacy Interrupt Service Routine
3745  ************************************************************************/
3746 int
3747 ixgbe_intr(void *arg)
3748 {
3749 	struct adapter     *adapter = arg;
3750 	struct ix_rx_queue *que = adapter->rx_queues;
3751 	struct ixgbe_hw    *hw = &adapter->hw;
3752 	if_ctx_t           ctx = adapter->ctx;
3753 	u32                eicr, eicr_mask;
3754 
3755 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3756 
3757 	++que->irqs;
3758 	if (eicr == 0) {
3759 		ixgbe_if_enable_intr(ctx);
3760 		return (FILTER_HANDLED);
3761 	}
3762 
3763 	/* Check for fan failure */
3764 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3765 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3766 		device_printf(adapter->dev,
3767 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3768 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3769 	}
3770 
3771 	/* Link status change */
3772 	if (eicr & IXGBE_EICR_LSC) {
3773 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3774 		iflib_admin_intr_deferred(ctx);
3775 	}
3776 
3777 	if (ixgbe_is_sfp(hw)) {
3778 		/* Pluggable optics-related interrupt */
3779 		if (hw->mac.type >= ixgbe_mac_X540)
3780 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3781 		else
3782 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3783 
3784 		if (eicr & eicr_mask) {
3785 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3786 			GROUPTASK_ENQUEUE(&adapter->mod_task);
3787 		}
3788 
3789 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3790 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3791 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3792 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3793 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
3794 				GROUPTASK_ENQUEUE(&adapter->msf_task);
3795 		}
3796 	}
3797 
3798 	/* External PHY interrupt */
3799 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3800 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3801 		GROUPTASK_ENQUEUE(&adapter->phy_task);
3802 
3803 	return (FILTER_SCHEDULE_THREAD);
3804 } /* ixgbe_intr */
3805 
3806 /************************************************************************
3807  * ixgbe_free_pci_resources
3808  ************************************************************************/
3809 static void
3810 ixgbe_free_pci_resources(if_ctx_t ctx)
3811 {
3812 	struct adapter *adapter = iflib_get_softc(ctx);
3813 	struct         ix_rx_queue *que = adapter->rx_queues;
3814 	device_t       dev = iflib_get_dev(ctx);
3815 
3816 	/* Release all msix queue resources */
3817 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3818 		iflib_irq_free(ctx, &adapter->irq);
3819 
3820 	if (que != NULL) {
3821 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3822 			iflib_irq_free(ctx, &que->que_irq);
3823 		}
3824 	}
3825 
3826 	/*
3827 	 * Release the memory BAR mapping (the link/admin interrupt was
3828 	 * freed together with the queue vectors above).
3829 	 */
3829 	if (adapter->pci_mem != NULL)
3830 		bus_release_resource(dev, SYS_RES_MEMORY,
3831 		                     PCIR_BAR(0), adapter->pci_mem);
3832 
3833 } /* ixgbe_free_pci_resources */
3834 
3835 /************************************************************************
3836  * ixgbe_sysctl_flowcntl
3837  *
3838  *   SYSCTL wrapper around setting Flow Control
3839  ************************************************************************/
3840 static int
3841 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3842 {
3843 	struct adapter *adapter;
3844 	int            error, fc;
3845 
3846 	adapter = (struct adapter *)arg1;
3847 	fc = adapter->hw.fc.current_mode;
3848 
3849 	error = sysctl_handle_int(oidp, &fc, 0, req);
3850 	if ((error) || (req->newptr == NULL))
3851 		return (error);
3852 
3853 	/* Don't bother if it's not changed */
3854 	if (fc == adapter->hw.fc.current_mode)
3855 		return (0);
3856 
3857 	return ixgbe_set_flowcntl(adapter, fc);
3858 } /* ixgbe_sysctl_flowcntl */
3859 
3860 /************************************************************************
3861  * ixgbe_set_flowcntl - Set flow control
3862  *
3863  *   Flow control values:
3864  *     0 - off
3865  *     1 - rx pause
3866  *     2 - tx pause
3867  *     3 - full
3868  ************************************************************************/
3869 static int
3870 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3871 {
3872 	switch (fc) {
3873 	case ixgbe_fc_rx_pause:
3874 	case ixgbe_fc_tx_pause:
3875 	case ixgbe_fc_full:
3876 		adapter->hw.fc.requested_mode = fc;
3877 		if (adapter->num_rx_queues > 1)
3878 			ixgbe_disable_rx_drop(adapter);
3879 		break;
3880 	case ixgbe_fc_none:
3881 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3882 		if (adapter->num_rx_queues > 1)
3883 			ixgbe_enable_rx_drop(adapter);
3884 		break;
3885 	default:
3886 		return (EINVAL);
3887 	}
3888 
3889 	/* Don't autoneg if forcing a value */
3890 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3891 	ixgbe_fc_enable(&adapter->hw);
3892 
3893 	return (0);
3894 } /* ixgbe_set_flowcntl */
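
/*
 * Typical use (assuming the node is exposed as dev.ix.<unit>.fc, as in
 * ix(4)):
 *   sysctl dev.ix.0.fc=3    # request full RX/TX pause
 *   sysctl dev.ix.0.fc=0    # disable flow control (and, with multiple
 *                           # queues, enable per-queue RX drop)
 */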
3895 
3896 /************************************************************************
3897  * ixgbe_enable_rx_drop
3898  *
3899  *   Enable the hardware to drop packets when the buffer is
3900  *   full. This is useful with multiqueue, so that no single
3901  *   queue being full stalls the entire RX engine. We only
3902  *   enable this when Multiqueue is enabled AND Flow Control
3903  *   is disabled.
3904  ************************************************************************/
3905 static void
3906 ixgbe_enable_rx_drop(struct adapter *adapter)
3907 {
3908 	struct ixgbe_hw *hw = &adapter->hw;
3909 	struct rx_ring  *rxr;
3910 	u32             srrctl;
3911 
3912 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3913 		rxr = &adapter->rx_queues[i].rxr;
3914 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3915 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3916 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3917 	}
3918 
3919 	/* enable drop for each vf */
3920 	for (int i = 0; i < adapter->num_vfs; i++) {
3921 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3922 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3923 		                IXGBE_QDE_ENABLE));
3924 	}
3925 } /* ixgbe_enable_rx_drop */
3926 
3927 /************************************************************************
3928  * ixgbe_disable_rx_drop
3929  ************************************************************************/
3930 static void
3931 ixgbe_disable_rx_drop(struct adapter *adapter)
3932 {
3933 	struct ixgbe_hw *hw = &adapter->hw;
3934 	struct rx_ring  *rxr;
3935 	u32             srrctl;
3936 
3937 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3938 		rxr = &adapter->rx_queues[i].rxr;
3939 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3940 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3941 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3942 	}
3943 
3944 	/* disable drop for each vf */
3945 	for (int i = 0; i < adapter->num_vfs; i++) {
3946 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3947 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3948 	}
3949 } /* ixgbe_disable_rx_drop */
3950 
3951 /************************************************************************
3952  * ixgbe_sysctl_advertise
3953  *
3954  *   SYSCTL wrapper around setting advertised speed
3955  ************************************************************************/
3956 static int
3957 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3958 {
3959 	struct adapter *adapter;
3960 	int            error, advertise;
3961 
3962 	adapter = (struct adapter *)arg1;
3963 	advertise = adapter->advertise;
3964 
3965 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3966 	if ((error) || (req->newptr == NULL))
3967 		return (error);
3968 
3969 	return ixgbe_set_advertise(adapter, advertise);
3970 } /* ixgbe_sysctl_advertise */
3971 
3972 /************************************************************************
3973  * ixgbe_set_advertise - Control advertised link speed
3974  *
3975  *   Flags:
3976  *     0x1 - advertise 100 Mb
3977  *     0x2 - advertise 1G
3978  *     0x4 - advertise 10G
3979  *     0x8 - advertise 10 Mb (yes, Mb)
3980  ************************************************************************/
3981 static int
3982 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3983 {
3984 	device_t         dev = iflib_get_dev(adapter->ctx);
3985 	struct ixgbe_hw  *hw;
3986 	ixgbe_link_speed speed = 0;
3987 	ixgbe_link_speed link_caps = 0;
3988 	s32              err = IXGBE_NOT_IMPLEMENTED;
3989 	bool             negotiate = FALSE;
3990 
3991 	/* Checks to validate new value */
3992 	if (adapter->advertise == advertise) /* no change */
3993 		return (0);
3994 
3995 	hw = &adapter->hw;
3996 
3997 	/* No speed changes for backplane media */
3998 	if (hw->phy.media_type == ixgbe_media_type_backplane)
3999 		return (ENODEV);
4000 
4001 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4002 	      (hw->phy.multispeed_fiber))) {
4003 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4004 		return (EINVAL);
4005 	}
4006 
4007 	if (advertise < 0x1 || advertise > 0xF) {
4008 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4009 		return (EINVAL);
4010 	}
4011 
4012 	if (hw->mac.ops.get_link_capabilities) {
4013 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4014 		    &negotiate);
4015 		if (err != IXGBE_SUCCESS) {
4016 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4017 			return (ENODEV);
4018 		}
4019 	}
4020 
4021 	/* Set new value and report new advertised mode */
4022 	if (advertise & 0x1) {
4023 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4024 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4025 			return (EINVAL);
4026 		}
4027 		speed |= IXGBE_LINK_SPEED_100_FULL;
4028 	}
4029 	if (advertise & 0x2) {
4030 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4031 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4032 			return (EINVAL);
4033 		}
4034 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4035 	}
4036 	if (advertise & 0x4) {
4037 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4038 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4039 			return (EINVAL);
4040 		}
4041 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4042 	}
4043 	if (advertise & 0x8) {
4044 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4045 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4046 			return (EINVAL);
4047 		}
4048 		speed |= IXGBE_LINK_SPEED_10_FULL;
4049 	}
4050 
4051 	hw->mac.autotry_restart = TRUE;
4052 	hw->mac.ops.setup_link(hw, speed, TRUE);
4053 	adapter->advertise = advertise;
4054 
4055 	return (0);
4056 } /* ixgbe_set_advertise */
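
/*
 * Example (assuming the sysctl is exposed as dev.ix.<unit>.advertise_speed):
 *   sysctl dev.ix.0.advertise_speed=0x6
 * requests 1G | 10G (0x2 | 0x4) on copper or multispeed-fiber ports;
 * backplane media and unsupported speeds are rejected as shown above.
 */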
4057 
4058 /************************************************************************
4059  * ixgbe_get_advertise - Get current advertised speed settings
4060  *
4061  *   Formatted for sysctl usage.
4062  *   Flags:
4063  *     0x1 - advertise 100 Mb
4064  *     0x2 - advertise 1G
4065  *     0x4 - advertise 10G
4066  *     0x8 - advertise 10 Mb (yes, Mb)
4067  ************************************************************************/
4068 static int
4069 ixgbe_get_advertise(struct adapter *adapter)
4070 {
4071 	struct ixgbe_hw  *hw = &adapter->hw;
4072 	int              speed;
4073 	ixgbe_link_speed link_caps = 0;
4074 	s32              err;
4075 	bool             negotiate = FALSE;
4076 
4077 	/*
4078 	 * Advertised speed means nothing unless it's copper or
4079 	 * multi-speed fiber
4080 	 */
4081 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4082 	    !(hw->phy.multispeed_fiber))
4083 		return (0);
4084 
4085 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4086 	if (err != IXGBE_SUCCESS)
4087 		return (0);
4088 
4089 	speed =
4090 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4091 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4092 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4093 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4094 
4095 	return speed;
4096 } /* ixgbe_get_advertise */
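
/*
 * Example (illustrative): a copper port whose link_caps report
 * 100M/1G/10G full duplex returns 0x1 | 0x2 | 0x4 = 0x7, matching the
 * flag encoding used by ixgbe_set_advertise() above.
 */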
4097 
4098 /************************************************************************
4099  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4100  *
4101  *   Control values:
4102  *     0/1 - off / on (use default value of 1000)
4103  *
4104  *     Legal timer values are:
4105  *     50,100,250,500,1000,2000,5000,10000
4106  *
4107  *     Turning off interrupt moderation will also turn this off.
4108  ************************************************************************/
4109 static int
4110 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4111 {
4112 	struct adapter *adapter = (struct adapter *)arg1;
4113 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4114 	int            error;
4115 	u16            newval;
4116 
4117 	newval = adapter->dmac;
4118 	error = sysctl_handle_16(oidp, &newval, 0, req);
4119 	if ((error) || (req->newptr == NULL))
4120 		return (error);
4121 
4122 	switch (newval) {
4123 	case 0:
4124 		/* Disabled */
4125 		adapter->dmac = 0;
4126 		break;
4127 	case 1:
4128 		/* Enable and use default */
4129 		adapter->dmac = 1000;
4130 		break;
4131 	case 50:
4132 	case 100:
4133 	case 250:
4134 	case 500:
4135 	case 1000:
4136 	case 2000:
4137 	case 5000:
4138 	case 10000:
4139 		/* Legal values - allow */
4140 		adapter->dmac = newval;
4141 		break;
4142 	default:
4143 		/* Do nothing, illegal value */
4144 		return (EINVAL);
4145 	}
4146 
4147 	/* Re-initialize hardware if it's already running */
4148 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4149 		ifp->if_init(ifp);
4150 
4151 	return (0);
4152 } /* ixgbe_sysctl_dmac */
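
/*
 * Example (assuming the node is exposed as dev.ix.<unit>.dmac):
 *   sysctl dev.ix.0.dmac=1   # enable with the default timer value (1000)
 *   sysctl dev.ix.0.dmac=0   # disable
 * Any value outside the list above returns EINVAL.
 */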
4153 
4154 #ifdef IXGBE_DEBUG
4155 /************************************************************************
4156  * ixgbe_sysctl_power_state
4157  *
4158  *   Sysctl to test power states
4159  *   Values:
4160  *     0      - set device to D0
4161  *     3      - set device to D3
4162  *     (none) - get current device power state
4163  ************************************************************************/
4164 static int
4165 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4166 {
4167 	struct adapter *adapter = (struct adapter *)arg1;
4168 	device_t       dev = adapter->dev;
4169 	int            curr_ps, new_ps, error = 0;
4170 
4171 	curr_ps = new_ps = pci_get_powerstate(dev);
4172 
4173 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4174 	if ((error) || (req->newptr == NULL))
4175 		return (error);
4176 
4177 	if (new_ps == curr_ps)
4178 		return (0);
4179 
4180 	if (new_ps == 3 && curr_ps == 0)
4181 		error = DEVICE_SUSPEND(dev);
4182 	else if (new_ps == 0 && curr_ps == 3)
4183 		error = DEVICE_RESUME(dev);
4184 	else
4185 		return (EINVAL);
4186 
4187 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4188 
4189 	return (error);
4190 } /* ixgbe_sysctl_power_state */
4191 #endif
4192 
4193 /************************************************************************
4194  * ixgbe_sysctl_wol_enable
4195  *
4196  *   Sysctl to enable/disable the WoL capability,
4197  *   if supported by the adapter.
4198  *
4199  *   Values:
4200  *     0 - disabled
4201  *     1 - enabled
4202  ************************************************************************/
4203 static int
4204 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4205 {
4206 	struct adapter  *adapter = (struct adapter *)arg1;
4207 	struct ixgbe_hw *hw = &adapter->hw;
4208 	int             new_wol_enabled;
4209 	int             error = 0;
4210 
4211 	new_wol_enabled = hw->wol_enabled;
4212 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4213 	if ((error) || (req->newptr == NULL))
4214 		return (error);
4215 	new_wol_enabled = !!(new_wol_enabled);
4216 	if (new_wol_enabled == hw->wol_enabled)
4217 		return (0);
4218 
4219 	if (new_wol_enabled > 0 && !adapter->wol_support)
4220 		return (ENODEV);
4221 	else
4222 		hw->wol_enabled = new_wol_enabled;
4223 
4224 	return (0);
4225 } /* ixgbe_sysctl_wol_enable */
4226 
4227 /************************************************************************
4228  * ixgbe_sysctl_wufc - Wake Up Filter Control
4229  *
4230  *   Sysctl to enable/disable the types of packets that the
4231  *   adapter will wake up on upon receipt.
4232  *   Flags:
4233  *     0x1  - Link Status Change
4234  *     0x2  - Magic Packet
4235  *     0x4  - Direct Exact
4236  *     0x8  - Directed Multicast
4237  *     0x10 - Broadcast
4238  *     0x20 - ARP/IPv4 Request Packet
4239  *     0x40 - Direct IPv4 Packet
4240  *     0x80 - Direct IPv6 Packet
4241  *
4242  *   Settings not listed above will cause the sysctl to return an error.
4243  ************************************************************************/
4244 static int
4245 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4246 {
4247 	struct adapter *adapter = (struct adapter *)arg1;
4248 	int            error = 0;
4249 	u32            new_wufc;
4250 
4251 	new_wufc = adapter->wufc;
4252 
4253 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4254 	if ((error) || (req->newptr == NULL))
4255 		return (error);
4256 	if (new_wufc == adapter->wufc)
4257 		return (0);
4258 
4259 	if (new_wufc & 0xffffff00)
4260 		return (EINVAL);
4261 
4262 	new_wufc &= 0xff;
4263 	new_wufc |= (0xffffff & adapter->wufc);
4264 	adapter->wufc = new_wufc;
4265 
4266 	return (0);
4267 } /* ixgbe_sysctl_wufc */
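
/*
 * Example (assuming the node is exposed as dev.ix.<unit>.wufc): a value
 * of 0x3 arms wake-up on Link Status Change (0x1) plus Magic Packet
 * (0x2); anything with bits above 0xff set is rejected with EINVAL.
 */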
4268 
4269 #ifdef IXGBE_DEBUG
4270 /************************************************************************
4271  * ixgbe_sysctl_print_rss_config
4272  ************************************************************************/
4273 static int
4274 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4275 {
4276 	struct adapter  *adapter = (struct adapter *)arg1;
4277 	struct ixgbe_hw *hw = &adapter->hw;
4278 	device_t        dev = adapter->dev;
4279 	struct sbuf     *buf;
4280 	int             error = 0, reta_size;
4281 	u32             reg;
4282 
4283 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4284 	if (!buf) {
4285 		device_printf(dev, "Could not allocate sbuf for output.\n");
4286 		return (ENOMEM);
4287 	}
4288 
4289 	/* TODO: use sbufs to make a string to print out */
4290 	/* Set multiplier for RETA setup and table size based on MAC */
4291 	switch (adapter->hw.mac.type) {
4292 	case ixgbe_mac_X550:
4293 	case ixgbe_mac_X550EM_x:
4294 	case ixgbe_mac_X550EM_a:
4295 		reta_size = 128;
4296 		break;
4297 	default:
4298 		reta_size = 32;
4299 		break;
4300 	}
4301 
4302 	/* Print out the redirection table */
4303 	sbuf_cat(buf, "\n");
4304 	for (int i = 0; i < reta_size; i++) {
4305 		if (i < 32) {
4306 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4307 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4308 		} else {
4309 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4310 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4311 		}
4312 	}
4313 
4314 	/* TODO: print more config */
4315 
4316 	error = sbuf_finish(buf);
4317 	if (error)
4318 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4319 
4320 	sbuf_delete(buf);
4321 
4322 	return (0);
4323 } /* ixgbe_sysctl_print_rss_config */
4324 #endif /* IXGBE_DEBUG */
4325 
4326 /************************************************************************
4327  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4328  *
4329  *   For X552/X557-AT devices using an external PHY
4330  ************************************************************************/
4331 static int
4332 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4333 {
4334 	struct adapter  *adapter = (struct adapter *)arg1;
4335 	struct ixgbe_hw *hw = &adapter->hw;
4336 	u16             reg;
4337 
4338 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4339 		device_printf(iflib_get_dev(adapter->ctx),
4340 		    "Device has no supported external thermal sensor.\n");
4341 		return (ENODEV);
4342 	}
4343 
4344 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4345 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4346 		device_printf(iflib_get_dev(adapter->ctx),
4347 		    "Error reading from PHY's current temperature register\n");
4348 		return (EAGAIN);
4349 	}
4350 
4351 	/* Shift temp for output */
4352 	reg = reg >> 8;
4353 
4354 	return (sysctl_handle_16(oidp, NULL, reg, req));
4355 } /* ixgbe_sysctl_phy_temp */
4356 
4357 /************************************************************************
4358  * ixgbe_sysctl_phy_overtemp_occurred
4359  *
4360  *   Reports (directly from the PHY) whether the current PHY
4361  *   temperature is over the overtemp threshold.
4362  ************************************************************************/
4363 static int
4364 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4365 {
4366 	struct adapter  *adapter = (struct adapter *)arg1;
4367 	struct ixgbe_hw *hw = &adapter->hw;
4368 	u16             reg;
4369 
4370 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4371 		device_printf(iflib_get_dev(adapter->ctx),
4372 		    "Device has no supported external thermal sensor.\n");
4373 		return (ENODEV);
4374 	}
4375 
4376 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4377 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4378 		device_printf(iflib_get_dev(adapter->ctx),
4379 		    "Error reading from PHY's temperature status register\n");
4380 		return (EAGAIN);
4381 	}
4382 
4383 	/* Get occurrence bit */
4384 	reg = !!(reg & 0x4000);
4385 
4386 	return (sysctl_handle_16(oidp, NULL, reg, req));
4387 } /* ixgbe_sysctl_phy_overtemp_occurred */
4388 
4389 /************************************************************************
4390  * ixgbe_sysctl_eee_state
4391  *
4392  *   Sysctl to set EEE power saving feature
4393  *   Values:
4394  *     0      - disable EEE
4395  *     1      - enable EEE
4396  *     (none) - get current device EEE state
4397  ************************************************************************/
4398 static int
4399 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4400 {
4401 	struct adapter *adapter = (struct adapter *)arg1;
4402 	device_t       dev = adapter->dev;
4403 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4404 	int            curr_eee, new_eee, error = 0;
4405 	s32            retval;
4406 
4407 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4408 
4409 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4410 	if ((error) || (req->newptr == NULL))
4411 		return (error);
4412 
4413 	/* Nothing to do */
4414 	if (new_eee == curr_eee)
4415 		return (0);
4416 
4417 	/* Not supported */
4418 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4419 		return (EINVAL);
4420 
4421 	/* Bounds checking */
4422 	if ((new_eee < 0) || (new_eee > 1))
4423 		return (EINVAL);
4424 
4425 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4426 	if (retval) {
4427 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4428 		return (EINVAL);
4429 	}
4430 
4431 	/* Restart auto-neg */
4432 	ifp->if_init(ifp);
4433 
4434 	device_printf(dev, "New EEE state: %d\n", new_eee);
4435 
4436 	/* Cache new value */
4437 	if (new_eee)
4438 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4439 	else
4440 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4441 
4442 	return (error);
4443 } /* ixgbe_sysctl_eee_state */
4444 
4445 /************************************************************************
4446  * ixgbe_init_device_features
4447  ************************************************************************/
4448 static void
4449 ixgbe_init_device_features(struct adapter *adapter)
4450 {
4451 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4452 	                  | IXGBE_FEATURE_RSS
4453 	                  | IXGBE_FEATURE_MSI
4454 	                  | IXGBE_FEATURE_MSIX
4455 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4456 
4457 	/* Set capabilities first... */
4458 	switch (adapter->hw.mac.type) {
4459 	case ixgbe_mac_82598EB:
4460 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4461 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4462 		break;
4463 	case ixgbe_mac_X540:
4464 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4465 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4466 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4467 		    (adapter->hw.bus.func == 0))
4468 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4469 		break;
4470 	case ixgbe_mac_X550:
4471 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4472 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4473 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4474 		break;
4475 	case ixgbe_mac_X550EM_x:
4476 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4477 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4478 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4479 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4480 		break;
4481 	case ixgbe_mac_X550EM_a:
4482 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4483 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4484 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4485 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4486 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4487 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4488 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4489 		}
4490 		break;
4491 	case ixgbe_mac_82599EB:
4492 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4493 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4494 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4495 		    (adapter->hw.bus.func == 0))
4496 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4497 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4498 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4499 		break;
4500 	default:
4501 		break;
4502 	}
4503 
4504 	/* Enabled by default... */
4505 	/* Fan failure detection */
4506 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4507 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4508 	/* Netmap */
4509 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4510 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4511 	/* EEE */
4512 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4513 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4514 	/* Thermal Sensor */
4515 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4516 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4517 
4518 	/* Enabled via global sysctl... */
4519 	/* Flow Director */
4520 	if (ixgbe_enable_fdir) {
4521 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4522 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4523 		else
4524 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4525 	}
4526 	/*
4527 	 * Message Signal Interrupts - Extended (MSI-X)
4528 	 * Normal MSI is only enabled if MSI-X calls fail.
4529 	 */
4530 	if (!ixgbe_enable_msix)
4531 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4532 	/* Receive-Side Scaling (RSS) */
4533 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4534 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4535 
4536 	/* Disable features with unmet dependencies... */
4537 	/* No MSI-X */
4538 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4539 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4540 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4541 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4542 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4543 	}
4544 } /* ixgbe_init_device_features */
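
/*
 * Worked example (illustrative): an X550EM_a 1G-T part starts from the
 * common NETMAP|RSS|MSI|MSIX|LEGACY_IRQ set, gains SRIOV, FDIR,
 * TEMP_SENSOR and EEE, loses LEGACY_IRQ, and then has NETMAP, EEE and
 * TEMP_SENSOR (plus RSS and FDIR when the global sysctls allow them)
 * enabled by default.
 */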
4545 
4546 /************************************************************************
4547  * ixgbe_check_fan_failure
4548  ************************************************************************/
4549 static void
4550 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4551 {
4552 	u32 mask;
4553 
4554 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4555 	    IXGBE_ESDP_SDP1;
4556 
4557 	if (reg & mask)
4558 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4559 } /* ixgbe_check_fan_failure */
4560