xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 74ca7bf1d4c7173d5575ba168bc4b5f6d181ff5a)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select the devices this driver attaches to
57  *   The last field is a human-readable device description string
58  *   The last entry must be PVID_END
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
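/*
 * iflib_device_probe() matches a candidate device's PCI vendor/device IDs
 * against this table; the description string of the matching entry is
 * reported as the device description.
 */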
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
124 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
125 static int  ixgbe_if_media_change(if_ctx_t ctx);
126 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
127 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
128 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
129 static void ixgbe_if_multi_set(if_ctx_t ctx);
130 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
131 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
132                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
133 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
134                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
135 static void ixgbe_if_queues_free(if_ctx_t ctx);
136 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
137 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
138 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
139 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
140 
141 int ixgbe_intr(void *arg);
142 
143 /************************************************************************
144  * Function prototypes
145  ************************************************************************/
146 #if __FreeBSD_version >= 1100036
147 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
148 #endif
149 
150 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
151 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
153 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
154 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
155 
156 static void ixgbe_config_dmac(struct adapter *adapter);
157 static void ixgbe_configure_ivars(struct adapter *adapter);
158 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
159                            s8 type);
160 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
161 static bool ixgbe_sfp_probe(if_ctx_t ctx);
162 
163 static void ixgbe_free_pci_resources(if_ctx_t ctx);
164 
165 static int  ixgbe_msix_link(void *arg);
166 static int  ixgbe_msix_que(void *arg);
167 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
168 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
169 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
170 
171 static int  ixgbe_setup_interface(if_ctx_t ctx);
172 static void ixgbe_init_device_features(struct adapter *adapter);
173 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
174 static void ixgbe_add_media_types(if_ctx_t ctx);
175 static void ixgbe_update_stats_counters(struct adapter *adapter);
176 static void ixgbe_config_link(struct adapter *adapter);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static void ixgbe_check_wol_support(struct adapter *adapter);
179 static void ixgbe_enable_rx_drop(struct adapter *);
180 static void ixgbe_disable_rx_drop(struct adapter *);
181 
182 static void ixgbe_add_hw_stats(struct adapter *adapter);
183 static int  ixgbe_set_flowcntl(struct adapter *, int);
184 static int  ixgbe_set_advertise(struct adapter *, int);
185 static int  ixgbe_get_advertise(struct adapter *);
186 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
187 static void ixgbe_config_gpie(struct adapter *adapter);
188 static void ixgbe_config_delay_values(struct adapter *adapter);
189 
190 /* Sysctl handlers */
191 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
192 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
197 #ifdef IXGBE_DEBUG
198 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
199 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
200 #endif
201 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
202 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
208 
209 /* Deferred interrupt tasklets */
210 static void ixgbe_handle_msf(void *);
211 static void ixgbe_handle_mod(void *);
212 static void ixgbe_handle_phy(void *);
213 
214 /************************************************************************
215  *  FreeBSD Device Interface Entry Points
216  ************************************************************************/
217 static device_method_t ix_methods[] = {
218 	/* Device interface */
219 	DEVMETHOD(device_register, ixgbe_register),
220 	DEVMETHOD(device_probe, iflib_device_probe),
221 	DEVMETHOD(device_attach, iflib_device_attach),
222 	DEVMETHOD(device_detach, iflib_device_detach),
223 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
224 	DEVMETHOD(device_suspend, iflib_device_suspend),
225 	DEVMETHOD(device_resume, iflib_device_resume),
226 #ifdef PCI_IOV
227 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
228 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
229 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
230 #endif /* PCI_IOV */
231 	DEVMETHOD_END
232 };
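/*
 * The device_* methods above are serviced by iflib, which in turn calls
 * back into this driver through the ifdi_* methods registered in
 * ixgbe_if_methods below.
 */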
233 
234 static driver_t ix_driver = {
235 	"ix", ix_methods, sizeof(struct adapter),
236 };
237 
238 devclass_t ix_devclass;
239 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
240 
241 MODULE_DEPEND(ix, pci, 1, 1, 1);
242 MODULE_DEPEND(ix, ether, 1, 1, 1);
243 MODULE_DEPEND(ix, iflib, 1, 1, 1);
244 
245 static device_method_t ixgbe_if_methods[] = {
246 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
247 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
248 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
249 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
250 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
251 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
252 	DEVMETHOD(ifdi_init, ixgbe_if_init),
253 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
254 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
255 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
256 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
257 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
259 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
260 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
261 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
262 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
263 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
264 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
265 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
266 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
267 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
268 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
269 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
270 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
271 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
272 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
273 #ifdef PCI_IOV
274 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
275 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
276 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
277 #endif /* PCI_IOV */
278 	DEVMETHOD_END
279 };
280 
281 /*
282  * TUNEABLE PARAMETERS:
283  */
284 
285 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
286 static driver_t ixgbe_if_driver = {
287   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
288 };
289 
290 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
291 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
292     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
293 
294 /* Flow control setting, default to full */
295 static int ixgbe_flow_control = ixgbe_fc_full;
296 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
297     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
298 
299 /* Advertise Speed, default to 0 (auto) */
300 static int ixgbe_advertise_speed = 0;
301 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
302     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
303 
304 /*
305  * Smart speed setting, default to on.
306  * This currently only works as a compile-time
307  * option because it is applied during attach;
308  * set it to 'ixgbe_smart_speed_off' to
309  * disable.
310  */
311 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
312 
313 /*
314  * MSI-X should be the default for best performance,
315  * but this allows it to be forced off for testing.
316  */
317 static int ixgbe_enable_msix = 1;
318 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
319     "Enable MSI-X interrupts");
320 
321 /*
322  * Setting this allows the use of
323  * unsupported SFP+ modules; note that
324  * if you do, you are on your own :)
325  */
326 static int allow_unsupported_sfp = FALSE;
327 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
328     &allow_unsupported_sfp, 0,
329     "Allow unsupported SFP modules...use at your own risk");
330 
331 /*
332  * Not sure if Flow Director is fully baked,
333  * so we'll default to turning it off.
334  */
335 static int ixgbe_enable_fdir = 0;
336 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
337     "Enable Flow Director");
338 
339 /* Receive-Side Scaling */
340 static int ixgbe_enable_rss = 1;
341 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
342     "Enable Receive-Side Scaling (RSS)");
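/*
 * The CTLFLAG_RDTUN sysctls above are read-only tunables, normally set
 * from loader.conf(5) before the module loads, for example (values shown
 * are only illustrative; the compiled-in defaults apply when unset):
 *
 *   hw.ix.enable_msix="0"
 *   hw.ix.flow_control="3"
 */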
343 
344 #if 0
345 /* Keep running tab on them for sanity check */
346 static int ixgbe_total_ports;
347 #endif
348 
349 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
350 
351 /*
352  * For Flow Director: this is the sampling rate for TX packets sent to
353  * the filter pool; at the default of 20, every 20th packet is probed.
354  *
355  * This feature can be disabled by setting this to 0.
356  */
357 static int atr_sample_rate = 20;
358 
359 extern struct if_txrx ixgbe_txrx;
360 
361 static struct if_shared_ctx ixgbe_sctx_init = {
362 	.isc_magic = IFLIB_MAGIC,
363 	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
364 	.isc_tx_maxsize = IXGBE_TSO_SIZE,
365 
366 	.isc_tx_maxsegsize = PAGE_SIZE,
367 
368 	.isc_rx_maxsize = PAGE_SIZE*4,
369 	.isc_rx_nsegments = 1,
370 	.isc_rx_maxsegsize = PAGE_SIZE*4,
371 	.isc_nfl = 1,
372 	.isc_ntxqs = 1,
373 	.isc_nrxqs = 1,
374 
375 	.isc_admin_intrcnt = 1,
376 	.isc_vendor_info = ixgbe_vendor_info_array,
377 	.isc_driver_version = ixgbe_driver_version,
378 	.isc_driver = &ixgbe_if_driver,
379 
380 	.isc_nrxd_min = {MIN_RXD},
381 	.isc_ntxd_min = {MIN_TXD},
382 	.isc_nrxd_max = {MAX_RXD},
383 	.isc_ntxd_max = {MAX_TXD},
384 	.isc_nrxd_default = {DEFAULT_RXD},
385 	.isc_ntxd_default = {DEFAULT_TXD},
386 };
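/*
 * Note: the isc_n[rt]xd_* fields are arrays with one entry per ring in a
 * queue set; only index 0 is used here since isc_ntxqs == isc_nrxqs == 1.
 */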
387 
388 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
389 
390 /************************************************************************
391  * ixgbe_if_tx_queues_alloc
392  ************************************************************************/
393 static int
394 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
395                          int ntxqs, int ntxqsets)
396 {
397 	struct adapter     *adapter = iflib_get_softc(ctx);
398 	if_softc_ctx_t     scctx = adapter->shared;
399 	struct ix_tx_queue *que;
400 	int                i, j, error;
401 
402 	MPASS(adapter->num_tx_queues > 0);
403 	MPASS(adapter->num_tx_queues == ntxqsets);
404 	MPASS(ntxqs == 1);
405 
406 	/* Allocate queue structure memory */
407 	adapter->tx_queues =
408 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
409 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
410 	if (!adapter->tx_queues) {
411 		device_printf(iflib_get_dev(ctx),
412 		    "Unable to allocate TX ring memory\n");
413 		return (ENOMEM);
414 	}
415 
416 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
417 		struct tx_ring *txr = &que->txr;
418 
419 		/* In case SR-IOV is enabled, align the index properly */
420 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
421 		    i);
422 
423 		txr->adapter = que->adapter = adapter;
424 		adapter->active_queues |= (u64)1 << txr->me;
425 
426 		/* Allocate report status array */
427 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
428 		if (txr->tx_rsq == NULL) {
429 			error = ENOMEM;
430 			goto fail;
431 		}
432 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
433 			txr->tx_rsq[j] = QIDX_INVALID;
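		/*
		 * tx_rsq records the descriptor indices at which Report
		 * Status was requested so the transmit completion path can
		 * check descriptor-done (DD) status there; all entries start
		 * out as QIDX_INVALID (unused).
		 */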
434 		/* get the virtual and physical address of the hardware queues */
435 		txr->tail = IXGBE_TDT(txr->me);
436 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
437 		txr->tx_paddr = paddrs[i];
438 
439 		txr->bytes = 0;
440 		txr->total_packets = 0;
441 
442 		/* Set the rate at which we sample packets */
443 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
444 			txr->atr_sample = atr_sample_rate;
445 
446 	}
447 
448 	iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod,
449 	    "mod_task");
450 	iflib_config_gtask_init(ctx, &adapter->msf_task, ixgbe_handle_msf,
451 	    "msf_task");
452 	iflib_config_gtask_init(ctx, &adapter->phy_task, ixgbe_handle_phy,
453 	    "phy_task");
454 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
455 		iflib_config_gtask_init(ctx, &adapter->mbx_task,
456 		    ixgbe_handle_mbx, "mbx_task");
457 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
458 		iflib_config_gtask_init(ctx, &adapter->fdir_task,
459 		    ixgbe_reinit_fdir, "fdir_task");
460 
461 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
462 	    adapter->num_tx_queues);
463 
464 	return (0);
465 
466 fail:
467 	ixgbe_if_queues_free(ctx);
468 
469 	return (error);
470 } /* ixgbe_if_tx_queues_alloc */
471 
472 /************************************************************************
473  * ixgbe_if_rx_queues_alloc
474  ************************************************************************/
475 static int
476 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
477                          int nrxqs, int nrxqsets)
478 {
479 	struct adapter     *adapter = iflib_get_softc(ctx);
480 	struct ix_rx_queue *que;
481 	int                i;
482 
483 	MPASS(adapter->num_rx_queues > 0);
484 	MPASS(adapter->num_rx_queues == nrxqsets);
485 	MPASS(nrxqs == 1);
486 
487 	/* Allocate queue structure memory */
488 	adapter->rx_queues =
489 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
490 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
491 	if (!adapter->rx_queues) {
492 		device_printf(iflib_get_dev(ctx),
493 		    "Unable to allocate RX ring memory\n");
494 		return (ENOMEM);
495 	}
496 
497 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
498 		struct rx_ring *rxr = &que->rxr;
499 
500 		/* In case SR-IOV is enabled, align the index properly */
501 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
502 		    i);
503 
504 		rxr->adapter = que->adapter = adapter;
505 
506 		/* get the virtual and physical address of the hw queues */
507 		rxr->tail = IXGBE_RDT(rxr->me);
508 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
509 		rxr->rx_paddr = paddrs[i];
510 		rxr->bytes = 0;
511 		rxr->que = que;
512 	}
513 
514 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
515 	    adapter->num_rx_queues);
516 
517 	return (0);
518 } /* ixgbe_if_rx_queues_alloc */
519 
520 /************************************************************************
521  * ixgbe_if_queues_free
522  ************************************************************************/
523 static void
524 ixgbe_if_queues_free(if_ctx_t ctx)
525 {
526 	struct adapter     *adapter = iflib_get_softc(ctx);
527 	struct ix_tx_queue *tx_que = adapter->tx_queues;
528 	struct ix_rx_queue *rx_que = adapter->rx_queues;
529 	int                i;
530 
531 	if (tx_que != NULL) {
532 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
533 			struct tx_ring *txr = &tx_que->txr;
534 			if (txr->tx_rsq == NULL)
535 				break;
536 
537 			free(txr->tx_rsq, M_IXGBE);
538 			txr->tx_rsq = NULL;
539 		}
540 
541 		free(adapter->tx_queues, M_IXGBE);
542 		adapter->tx_queues = NULL;
543 	}
544 	if (rx_que != NULL) {
545 		free(adapter->rx_queues, M_IXGBE);
546 		adapter->rx_queues = NULL;
547 	}
548 } /* ixgbe_if_queues_free */
549 
550 /************************************************************************
551  * ixgbe_initialize_rss_mapping
552  ************************************************************************/
553 static void
554 ixgbe_initialize_rss_mapping(struct adapter *adapter)
555 {
556 	struct ixgbe_hw *hw = &adapter->hw;
557 	u32             reta = 0, mrqc, rss_key[10];
558 	int             queue_id, table_size, index_mult;
559 	int             i, j;
560 	u32             rss_hash_config;
561 
562 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
563 		/* Fetch the configured RSS key */
564 		rss_getkey((uint8_t *)&rss_key);
565 	} else {
566 		/* set up random bits */
567 		arc4rand(&rss_key, sizeof(rss_key), 0);
568 	}
569 
570 	/* Set multiplier for RETA setup and table size based on MAC */
571 	index_mult = 0x1;
572 	table_size = 128;
573 	switch (adapter->hw.mac.type) {
574 	case ixgbe_mac_82598EB:
575 		index_mult = 0x11;
576 		break;
577 	case ixgbe_mac_X550:
578 	case ixgbe_mac_X550EM_x:
579 	case ixgbe_mac_X550EM_a:
580 		table_size = 512;
581 		break;
582 	default:
583 		break;
584 	}
585 
586 	/* Set up the redirection table */
587 	for (i = 0, j = 0; i < table_size; i++, j++) {
588 		if (j == adapter->num_rx_queues)
589 			j = 0;
590 
591 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
592 			/*
593 			 * Fetch the RSS bucket id for the given indirection
594 			 * entry. Cap it at the number of configured buckets
595 			 * (which is num_rx_queues.)
596 			 */
597 			queue_id = rss_get_indirection_to_bucket(i);
598 			queue_id = queue_id % adapter->num_rx_queues;
599 		} else
600 			queue_id = (j * index_mult);
601 
602 		/*
603 		 * The low 8 bits are for hash value (n+0);
604 		 * The next 8 bits are for hash value (n+1), etc.
605 		 */
606 		reta = reta >> 8;
607 		reta = reta | (((uint32_t)queue_id) << 24);
608 		if ((i & 3) == 3) {
609 			if (i < 128)
610 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
611 			else
612 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
613 				    reta);
614 			reta = 0;
615 		}
616 	}
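	/*
	 * Example: with four RX queues, a non-82598 MAC (index_mult == 1),
	 * and the kernel RSS option not in use, the loop above packs
	 * indirection entries 0..3 into RETA(0) as 0x03020100: one 8-bit
	 * queue id per entry, lowest entry in the least significant byte.
	 */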
617 
618 	/* Now fill our hash function seeds */
619 	for (i = 0; i < 10; i++)
620 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
621 
622 	/* Perform hash on these packet types */
623 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
624 		rss_hash_config = rss_gethashconfig();
625 	else {
626 		/*
627 		 * Disable UDP - IP fragments aren't currently being handled
628 		 * and so we end up with a mix of 2-tuple and 4-tuple
629 		 * traffic.
630 		 */
631 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
632 		                | RSS_HASHTYPE_RSS_TCP_IPV4
633 		                | RSS_HASHTYPE_RSS_IPV6
634 		                | RSS_HASHTYPE_RSS_TCP_IPV6
635 		                | RSS_HASHTYPE_RSS_IPV6_EX
636 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
637 	}
638 
639 	mrqc = IXGBE_MRQC_RSSEN;
640 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
641 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
642 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
643 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
644 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
645 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
646 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
647 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
648 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
649 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
650 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
651 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
652 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
653 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
654 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
655 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
656 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
657 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
658 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
659 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
660 } /* ixgbe_initialize_rss_mapping */
661 
662 /************************************************************************
663  * ixgbe_initialize_receive_units - Setup receive registers and features.
664  ************************************************************************/
665 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
666 
667 static void
668 ixgbe_initialize_receive_units(if_ctx_t ctx)
669 {
670 	struct adapter     *adapter = iflib_get_softc(ctx);
671 	if_softc_ctx_t     scctx = adapter->shared;
672 	struct ixgbe_hw    *hw = &adapter->hw;
673 	struct ifnet       *ifp = iflib_get_ifp(ctx);
674 	struct ix_rx_queue *que;
675 	int                i, j;
676 	u32                bufsz, fctrl, srrctl, rxcsum;
677 	u32                hlreg;
678 
679 	/*
680 	 * Make sure receives are disabled while
681 	 * setting up the descriptor ring
682 	 */
683 	ixgbe_disable_rx(hw);
684 
685 	/* Enable broadcasts */
686 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
687 	fctrl |= IXGBE_FCTRL_BAM;
688 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
689 		fctrl |= IXGBE_FCTRL_DPF;
690 		fctrl |= IXGBE_FCTRL_PMCF;
691 	}
692 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
693 
694 	/* Set for Jumbo Frames? */
695 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
696 	if (ifp->if_mtu > ETHERMTU)
697 		hlreg |= IXGBE_HLREG0_JUMBOEN;
698 	else
699 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
700 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
701 
702 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
703 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
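	/*
	 * bufsz above is rx_mbuf_sz rounded up to the next multiple of
	 * (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) bytes, expressed in the units
	 * of the SRRCTL.BSIZEPKT field programmed below.
	 */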
704 
705 	/* Setup the Base and Length of the Rx Descriptor Ring */
706 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
707 		struct rx_ring *rxr = &que->rxr;
708 		u64            rdba = rxr->rx_paddr;
709 
710 		j = rxr->me;
711 
712 		/* Setup the Base and Length of the Rx Descriptor Ring */
713 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
714 		    (rdba & 0x00000000ffffffffULL));
715 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
716 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
717 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
718 
719 		/* Set up the SRRCTL register */
720 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
721 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
722 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
723 		srrctl |= bufsz;
724 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
725 
726 		/*
727 		 * Set DROP_EN iff we have no flow control and >1 queue.
728 		 * Note that srrctl was cleared shortly before during reset,
729 		 * so we do not need to clear the bit, but do it just in case
730 		 * this code is moved elsewhere.
731 		 */
732 		if (adapter->num_rx_queues > 1 &&
733 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
734 			srrctl |= IXGBE_SRRCTL_DROP_EN;
735 		} else {
736 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
737 		}
738 
739 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
740 
741 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
742 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
743 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
744 
745 		/* Set the driver rx tail address */
746 		rxr->tail =  IXGBE_RDT(rxr->me);
747 	}
748 
749 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
750 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
751 		            | IXGBE_PSRTYPE_UDPHDR
752 		            | IXGBE_PSRTYPE_IPV4HDR
753 		            | IXGBE_PSRTYPE_IPV6HDR;
754 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
755 	}
756 
757 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
758 
759 	ixgbe_initialize_rss_mapping(adapter);
760 
761 	if (adapter->num_rx_queues > 1) {
762 		/* RSS and RX IPP Checksum are mutually exclusive */
763 		rxcsum |= IXGBE_RXCSUM_PCSD;
764 	}
765 
766 	if (ifp->if_capenable & IFCAP_RXCSUM)
767 		rxcsum |= IXGBE_RXCSUM_PCSD;
768 
769 	/* This is useful for calculating UDP/IP fragment checksums */
770 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
771 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
772 
773 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
774 
775 } /* ixgbe_initialize_receive_units */
776 
777 /************************************************************************
778  * ixgbe_initialize_transmit_units - Enable transmit units.
779  ************************************************************************/
780 static void
781 ixgbe_initialize_transmit_units(if_ctx_t ctx)
782 {
783 	struct adapter     *adapter = iflib_get_softc(ctx);
784 	struct ixgbe_hw    *hw = &adapter->hw;
785 	if_softc_ctx_t     scctx = adapter->shared;
786 	struct ix_tx_queue *que;
787 	int i;
788 
789 	/* Setup the Base and Length of the Tx Descriptor Ring */
790 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
791 	    i++, que++) {
792 		struct tx_ring	   *txr = &que->txr;
793 		u64 tdba = txr->tx_paddr;
794 		u32 txctrl = 0;
795 		int j = txr->me;
796 
797 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
798 		    (tdba & 0x00000000ffffffffULL));
799 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
800 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
801 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
802 
803 		/* Setup the HW Tx Head and Tail descriptor pointers */
804 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
805 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
806 
807 		/* Reset the indices used for transmit completion tracking */
808 		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
809 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
810 			txr->tx_rsq[k] = QIDX_INVALID;
811 
812 		/* Disable Head Writeback */
813 		/*
814 		 * Note: for X550 series devices, these registers are actually
815 		 * prefixed with TPH_ instead of DCA_, but the addresses and
816 		 * fields remain the same.
817 		 */
818 		switch (hw->mac.type) {
819 		case ixgbe_mac_82598EB:
820 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
821 			break;
822 		default:
823 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
824 			break;
825 		}
826 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
827 		switch (hw->mac.type) {
828 		case ixgbe_mac_82598EB:
829 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
830 			break;
831 		default:
832 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
833 			break;
834 		}
835 
836 	}
837 
838 	if (hw->mac.type != ixgbe_mac_82598EB) {
839 		u32 dmatxctl, rttdcs;
840 
841 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
842 		dmatxctl |= IXGBE_DMATXCTL_TE;
843 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
844 		/* Disable arbiter to set MTQC */
845 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
846 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
847 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
848 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
849 		    ixgbe_get_mtqc(adapter->iov_mode));
850 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
851 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
852 	}
853 
854 } /* ixgbe_initialize_transmit_units */
855 
856 /************************************************************************
857  * ixgbe_register
858  ************************************************************************/
859 static void *
860 ixgbe_register(device_t dev)
861 {
862 	return (ixgbe_sctx);
863 } /* ixgbe_register */
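/*
 * iflib obtains the driver's shared context (ixgbe_sctx_init above)
 * through this device_register method.
 */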
864 
865 /************************************************************************
866  * ixgbe_if_attach_pre - Device initialization routine, part 1
867  *
868  *   Called when the driver is being loaded.
869  *   Identifies the type of hardware, initializes the hardware,
870  *   and initializes iflib structures.
871  *
872  *   return 0 on success, positive on failure
873  ************************************************************************/
874 static int
875 ixgbe_if_attach_pre(if_ctx_t ctx)
876 {
877 	struct adapter  *adapter;
878 	device_t        dev;
879 	if_softc_ctx_t  scctx;
880 	struct ixgbe_hw *hw;
881 	int             error = 0;
882 	u32             ctrl_ext;
883 
884 	INIT_DEBUGOUT("ixgbe_attach: begin");
885 
886 	/* Allocate, clear, and link in our adapter structure */
887 	dev = iflib_get_dev(ctx);
888 	adapter = iflib_get_softc(ctx);
889 	adapter->hw.back = adapter;
890 	adapter->ctx = ctx;
891 	adapter->dev = dev;
892 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
893 	adapter->media = iflib_get_media(ctx);
894 	hw = &adapter->hw;
895 
896 	/* Determine hardware revision */
897 	hw->vendor_id = pci_get_vendor(dev);
898 	hw->device_id = pci_get_device(dev);
899 	hw->revision_id = pci_get_revid(dev);
900 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
901 	hw->subsystem_device_id = pci_get_subdevice(dev);
902 
903 	/* Do base PCI setup - map BAR0 */
904 	if (ixgbe_allocate_pci_resources(ctx)) {
905 		device_printf(dev, "Allocation of PCI resources failed\n");
906 		return (ENXIO);
907 	}
908 
909 	/* let hardware know driver is loaded */
910 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
911 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
912 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
913 
914 	/*
915 	 * Initialize the shared code
916 	 */
917 	if (ixgbe_init_shared_code(hw) != 0) {
918 		device_printf(dev, "Unable to initialize the shared code\n");
919 		error = ENXIO;
920 		goto err_pci;
921 	}
922 
923 	if (hw->mbx.ops.init_params)
924 		hw->mbx.ops.init_params(hw);
925 
926 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
927 
928 	if (hw->mac.type != ixgbe_mac_82598EB)
929 		hw->phy.smart_speed = ixgbe_smart_speed;
930 
931 	ixgbe_init_device_features(adapter);
932 
933 	/* Enable WoL (if supported) */
934 	ixgbe_check_wol_support(adapter);
935 
936 	/* Verify adapter fan is still functional (if applicable) */
937 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
938 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
939 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
940 	}
941 
942 	/* Ensure SW/FW semaphore is free */
943 	ixgbe_init_swfw_semaphore(hw);
944 
945 	/* Set an initial default flow control value */
946 	hw->fc.requested_mode = ixgbe_flow_control;
947 
948 	hw->phy.reset_if_overtemp = TRUE;
949 	error = ixgbe_reset_hw(hw);
950 	hw->phy.reset_if_overtemp = FALSE;
951 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
952 		/*
953 		 * No optics in this port, set up
954 		 * so the timer routine will probe
955 		 * for later insertion.
956 		 */
957 		adapter->sfp_probe = TRUE;
958 		error = 0;
959 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
960 		device_printf(dev, "Unsupported SFP+ module detected!\n");
961 		error = EIO;
962 		goto err_pci;
963 	} else if (error) {
964 		device_printf(dev, "Hardware initialization failed\n");
965 		error = EIO;
966 		goto err_pci;
967 	}
968 
969 	/* Make sure we have a good EEPROM before we read from it */
970 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
971 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
972 		error = EIO;
973 		goto err_pci;
974 	}
975 
976 	error = ixgbe_start_hw(hw);
977 	switch (error) {
978 	case IXGBE_ERR_EEPROM_VERSION:
979 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
980 		break;
981 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
982 		device_printf(dev, "Unsupported SFP+ Module\n");
983 		error = EIO;
984 		goto err_pci;
985 	case IXGBE_ERR_SFP_NOT_PRESENT:
986 		device_printf(dev, "No SFP+ Module found\n");
987 		/* falls thru */
988 	default:
989 		break;
990 	}
991 
992 	/* Most of the iflib initialization... */
993 
994 	iflib_set_mac(ctx, hw->mac.addr);
995 	switch (adapter->hw.mac.type) {
996 	case ixgbe_mac_X550:
997 	case ixgbe_mac_X550EM_x:
998 	case ixgbe_mac_X550EM_a:
999 		scctx->isc_rss_table_size = 512;
1000 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1001 		break;
1002 	default:
1003 		scctx->isc_rss_table_size = 128;
1004 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1005 	}
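	/*
	 * The X550 family has a 512-entry RSS indirection table (128 on
	 * earlier MACs); this mirrors the table_size selection made in
	 * ixgbe_initialize_rss_mapping().
	 */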
1006 
1007 	/* Allow legacy interrupts */
1008 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1009 
1010 	scctx->isc_txqsizes[0] =
1011 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1012 	    sizeof(u32), DBA_ALIGN);
1013 	scctx->isc_rxqsizes[0] =
1014 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1015 	    DBA_ALIGN);
1016 
1017 	/* XXX */
1018 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1019 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1020 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1021 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1022 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
1023 	} else {
1024 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1025 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1026 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
1027 	}
1028 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1029 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1030 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1031 
1032 	scctx->isc_txrx = &ixgbe_txrx;
1033 
1034 	scctx->isc_capenable = IXGBE_CAPS;
1035 
1036 	return (0);
1037 
1038 err_pci:
1039 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1040 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1041 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1042 	ixgbe_free_pci_resources(ctx);
1043 
1044 	return (error);
1045 } /* ixgbe_if_attach_pre */
1046 
1047  /*********************************************************************
1048  * ixgbe_if_attach_post - Device initialization routine, part 2
1049  *
1050  *   Called during driver load, but after interrupts and
1051  *   resources have been allocated and configured.
1052  *   Sets up some data structures not relevant to iflib.
1053  *
1054  *   return 0 on success, positive on failure
1055  *********************************************************************/
1056 static int
1057 ixgbe_if_attach_post(if_ctx_t ctx)
1058 {
1059 	device_t dev;
1060 	struct adapter  *adapter;
1061 	struct ixgbe_hw *hw;
1062 	int             error = 0;
1063 
1064 	dev = iflib_get_dev(ctx);
1065 	adapter = iflib_get_softc(ctx);
1066 	hw = &adapter->hw;
1067 
1068 
1069 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1070 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1071 		device_printf(dev, "Device does not support legacy interrupts");
1072 		error = ENXIO;
1073 		goto err;
1074 	}
1075 
1076 	/* Allocate multicast array memory. */
1077 	adapter->mta = malloc(sizeof(*adapter->mta) *
1078 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1079 	if (adapter->mta == NULL) {
1080 		device_printf(dev, "Can not allocate multicast setup array\n");
1081 		error = ENOMEM;
1082 		goto err;
1083 	}
1084 
1085 	/* hw.ix defaults init */
1086 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1087 
1088 	/* Enable the optics for 82599 SFP+ fiber */
1089 	ixgbe_enable_tx_laser(hw);
1090 
1091 	/* Enable power to the phy. */
1092 	ixgbe_set_phy_power(hw, TRUE);
1093 
1094 	ixgbe_initialize_iov(adapter);
1095 
1096 	error = ixgbe_setup_interface(ctx);
1097 	if (error) {
1098 		device_printf(dev, "Interface setup failed: %d\n", error);
1099 		goto err;
1100 	}
1101 
1102 	ixgbe_if_update_admin_status(ctx);
1103 
1104 	/* Initialize statistics */
1105 	ixgbe_update_stats_counters(adapter);
1106 	ixgbe_add_hw_stats(adapter);
1107 
1108 	/* Check PCIE slot type/speed/width */
1109 	ixgbe_get_slot_info(adapter);
1110 
1111 	/*
1112 	 * Do time init and sysctl init here, but
1113 	 * only on the first port of a bypass adapter.
1114 	 */
1115 	ixgbe_bypass_init(adapter);
1116 
1117 	/* Set an initial dmac value */
1118 	adapter->dmac = 0;
1119 	/* Set initial advertised speeds (if applicable) */
1120 	adapter->advertise = ixgbe_get_advertise(adapter);
1121 
1122 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1123 		ixgbe_define_iov_schemas(dev, &error);
1124 
1125 	/* Add sysctls */
1126 	ixgbe_add_device_sysctls(ctx);
1127 
1128 	return (0);
1129 err:
1130 	return (error);
1131 } /* ixgbe_if_attach_post */
1132 
1133 /************************************************************************
1134  * ixgbe_check_wol_support
1135  *
1136  *   Checks whether the adapter's ports are capable of
1137  *   Wake On LAN by reading the adapter's NVM.
1138  *
1139  *   Sets each port's hw->wol_enabled value depending
1140  *   on the value read here.
1141  ************************************************************************/
1142 static void
1143 ixgbe_check_wol_support(struct adapter *adapter)
1144 {
1145 	struct ixgbe_hw *hw = &adapter->hw;
1146 	u16             dev_caps = 0;
1147 
1148 	/* Find out WoL support for port */
1149 	adapter->wol_support = hw->wol_enabled = 0;
1150 	ixgbe_get_device_caps(hw, &dev_caps);
1151 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1152 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1153 	     hw->bus.func == 0))
1154 		adapter->wol_support = hw->wol_enabled = 1;
1155 
1156 	/* Save initial wake up filter configuration */
1157 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1158 
1159 	return;
1160 } /* ixgbe_check_wol_support */
1161 
1162 /************************************************************************
1163  * ixgbe_setup_interface
1164  *
1165  *   Setup networking device structure and register an interface.
1166  ************************************************************************/
1167 static int
1168 ixgbe_setup_interface(if_ctx_t ctx)
1169 {
1170 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1171 	struct adapter *adapter = iflib_get_softc(ctx);
1172 
1173 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1174 
1175 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
1176 	if_setbaudrate(ifp, IF_Gbps(10));
1177 
1178 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1179 
1180 	/*
1181 	 * Don't turn this on by default: if VLANs are
1182 	 * created on another pseudo device (e.g. lagg),
1183 	 * VLAN events are not passed through and filtering
1184 	 * breaks, but with HW FILTER off it works. If
1185 	 * VLANs are used directly on the ixgbe interface,
1186 	 * enable this to get full hardware tag filtering.
1187 	 */
1188 	if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWFILTER);
1189 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1190 
1191 	ixgbe_add_media_types(ctx);
1192 
1193 	/* Autoselect media by default */
1194 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1195 
1196 	return (0);
1197 } /* ixgbe_setup_interface */
1198 
1199 /************************************************************************
1200  * ixgbe_if_get_counter
1201  ************************************************************************/
1202 static uint64_t
1203 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1204 {
1205 	struct adapter *adapter = iflib_get_softc(ctx);
1206 	if_t           ifp = iflib_get_ifp(ctx);
1207 
1208 	switch (cnt) {
1209 	case IFCOUNTER_IPACKETS:
1210 		return (adapter->ipackets);
1211 	case IFCOUNTER_OPACKETS:
1212 		return (adapter->opackets);
1213 	case IFCOUNTER_IBYTES:
1214 		return (adapter->ibytes);
1215 	case IFCOUNTER_OBYTES:
1216 		return (adapter->obytes);
1217 	case IFCOUNTER_IMCASTS:
1218 		return (adapter->imcasts);
1219 	case IFCOUNTER_OMCASTS:
1220 		return (adapter->omcasts);
1221 	case IFCOUNTER_COLLISIONS:
1222 		return (0);
1223 	case IFCOUNTER_IQDROPS:
1224 		return (adapter->iqdrops);
1225 	case IFCOUNTER_OQDROPS:
1226 		return (0);
1227 	case IFCOUNTER_IERRORS:
1228 		return (adapter->ierrors);
1229 	default:
1230 		return (if_get_counter_default(ifp, cnt));
1231 	}
1232 } /* ixgbe_if_get_counter */
1233 
1234 /************************************************************************
1235  * ixgbe_add_media_types
1236  ************************************************************************/
1237 static void
1238 ixgbe_add_media_types(if_ctx_t ctx)
1239 {
1240 	struct adapter  *adapter = iflib_get_softc(ctx);
1241 	struct ixgbe_hw *hw = &adapter->hw;
1242 	device_t        dev = iflib_get_dev(ctx);
1243 	u64             layer;
1244 
1245 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1246 
1247 	/* Media types with matching FreeBSD media defines */
1248 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1249 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1250 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1251 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1252 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1253 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1254 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1255 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1256 
1257 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1258 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1259 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1260 		    NULL);
1261 
1262 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1263 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1264 		if (hw->phy.multispeed_fiber)
1265 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1266 			    NULL);
1267 	}
1268 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1269 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1270 		if (hw->phy.multispeed_fiber)
1271 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1272 			    NULL);
1273 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1274 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1275 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1276 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1277 
1278 #ifdef IFM_ETH_XTYPE
1279 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1280 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1281 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1282 		ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1283 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1284 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1285 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1286 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1287 #else
1288 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1289 		device_printf(dev, "Media supported: 10GbaseKR\n");
1290 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1291 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1292 	}
1293 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1294 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1295 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1296 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1297 	}
1298 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1299 		device_printf(dev, "Media supported: 1000baseKX\n");
1300 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1301 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1302 	}
1303 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1304 		device_printf(dev, "Media supported: 2500baseKX\n");
1305 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1306 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1307 	}
1308 #endif
1309 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1310 		device_printf(dev, "Media supported: 1000baseBX\n");
1311 
1312 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1313 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1314 		    0, NULL);
1315 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1316 	}
1317 
1318 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1319 } /* ixgbe_add_media_types */
1320 
1321 /************************************************************************
1322  * ixgbe_is_sfp
1323  ************************************************************************/
1324 static inline bool
1325 ixgbe_is_sfp(struct ixgbe_hw *hw)
1326 {
1327 	switch (hw->mac.type) {
1328 	case ixgbe_mac_82598EB:
1329 		if (hw->phy.type == ixgbe_phy_nl)
1330 			return (TRUE);
1331 		return (FALSE);
1332 	case ixgbe_mac_82599EB:
1333 		switch (hw->mac.ops.get_media_type(hw)) {
1334 		case ixgbe_media_type_fiber:
1335 		case ixgbe_media_type_fiber_qsfp:
1336 			return (TRUE);
1337 		default:
1338 			return (FALSE);
1339 		}
1340 	case ixgbe_mac_X550EM_x:
1341 	case ixgbe_mac_X550EM_a:
1342 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1343 			return (TRUE);
1344 		return (FALSE);
1345 	default:
1346 		return (FALSE);
1347 	}
1348 } /* ixgbe_is_sfp */
1349 
1350 /************************************************************************
1351  * ixgbe_config_link
1352  ************************************************************************/
1353 static void
1354 ixgbe_config_link(struct adapter *adapter)
1355 {
1356 	struct ixgbe_hw *hw = &adapter->hw;
1357 	u32             autoneg, err = 0;
1358 	bool            sfp, negotiate;
1359 
1360 	sfp = ixgbe_is_sfp(hw);
1361 
1362 	if (sfp) {
1363 		GROUPTASK_ENQUEUE(&adapter->mod_task);
1364 	} else {
1365 		if (hw->mac.ops.check_link)
1366 			err = ixgbe_check_link(hw, &adapter->link_speed,
1367 			    &adapter->link_up, FALSE);
1368 		if (err)
1369 			return;
1370 		autoneg = hw->phy.autoneg_advertised;
1371 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1372 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1373 			    &negotiate);
1374 		if (err)
1375 			return;
1376 		if (hw->mac.ops.setup_link)
1377 			err = hw->mac.ops.setup_link(hw, autoneg,
1378 			    adapter->link_up);
1379 	}
1380 
1381 } /* ixgbe_config_link */
1382 
1383 /************************************************************************
1384  * ixgbe_update_stats_counters - Update board statistics counters.
1385  ************************************************************************/
1386 static void
1387 ixgbe_update_stats_counters(struct adapter *adapter)
1388 {
1389 	struct ixgbe_hw       *hw = &adapter->hw;
1390 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1391 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1392 	u64                   total_missed_rx = 0;
1393 
1394 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1395 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1396 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1397 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1398 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
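	/*
	 * Note: missed_rx/total_missed_rx are not accumulated from the MPC
	 * registers here, so the IQDROPS value set from total_missed_rx at
	 * the end of this function remains zero.
	 */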
1399 
1400 	for (int i = 0; i < 16; i++) {
1401 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1402 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1403 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1404 	}
1405 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1406 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1407 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1408 
1409 	/* Hardware workaround, gprc counts missed packets */
1410 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1411 	stats->gprc -= missed_rx;
1412 
1413 	if (hw->mac.type != ixgbe_mac_82598EB) {
1414 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1415 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1416 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1417 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1418 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1419 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1420 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1421 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1422 	} else {
1423 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1424 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1425 		/* 82598 only has a counter in the high register */
1426 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1427 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1428 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1429 	}
1430 
1431 	/*
1432 	 * Workaround: mprc hardware is incorrectly counting
1433 	 * broadcasts, so for now we subtract those.
1434 	 */
1435 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1436 	stats->bprc += bprc;
1437 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1438 	if (hw->mac.type == ixgbe_mac_82598EB)
1439 		stats->mprc -= bprc;
1440 
1441 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1442 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1443 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1444 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1445 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1446 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1447 
1448 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1449 	stats->lxontxc += lxon;
1450 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1451 	stats->lxofftxc += lxoff;
1452 	total = lxon + lxoff;
1453 
1454 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1455 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1456 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1457 	stats->gptc -= total;
1458 	stats->mptc -= total;
1459 	stats->ptc64 -= total;
1460 	stats->gotc -= total * ETHER_MIN_LEN;
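	/*
	 * The adjustments above back MAC flow-control frames out of the
	 * "good" TX counters: each XON/XOFF pause frame is counted as one
	 * packet and, being a minimum-length frame, as ETHER_MIN_LEN
	 * octets, hence total * ETHER_MIN_LEN.
	 */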
1461 
1462 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1463 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1464 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1465 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1466 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1467 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1468 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1469 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1470 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1471 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1472 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1473 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1474 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1475 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1476 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1477 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1478 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1479 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1480 	/* Only read FCOE on 82599 */
1481 	if (hw->mac.type != ixgbe_mac_82598EB) {
1482 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1483 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1484 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1485 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1486 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1487 	}
1488 
1489 	/* Fill out the OS statistics structure */
1490 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1491 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1492 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1493 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1494 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1495 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1496 	IXGBE_SET_COLLISIONS(adapter, 0);
1497 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1498 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1499 } /* ixgbe_update_stats_counters */
1500 
1501 /************************************************************************
1502  * ixgbe_add_hw_stats
1503  *
1504  *   Add sysctl variables, one per statistic, to the system.
1505  ************************************************************************/
1506 static void
1507 ixgbe_add_hw_stats(struct adapter *adapter)
1508 {
1509 	device_t               dev = iflib_get_dev(adapter->ctx);
1510 	struct ix_rx_queue     *rx_que;
1511 	struct ix_tx_queue     *tx_que;
1512 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1513 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1514 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1515 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1516 	struct sysctl_oid      *stat_node, *queue_node;
1517 	struct sysctl_oid_list *stat_list, *queue_list;
1518 	int                    i;
1519 
1520 #define QUEUE_NAME_LEN 32
1521 	char                   namebuf[QUEUE_NAME_LEN];
1522 
1523 	/* Driver Statistics */
1524 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1525 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1526 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1527 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1528 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1529 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1530 
1531 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1532 		struct tx_ring *txr = &tx_que->txr;
1533 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1534 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1535 		    CTLFLAG_RD, NULL, "Queue Name");
1536 		queue_list = SYSCTL_CHILDREN(queue_node);
1537 
1538 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1539 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1540 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1541 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1542 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1543 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1544 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1545 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1546 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1547 		    CTLFLAG_RD, &txr->total_packets,
1548 		    "Queue Packets Transmitted");
1549 	}
1550 
1551 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1552 		struct rx_ring *rxr = &rx_que->rxr;
1553 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1554 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1555 		    CTLFLAG_RD, NULL, "Queue Name");
1556 		queue_list = SYSCTL_CHILDREN(queue_node);
1557 
1558 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1559 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1560 		    sizeof(&adapter->rx_queues[i]),
1561 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1562 		    "Interrupt Rate");
1563 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1564 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1565 		    "irqs on this queue");
1566 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1567 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1568 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1569 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1570 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1571 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1572 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1573 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1574 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1575 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1576 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1577 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1578 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1579 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1580 	}
1581 
1582 	/* MAC stats get their own sub node */
1583 
1584 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1585 	    CTLFLAG_RD, NULL, "MAC Statistics");
1586 	stat_list = SYSCTL_CHILDREN(stat_node);
1587 
1588 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1589 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1590 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1591 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1592 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1593 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1594 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1595 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1596 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1597 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1598 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1599 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1600 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1601 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1602 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1603 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1604 
1605 	/* Flow Control stats */
1606 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1607 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1608 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1609 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1610 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1611 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1612 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1613 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1614 
1615 	/* Packet Reception Stats */
1616 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1617 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1618 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1619 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1620 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1621 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1622 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1623 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1624 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1625 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1626 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1627 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1628 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1629 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1630 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1631 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1632 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1633 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1634 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1635 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1636 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1637 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1638 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1639 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1640 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1641 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1642 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1643 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1644 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1645 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1646 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1647 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1648 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1649 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1650 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1651 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1652 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1653 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1654 
1655 	/* Packet Transmission Stats */
1656 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1657 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1658 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1659 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1660 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1661 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1662 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1663 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1664 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1665 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1666 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1667 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1668 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1669 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1670 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1671 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1672 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1673 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1674 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1675 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1676 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1677 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1678 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1679 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1680 } /* ixgbe_add_hw_stats */
1681 
1682 /************************************************************************
1683  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1684  *
1685  *   Retrieves the TDH value from the hardware
1686  ************************************************************************/
1687 static int
1688 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1689 {
1690 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1691 	int            error;
1692 	unsigned int   val;
1693 
1694 	if (!txr)
1695 		return (0);
1696 
1697 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1698 	error = sysctl_handle_int(oidp, &val, 0, req);
1699 	if (error || !req->newptr)
1700 		return error;
1701 
1702 	return (0);
1703 } /* ixgbe_sysctl_tdh_handler */
1704 
1705 /************************************************************************
1706  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1707  *
1708  *   Retrieves the TDT value from the hardware
1709  ************************************************************************/
1710 static int
1711 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1712 {
1713 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1714 	int            error;
1715 	unsigned int   val;
1716 
1717 	if (!txr)
1718 		return (0);
1719 
1720 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1721 	error = sysctl_handle_int(oidp, &val, 0, req);
1722 	if (error || !req->newptr)
1723 		return error;
1724 
1725 	return (0);
1726 } /* ixgbe_sysctl_tdt_handler */
1727 
1728 /************************************************************************
1729  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1730  *
1731  *   Retrieves the RDH value from the hardware
1732  ************************************************************************/
1733 static int
1734 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1735 {
1736 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1737 	int            error;
1738 	unsigned int   val;
1739 
1740 	if (!rxr)
1741 		return (0);
1742 
1743 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1744 	error = sysctl_handle_int(oidp, &val, 0, req);
1745 	if (error || !req->newptr)
1746 		return error;
1747 
1748 	return (0);
1749 } /* ixgbe_sysctl_rdh_handler */
1750 
1751 /************************************************************************
1752  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1753  *
1754  *   Retrieves the RDT value from the hardware
1755  ************************************************************************/
1756 static int
1757 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1758 {
1759 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1760 	int            error;
1761 	unsigned int   val;
1762 
1763 	if (!rxr)
1764 		return (0);
1765 
1766 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1767 	error = sysctl_handle_int(oidp, &val, 0, req);
1768 	if (error || !req->newptr)
1769 		return error;
1770 
1771 	return (0);
1772 } /* ixgbe_sysctl_rdt_handler */
1773 
1774 /************************************************************************
1775  * ixgbe_if_vlan_register
1776  *
1777  *   Run via the vlan config EVENT; it lets us use the
1778  *   HW filter table since we can get the vlan id. This
1779  *   just creates the entry in the soft version of the
1780  *   VFTA; init will repopulate the real table.
1781  ************************************************************************/
1782 static void
1783 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1784 {
1785 	struct adapter *adapter = iflib_get_softc(ctx);
1786 	u16            index, bit;
1787 
1788 	index = (vtag >> 5) & 0x7F;
1789 	bit = vtag & 0x1F;
1790 	adapter->shadow_vfta[index] |= (1 << bit);
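	/*
	 * Worked example (for illustration only): vtag 100 gives
	 * index = 100 >> 5 = 3 and bit = 100 & 0x1F = 4, so bit 4 of
	 * shadow_vfta[3] is set above.
	 */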
1791 	++adapter->num_vlans;
1792 	ixgbe_setup_vlan_hw_support(ctx);
1793 } /* ixgbe_if_vlan_register */
1794 
1795 /************************************************************************
1796  * ixgbe_if_vlan_unregister
1797  *
1798  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1799  ************************************************************************/
1800 static void
1801 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1802 {
1803 	struct adapter *adapter = iflib_get_softc(ctx);
1804 	u16            index, bit;
1805 
1806 	index = (vtag >> 5) & 0x7F;
1807 	bit = vtag & 0x1F;
1808 	adapter->shadow_vfta[index] &= ~(1 << bit);
1809 	--adapter->num_vlans;
1810 	/* Re-init to load the changes */
1811 	ixgbe_setup_vlan_hw_support(ctx);
1812 } /* ixgbe_if_vlan_unregister */
1813 
1814 /************************************************************************
1815  * ixgbe_setup_vlan_hw_support
1816  ************************************************************************/
1817 static void
1818 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1819 {
1820 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1821 	struct adapter  *adapter = iflib_get_softc(ctx);
1822 	struct ixgbe_hw *hw = &adapter->hw;
1823 	struct rx_ring  *rxr;
1824 	int             i;
1825 	u32             ctrl;
1826 
1827 
1828 	/*
1829 	 * We get here through init_locked, meaning
1830 	 * a soft reset; that has already cleared
1831 	 * the VFTA and other state, so if no VLANs
1832 	 * have been registered there is nothing to do.
1833 	 */
1834 	if (adapter->num_vlans == 0)
1835 		return;
1836 
1837 	/* Setup the queues for vlans */
1838 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1839 		for (i = 0; i < adapter->num_rx_queues; i++) {
1840 			rxr = &adapter->rx_queues[i].rxr;
1841 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1842 			if (hw->mac.type != ixgbe_mac_82598EB) {
1843 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1844 				ctrl |= IXGBE_RXDCTL_VME;
1845 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1846 			}
1847 			rxr->vtag_strip = TRUE;
1848 		}
1849 	}
1850 
1851 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1852 		return;
1853 	/*
1854 	 * A soft reset zeroes out the VFTA, so
1855 	 * we need to repopulate it now.
1856 	 */
1857 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1858 		if (adapter->shadow_vfta[i] != 0)
1859 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1860 			    adapter->shadow_vfta[i]);
1861 
1862 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1863 	/* Enable the Filter Table if enabled */
1864 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1865 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1866 		ctrl |= IXGBE_VLNCTRL_VFE;
1867 	}
1868 	if (hw->mac.type == ixgbe_mac_82598EB)
1869 		ctrl |= IXGBE_VLNCTRL_VME;
1870 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1871 } /* ixgbe_setup_vlan_hw_support */
1872 
1873 /************************************************************************
1874  * ixgbe_get_slot_info
1875  *
1876  *   Get the width and transaction speed of
1877  *   the slot this adapter is plugged into.
1878  ************************************************************************/
1879 static void
1880 ixgbe_get_slot_info(struct adapter *adapter)
1881 {
1882 	device_t        dev = iflib_get_dev(adapter->ctx);
1883 	struct ixgbe_hw *hw = &adapter->hw;
1884 	int             bus_info_valid = TRUE;
1885 	u32             offset;
1886 	u16             link;
1887 
1888 	/* Some devices are behind an internal bridge */
1889 	switch (hw->device_id) {
1890 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1891 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1892 		goto get_parent_info;
1893 	default:
1894 		break;
1895 	}
1896 
1897 	ixgbe_get_bus_info(hw);
1898 
1899 	/*
1900 	 * Some devices don't use PCI-E, so there is no point
1901 	 * in displaying "Unknown" for bus speed and width.
1902 	 */
1903 	switch (hw->mac.type) {
1904 	case ixgbe_mac_X550EM_x:
1905 	case ixgbe_mac_X550EM_a:
1906 		return;
1907 	default:
1908 		goto display;
1909 	}
1910 
1911 get_parent_info:
1912 	/*
1913 	 * For the Quad port adapter we need to parse back
1914 	 * up the PCI tree to find the speed of the expansion
1915 	 * slot into which this adapter is plugged. A bit more work.
1916 	 */
1917 	dev = device_get_parent(device_get_parent(dev));
1918 #ifdef IXGBE_DEBUG
1919 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1920 	    pci_get_slot(dev), pci_get_function(dev));
1921 #endif
1922 	dev = device_get_parent(device_get_parent(dev));
1923 #ifdef IXGBE_DEBUG
1924 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1925 	    pci_get_slot(dev), pci_get_function(dev));
1926 #endif
1927 	/* Now get the PCI Express Capabilities offset */
1928 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1929 		/*
1930 		 * Hmm...can't get PCI-Express capabilities.
1931 		 * Falling back to default method.
1932 		 */
1933 		bus_info_valid = FALSE;
1934 		ixgbe_get_bus_info(hw);
1935 		goto display;
1936 	}
1937 	/* ...and read the Link Status Register */
1938 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1939 	ixgbe_set_pci_config_data_generic(hw, link);
1940 
1941 display:
1942 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1943 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1944 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1945 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1946 	     "Unknown"),
1947 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1948 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1949 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1950 	     "Unknown"));
1951 
1952 	if (bus_info_valid) {
1953 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1954 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1955 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1956 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1957 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1958 		}
1959 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1960 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1961 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1962 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1963 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1964 		}
1965 	} else
1966 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1967 
1968 	return;
1969 } /* ixgbe_get_slot_info */
1970 
1971 /************************************************************************
1972  * ixgbe_if_msix_intr_assign
1973  *
1974  *   Setup MSI-X Interrupt resources and handlers
1975  ************************************************************************/
1976 static int
1977 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1978 {
1979 	struct adapter     *adapter = iflib_get_softc(ctx);
1980 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1981 	struct ix_tx_queue *tx_que;
1982 	int                error, rid, vector = 0;
1983 	int                cpu_id = 0;
1984 	char               buf[16];
1985 
1986 	/* Queue vectors are assigned first; the admin/link vector is allocated last */
1987 	rid = vector + 1;
1988 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1989 		rid = vector + 1;
1990 
1991 		snprintf(buf, sizeof(buf), "rxq%d", i);
1992 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1993 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
1994 
1995 		if (error) {
1996 			device_printf(iflib_get_dev(ctx),
1997 			    "Failed to allocate queue interrupt %d, error: %d\n", i, error);
1998 			adapter->num_rx_queues = i + 1;
1999 			goto fail;
2000 		}
2001 
2002 		rx_que->msix = vector;
2003 		adapter->active_queues |= (u64)1 << rx_que->msix;
2004 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2005 			/*
2006 			 * The queue ID is used as the RSS layer bucket ID.
2007 			 * We look up the queue ID -> RSS CPU ID and select
2008 			 * that.
2009 			 */
2010 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
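			/*
			 * e.g. with 8 RSS buckets, queue 10 falls in bucket
			 * 10 % 8 = 2 and is steered to that bucket's CPU.
			 */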
2011 		} else {
2012 			/*
2013 			 * Bind the msix vector, and thus the
2014 			 * rings to the corresponding cpu.
2015 			 *
2016 			 * This just happens to match the default RSS
2017 			 * round-robin bucket -> queue -> CPU allocation.
2018 			 */
2019 			if (adapter->num_rx_queues > 1)
2020 				cpu_id = i;
2021 		}
2022 
2023 	}
2024 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2025 		snprintf(buf, sizeof(buf), "txq%d", i);
2026 		tx_que = &adapter->tx_queues[i];
2027 		tx_que->msix = i % adapter->num_rx_queues;
2028 		iflib_softirq_alloc_generic(ctx,
2029 		    &adapter->rx_queues[tx_que->msix].que_irq,
2030 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2031 	}
2032 	rid = vector + 1;
2033 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2034 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2035 	if (error) {
2036 		device_printf(iflib_get_dev(ctx),
2037 		    "Failed to register admin handler\n");
2038 		return (error);
2039 	}
2040 
2041 	adapter->vector = vector;
2042 
2043 	return (0);
2044 fail:
2045 	iflib_irq_free(ctx, &adapter->irq);
2046 	rx_que = adapter->rx_queues;
2047 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2048 		iflib_irq_free(ctx, &rx_que->que_irq);
2049 
2050 	return (error);
2051 } /* ixgbe_if_msix_intr_assign */
2052 
2053 /*********************************************************************
2054  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2055  **********************************************************************/
2056 static int
2057 ixgbe_msix_que(void *arg)
2058 {
2059 	struct ix_rx_queue *que = arg;
2060 	struct adapter     *adapter = que->adapter;
2061 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2062 
2063 	/* Protect against spurious interrupts */
2064 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2065 		return 0;
2066 
2067 	ixgbe_disable_queue(adapter, que->msix);
2068 	++que->irqs;
2069 
2070 	return (FILTER_SCHEDULE_THREAD);
2071 } /* ixgbe_msix_que */
2072 
2073 /************************************************************************
2074  * ixgbe_if_media_status - Media Ioctl callback
2075  *
2076  *   Called whenever the user queries the status of
2077  *   the interface using ifconfig.
2078  ************************************************************************/
2079 static void
2080 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2081 {
2082 	struct adapter  *adapter = iflib_get_softc(ctx);
2083 	struct ixgbe_hw *hw = &adapter->hw;
2084 	int             layer;
2085 
2086 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2087 
2088 	iflib_admin_intr_deferred(ctx);
2089 
2090 	ifmr->ifm_status = IFM_AVALID;
2091 	ifmr->ifm_active = IFM_ETHER;
2092 
2093 	if (!adapter->link_active)
2094 		return;
2095 
2096 	ifmr->ifm_status |= IFM_ACTIVE;
2097 	layer = adapter->phy_layer;
2098 
2099 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2100 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2101 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2102 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2103 		switch (adapter->link_speed) {
2104 		case IXGBE_LINK_SPEED_10GB_FULL:
2105 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2106 			break;
2107 		case IXGBE_LINK_SPEED_1GB_FULL:
2108 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2109 			break;
2110 		case IXGBE_LINK_SPEED_100_FULL:
2111 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2112 			break;
2113 		case IXGBE_LINK_SPEED_10_FULL:
2114 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2115 			break;
2116 		}
2117 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2118 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2119 		switch (adapter->link_speed) {
2120 		case IXGBE_LINK_SPEED_10GB_FULL:
2121 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2122 			break;
2123 		}
2124 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2125 		switch (adapter->link_speed) {
2126 		case IXGBE_LINK_SPEED_10GB_FULL:
2127 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2128 			break;
2129 		case IXGBE_LINK_SPEED_1GB_FULL:
2130 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2131 			break;
2132 		}
2133 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2134 		switch (adapter->link_speed) {
2135 		case IXGBE_LINK_SPEED_10GB_FULL:
2136 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2137 			break;
2138 		case IXGBE_LINK_SPEED_1GB_FULL:
2139 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2140 			break;
2141 		}
2142 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2143 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2144 		switch (adapter->link_speed) {
2145 		case IXGBE_LINK_SPEED_10GB_FULL:
2146 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2147 			break;
2148 		case IXGBE_LINK_SPEED_1GB_FULL:
2149 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2150 			break;
2151 		}
2152 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2153 		switch (adapter->link_speed) {
2154 		case IXGBE_LINK_SPEED_10GB_FULL:
2155 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2156 			break;
2157 		}
2158 	/*
2159 	 * XXX: These need to use the proper media types once
2160 	 * they're added.
2161 	 */
2162 #ifndef IFM_ETH_XTYPE
2163 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2164 		switch (adapter->link_speed) {
2165 		case IXGBE_LINK_SPEED_10GB_FULL:
2166 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2167 			break;
2168 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2169 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2170 			break;
2171 		case IXGBE_LINK_SPEED_1GB_FULL:
2172 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2173 			break;
2174 		}
2175 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2176 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2177 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2178 		switch (adapter->link_speed) {
2179 		case IXGBE_LINK_SPEED_10GB_FULL:
2180 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2181 			break;
2182 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2183 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2184 			break;
2185 		case IXGBE_LINK_SPEED_1GB_FULL:
2186 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2187 			break;
2188 		}
2189 #else
2190 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2191 		switch (adapter->link_speed) {
2192 		case IXGBE_LINK_SPEED_10GB_FULL:
2193 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2194 			break;
2195 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2196 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2197 			break;
2198 		case IXGBE_LINK_SPEED_1GB_FULL:
2199 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2200 			break;
2201 		}
2202 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2203 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2204 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2205 		switch (adapter->link_speed) {
2206 		case IXGBE_LINK_SPEED_10GB_FULL:
2207 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2208 			break;
2209 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2210 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2211 			break;
2212 		case IXGBE_LINK_SPEED_1GB_FULL:
2213 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2214 			break;
2215 		}
2216 #endif
2217 
2218 	/* If nothing is recognized... */
2219 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2220 		ifmr->ifm_active |= IFM_UNKNOWN;
2221 
2222 	/* Display current flow control setting used on link */
2223 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2224 	    hw->fc.current_mode == ixgbe_fc_full)
2225 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2226 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2227 	    hw->fc.current_mode == ixgbe_fc_full)
2228 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2229 } /* ixgbe_if_media_status */
2230 
2231 /************************************************************************
2232  * ixgbe_if_media_change - Media Ioctl callback
2233  *
2234  *   Called when the user changes speed/duplex using
2235  *   media/mediaopt option with ifconfig.
2236  ************************************************************************/
2237 static int
2238 ixgbe_if_media_change(if_ctx_t ctx)
2239 {
2240 	struct adapter   *adapter = iflib_get_softc(ctx);
2241 	struct ifmedia   *ifm = iflib_get_media(ctx);
2242 	struct ixgbe_hw  *hw = &adapter->hw;
2243 	ixgbe_link_speed speed = 0;
2244 
2245 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2246 
2247 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2248 		return (EINVAL);
2249 
2250 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2251 		return (EPERM);
2252 
2253 	/*
2254 	 * We don't actually need to check against the supported
2255 	 * media types of the adapter; ifmedia will take care of
2256 	 * that for us.
2257 	 */
2258 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2259 	case IFM_AUTO:
2260 	case IFM_10G_T:
2261 		speed |= IXGBE_LINK_SPEED_100_FULL;
2262 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2263 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2264 		break;
2265 	case IFM_10G_LRM:
2266 	case IFM_10G_LR:
2267 #ifndef IFM_ETH_XTYPE
2268 	case IFM_10G_SR: /* KR, too */
2269 	case IFM_10G_CX4: /* KX4 */
2270 #else
2271 	case IFM_10G_KR:
2272 	case IFM_10G_KX4:
2273 #endif
2274 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2275 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2276 		break;
2277 #ifndef IFM_ETH_XTYPE
2278 	case IFM_1000_CX: /* KX */
2279 #else
2280 	case IFM_1000_KX:
2281 #endif
2282 	case IFM_1000_LX:
2283 	case IFM_1000_SX:
2284 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2285 		break;
2286 	case IFM_1000_T:
2287 		speed |= IXGBE_LINK_SPEED_100_FULL;
2288 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2289 		break;
2290 	case IFM_10G_TWINAX:
2291 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2292 		break;
2293 	case IFM_100_TX:
2294 		speed |= IXGBE_LINK_SPEED_100_FULL;
2295 		break;
2296 	case IFM_10_T:
2297 		speed |= IXGBE_LINK_SPEED_10_FULL;
2298 		break;
2299 	default:
2300 		goto invalid;
2301 	}
2302 
2303 	hw->mac.autotry_restart = TRUE;
2304 	hw->mac.ops.setup_link(hw, speed, TRUE);
2305 	adapter->advertise =
2306 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2307 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2308 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2309 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
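	/*
	 * advertise mirrors the advertise_speed sysctl encoding:
	 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M. For example, the
	 * IFM_10G_T case above (100M + 1G + 10G) yields advertise = 0x7.
	 */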
2310 
2311 	return (0);
2312 
2313 invalid:
2314 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2315 
2316 	return (EINVAL);
2317 } /* ixgbe_if_media_change */
2318 
2319 /************************************************************************
2320  * ixgbe_if_promisc_set
2321  ************************************************************************/
2322 static int
2323 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2324 {
2325 	struct adapter *adapter = iflib_get_softc(ctx);
2326 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2327 	u32            rctl;
2328 	int            mcnt = 0;
2329 
2330 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2331 	rctl &= (~IXGBE_FCTRL_UPE);
2332 	if (ifp->if_flags & IFF_ALLMULTI)
2333 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2334 	else {
2335 		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2336 	}
2337 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2338 		rctl &= (~IXGBE_FCTRL_MPE);
2339 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2340 
2341 	if (ifp->if_flags & IFF_PROMISC) {
2342 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2343 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2344 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2345 		rctl |= IXGBE_FCTRL_MPE;
2346 		rctl &= ~IXGBE_FCTRL_UPE;
2347 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2348 	}
2349 	return (0);
2350 } /* ixgbe_if_promisc_set */
2351 
2352 /************************************************************************
2353  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2354  ************************************************************************/
2355 static int
2356 ixgbe_msix_link(void *arg)
2357 {
2358 	struct adapter  *adapter = arg;
2359 	struct ixgbe_hw *hw = &adapter->hw;
2360 	u32             eicr, eicr_mask;
2361 	s32             retval;
2362 
2363 	++adapter->link_irq;
2364 
2365 	/* Pause other interrupts */
2366 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2367 
2368 	/* First get the cause */
2369 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2370 	/* Be sure the queue bits are not cleared */
2371 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2372 	/* Clear interrupt with write */
2373 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2374 
2375 	/* Link status change */
2376 	if (eicr & IXGBE_EICR_LSC) {
2377 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2378 		iflib_admin_intr_deferred(adapter->ctx);
2379 	}
2380 
2381 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2382 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2383 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2384 			/* This is probably overkill :) */
2385 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2386 				return (FILTER_HANDLED);
2387 			/* Disable the interrupt */
2388 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2389 			GROUPTASK_ENQUEUE(&adapter->fdir_task);
2390 		} else if (eicr & IXGBE_EICR_ECC) {
2391 			device_printf(iflib_get_dev(adapter->ctx),
2392 			    "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2393 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2394 		}
2396 
2397 		/* Check for over temp condition */
2398 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2399 			switch (adapter->hw.mac.type) {
2400 			case ixgbe_mac_X550EM_a:
2401 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2402 					break;
2403 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2404 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2405 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2406 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2407 				retval = hw->phy.ops.check_overtemp(hw);
2408 				if (retval != IXGBE_ERR_OVERTEMP)
2409 					break;
2410 				device_printf(iflib_get_dev(adapter->ctx),
2411 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2412 				device_printf(iflib_get_dev(adapter->ctx),
2413 				    "System shutdown required!\n");
2414 				break;
2415 			default:
2416 				if (!(eicr & IXGBE_EICR_TS))
2417 					break;
2418 				retval = hw->phy.ops.check_overtemp(hw);
2419 				if (retval != IXGBE_ERR_OVERTEMP)
2420 					break;
2421 				device_printf(iflib_get_dev(adapter->ctx),
2422 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2423 				device_printf(iflib_get_dev(adapter->ctx),
2424 				    "System shutdown required!\n");
2425 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2426 				break;
2427 			}
2428 		}
2429 
2430 		/* Check for VF message */
2431 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2432 		    (eicr & IXGBE_EICR_MAILBOX))
2433 			GROUPTASK_ENQUEUE(&adapter->mbx_task);
2434 	}
2435 
2436 	if (ixgbe_is_sfp(hw)) {
2437 		/* Pluggable optics-related interrupt */
2438 		if (hw->mac.type >= ixgbe_mac_X540)
2439 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2440 		else
2441 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2442 
2443 		if (eicr & eicr_mask) {
2444 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2445 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2446 				GROUPTASK_ENQUEUE(&adapter->mod_task);
2447 		}
2448 
2449 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2450 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2451 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2452 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2453 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2454 				GROUPTASK_ENQUEUE(&adapter->msf_task);
2455 		}
2456 	}
2457 
2458 	/* Check for fan failure */
2459 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2460 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2461 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2462 	}
2463 
2464 	/* External PHY interrupt */
2465 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2466 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2467 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2468 		GROUPTASK_ENQUEUE(&adapter->phy_task);
2469 	}
2470 
2471 	/* Re-enable other interrupts */
2472 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2473 
2474 	return (FILTER_HANDLED);
2475 } /* ixgbe_msix_link */
2476 
2477 /************************************************************************
2478  * ixgbe_sysctl_interrupt_rate_handler
2479  ************************************************************************/
2480 static int
2481 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2482 {
2483 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2484 	int                error;
2485 	unsigned int       reg, usec, rate;
2486 
2487 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2488 	usec = ((reg & 0x0FF8) >> 3);
2489 	if (usec > 0)
2490 		rate = 500000 / usec;
2491 	else
2492 		rate = 0;
2493 	error = sysctl_handle_int(oidp, &rate, 0, req);
2494 	if (error || !req->newptr)
2495 		return error;
2496 	reg &= ~0xfff; /* default, no limitation */
2497 	ixgbe_max_interrupt_rate = 0;
2498 	if (rate > 0 && rate < 500000) {
2499 		if (rate < 1000)
2500 			rate = 1000;
2501 		ixgbe_max_interrupt_rate = rate;
2502 		reg |= ((4000000/rate) & 0xff8);
2503 	}
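	/*
	 * Worked example (assuming the EITR interval field in bits 3-11 is
	 * in 2 usec units): reg = 0x280 reads back as usec = 0x280 >> 3 = 80,
	 * i.e. a reported rate of 500000 / 80 = 6250 ints/sec; writing
	 * rate = 6250 stores 4000000 / 6250 = 640 = 0x280 again.
	 */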
2504 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2505 
2506 	return (0);
2507 } /* ixgbe_sysctl_interrupt_rate_handler */
2508 
2509 /************************************************************************
2510  * ixgbe_add_device_sysctls
2511  ************************************************************************/
2512 static void
2513 ixgbe_add_device_sysctls(if_ctx_t ctx)
2514 {
2515 	struct adapter         *adapter = iflib_get_softc(ctx);
2516 	device_t               dev = iflib_get_dev(ctx);
2517 	struct ixgbe_hw        *hw = &adapter->hw;
2518 	struct sysctl_oid_list *child;
2519 	struct sysctl_ctx_list *ctx_list;
2520 
2521 	ctx_list = device_get_sysctl_ctx(dev);
2522 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2523 
2524 	/* Sysctls for all devices */
2525 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2526 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2527 	    IXGBE_SYSCTL_DESC_SET_FC);
2528 
2529 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2530 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2531 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2532 
2533 #ifdef IXGBE_DEBUG
2534 	/* testing sysctls (for all devices) */
2535 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2536 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2537 	    "I", "PCI Power State");
2538 
2539 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2540 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2541 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2542 #endif
2543 	/* for X550 series devices */
2544 	if (hw->mac.type >= ixgbe_mac_X550)
2545 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2546 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2547 		    "I", "DMA Coalesce");
2548 
2549 	/* for WoL-capable devices */
2550 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2551 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2552 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2553 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2554 
2555 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2556 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2557 		    "I", "Enable/Disable Wake Up Filters");
2558 	}
2559 
2560 	/* for X552/X557-AT devices */
2561 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2562 		struct sysctl_oid *phy_node;
2563 		struct sysctl_oid_list *phy_list;
2564 
2565 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2566 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2567 		phy_list = SYSCTL_CHILDREN(phy_node);
2568 
2569 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2570 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2571 		    "I", "Current External PHY Temperature (Celsius)");
2572 
2573 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2574 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2575 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2576 		    "External PHY High Temperature Event Occurred");
2577 	}
2578 
2579 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2580 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2581 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2582 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2583 	}
2584 } /* ixgbe_add_device_sysctls */
2585 
2586 /************************************************************************
2587  * ixgbe_allocate_pci_resources
2588  ************************************************************************/
2589 static int
2590 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2591 {
2592 	struct adapter *adapter = iflib_get_softc(ctx);
2593 	device_t        dev = iflib_get_dev(ctx);
2594 	int             rid;
2595 
2596 	rid = PCIR_BAR(0);
2597 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2598 	    RF_ACTIVE);
2599 
2600 	if (!(adapter->pci_mem)) {
2601 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2602 		return (ENXIO);
2603 	}
2604 
2605 	/* Save bus_space values for READ/WRITE_REG macros */
2606 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2607 	adapter->osdep.mem_bus_space_handle =
2608 	    rman_get_bushandle(adapter->pci_mem);
2609 	/* Set hw values for shared code */
2610 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2611 
2612 	return (0);
2613 } /* ixgbe_allocate_pci_resources */
2614 
2615 /************************************************************************
2616  * ixgbe_if_detach - Device removal routine
2617  *
2618  *   Called when the driver is being removed.
2619  *   Stops the adapter and deallocates all the resources
2620  *   that were allocated for driver operation.
2621  *
2622  *   return 0 on success, positive on failure
2623  ************************************************************************/
2624 static int
2625 ixgbe_if_detach(if_ctx_t ctx)
2626 {
2627 	struct adapter *adapter = iflib_get_softc(ctx);
2628 	device_t       dev = iflib_get_dev(ctx);
2629 	u32            ctrl_ext;
2630 
2631 	INIT_DEBUGOUT("ixgbe_detach: begin");
2632 
2633 	if (ixgbe_pci_iov_detach(dev) != 0) {
2634 		device_printf(dev, "SR-IOV in use; detach first.\n");
2635 		return (EBUSY);
2636 	}
2637 
2638 	iflib_config_gtask_deinit(&adapter->mod_task);
2639 	iflib_config_gtask_deinit(&adapter->msf_task);
2640 	iflib_config_gtask_deinit(&adapter->phy_task);
2641 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2642 		iflib_config_gtask_deinit(&adapter->mbx_task);
2643 
2644 	ixgbe_setup_low_power_mode(ctx);
2645 
2646 	/* let hardware know driver is unloading */
2647 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2648 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2649 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2650 
2651 	ixgbe_free_pci_resources(ctx);
2652 	free(adapter->mta, M_IXGBE);
2653 
2654 	return (0);
2655 } /* ixgbe_if_detach */
2656 
2657 /************************************************************************
2658  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2659  *
2660  *   Prepare the adapter/port for LPLU and/or WoL
2661  ************************************************************************/
2662 static int
2663 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2664 {
2665 	struct adapter  *adapter = iflib_get_softc(ctx);
2666 	struct ixgbe_hw *hw = &adapter->hw;
2667 	device_t        dev = iflib_get_dev(ctx);
2668 	s32             error = 0;
2669 
2670 	if (!hw->wol_enabled)
2671 		ixgbe_set_phy_power(hw, FALSE);
2672 
2673 	/* Limit power management flow to X550EM baseT */
2674 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2675 	    hw->phy.ops.enter_lplu) {
2676 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2677 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2678 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
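		/*
		 * (The ~(u32)2 above presumably clears IXGBE_GRC_APME, the
		 * APM wakeup enable bit, matching the comment above.)
		 */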
2679 
2680 		/*
2681 		 * Clear Wake Up Status register to prevent any previous wakeup
2682 		 * events from waking us up immediately after we suspend.
2683 		 */
2684 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2685 
2686 		/*
2687 		 * Program the Wakeup Filter Control register with user filter
2688 		 * settings
2689 		 */
2690 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2691 
2692 		/* Enable wakeups and power management in Wakeup Control */
2693 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2694 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2695 
2696 		/* X550EM baseT adapters need a special LPLU flow */
2697 		hw->phy.reset_disable = TRUE;
2698 		ixgbe_if_stop(ctx);
2699 		error = hw->phy.ops.enter_lplu(hw);
2700 		if (error)
2701 			device_printf(dev, "Error entering LPLU: %d\n", error);
2702 		hw->phy.reset_disable = FALSE;
2703 	} else {
2704 		/* Just stop for other adapters */
2705 		ixgbe_if_stop(ctx);
2706 	}
2707 
2708 	return error;
2709 } /* ixgbe_setup_low_power_mode */
2710 
2711 /************************************************************************
2712  * ixgbe_if_shutdown - Shutdown entry point
2713  ************************************************************************/
2714 static int
2715 ixgbe_if_shutdown(if_ctx_t ctx)
2716 {
2717 	int error = 0;
2718 
2719 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2720 
2721 	error = ixgbe_setup_low_power_mode(ctx);
2722 
2723 	return (error);
2724 } /* ixgbe_if_shutdown */
2725 
2726 /************************************************************************
2727  * ixgbe_if_suspend
2728  *
2729  *   From D0 to D3
2730  ************************************************************************/
2731 static int
2732 ixgbe_if_suspend(if_ctx_t ctx)
2733 {
2734 	int error = 0;
2735 
2736 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2737 
2738 	error = ixgbe_setup_low_power_mode(ctx);
2739 
2740 	return (error);
2741 } /* ixgbe_if_suspend */
2742 
2743 /************************************************************************
2744  * ixgbe_if_resume
2745  *
2746  *   From D3 to D0
2747  ************************************************************************/
2748 static int
2749 ixgbe_if_resume(if_ctx_t ctx)
2750 {
2751 	struct adapter  *adapter = iflib_get_softc(ctx);
2752 	device_t        dev = iflib_get_dev(ctx);
2753 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2754 	struct ixgbe_hw *hw = &adapter->hw;
2755 	u32             wus;
2756 
2757 	INIT_DEBUGOUT("ixgbe_resume: begin");
2758 
2759 	/* Read & clear WUS register */
2760 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2761 	if (wus)
2762 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2763 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2764 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2765 	/* And clear WUFC until next low-power transition */
2766 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2767 
2768 	/*
2769 	 * Required after D3->D0 transition;
2770 	 * will re-advertise all previous advertised speeds
2771 	 */
2772 	if (ifp->if_flags & IFF_UP)
2773 		ixgbe_if_init(ctx);
2774 
2775 	return (0);
2776 } /* ixgbe_if_resume */
2777 
2778 /************************************************************************
2779  * ixgbe_if_mtu_set - Ioctl mtu entry point
2780  *
2781  *   Return 0 on success, EINVAL on failure
2782  ************************************************************************/
2783 static int
2784 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2785 {
2786 	struct adapter *adapter = iflib_get_softc(ctx);
2787 	int error = 0;
2788 
2789 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2790 
2791 	if (mtu > IXGBE_MAX_MTU) {
2792 		error = EINVAL;
2793 	} else {
2794 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2795 	}
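	/*
	 * Example (assuming IXGBE_MTU_HDR is the Ethernet header plus CRC,
	 * 18 bytes): the default 1500-byte MTU gives a max frame size of
	 * 1518 bytes.
	 */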
2796 
2797 	return error;
2798 } /* ixgbe_if_mtu_set */
2799 
2800 /************************************************************************
2801  * ixgbe_if_crcstrip_set
2802  ************************************************************************/
2803 static void
2804 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2805 {
2806 	struct adapter *sc = iflib_get_softc(ctx);
2807 	struct ixgbe_hw *hw = &sc->hw;
2808 	/* crc stripping is set in two places:
2809 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2810 	 * IXGBE_RDRXCTL (set by the original driver in
2811 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2812 	 *	We disable the setting when netmap is compiled in).
2813 	 * We update the values here, but also in ixgbe.c because
2814 	 * init_locked sometimes is called outside our control.
2815 	 */
2816 	uint32_t hl, rxc;
2817 
2818 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2819 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2820 #ifdef NETMAP
2821 	if (netmap_verbose)
2822 		D("%s read  HLREG 0x%x rxc 0x%x",
2823 			onoff ? "enter" : "exit", hl, rxc);
2824 #endif
2825 	/* hw requirements ... */
2826 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2827 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2828 	if (onoff && !crcstrip) {
2829 		/* keep the crc. Fast rx */
2830 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2831 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2832 	} else {
2833 		/* reset default mode */
2834 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2835 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2836 	}
2837 #ifdef NETMAP
2838 	if (netmap_verbose)
2839 		D("%s write HLREG 0x%x rxc 0x%x",
2840 			onoff ? "enter" : "exit", hl, rxc);
2841 #endif
2842 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2843 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2844 } /* ixgbe_if_crcstrip_set */
2845 
2846 /*********************************************************************
2847  * ixgbe_if_init - Init entry point
2848  *
2849  *   Used in two ways: It is used by the stack as an init
2850  *   entry point in network interface structure. It is also
2851  *   used by the driver as a hw/sw initialization routine to
2852  *   get to a consistent state.
2853  *
2854  *   Return 0 on success, positive on failure
2855  **********************************************************************/
2856 void
2857 ixgbe_if_init(if_ctx_t ctx)
2858 {
2859 	struct adapter     *adapter = iflib_get_softc(ctx);
2860 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2861 	device_t           dev = iflib_get_dev(ctx);
2862 	struct ixgbe_hw *hw = &adapter->hw;
2863 	struct ix_rx_queue *rx_que;
2864 	struct ix_tx_queue *tx_que;
2865 	u32             txdctl, mhadd;
2866 	u32             rxdctl, rxctrl;
2867 	u32             ctrl_ext;
2868 
2869 	int             i, j, err;
2870 
2871 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2872 
2873 	/* Queue indices may change with IOV mode */
2874 	ixgbe_align_all_queue_indices(adapter);
2875 
2876 	/* reprogram the RAR[0] in case user changed it. */
2877 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2878 
2879 	/* Get the latest mac address, User can use a LAA */
2880 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2881 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2882 	hw->addr_ctrl.rar_used_count = 1;
2883 
2884 	ixgbe_init_hw(hw);
2885 
2886 	ixgbe_initialize_iov(adapter);
2887 
2888 	ixgbe_initialize_transmit_units(ctx);
2889 
2890 	/* Setup Multicast table */
2891 	ixgbe_if_multi_set(ctx);
2892 
2893 	/* Determine the correct mbuf pool, based on frame size */
2894 	if (adapter->max_frame_size <= MCLBYTES)
2895 		adapter->rx_mbuf_sz = MCLBYTES;
2896 	else
2897 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
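	/*
	 * e.g. a standard 1518-byte frame fits in a 2 KB (MCLBYTES) cluster,
	 * while jumbo frames use MJUMPAGESIZE (page-sized, typically 4 KB)
	 * clusters.
	 */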
2898 
2899 	/* Configure RX settings */
2900 	ixgbe_initialize_receive_units(ctx);
2901 
2902 	/* Enable SDP & MSI-X interrupts based on adapter */
2903 	ixgbe_config_gpie(adapter);
2904 
2905 	/* Set MTU size */
2906 	if (ifp->if_mtu > ETHERMTU) {
2907 		/* aka IXGBE_MAXFRS on 82599 and newer */
2908 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2909 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2910 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
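		/*
		 * MFS sits in the upper half of MHADD, so e.g. a 9018-byte
		 * jumbo max frame is programmed as 9018 << IXGBE_MHADD_MFS_SHIFT.
		 */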
2911 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2912 	}
2913 
2914 	/* Now enable all the queues */
2915 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2916 		struct tx_ring *txr = &tx_que->txr;
2917 
2918 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2919 		txdctl |= IXGBE_TXDCTL_ENABLE;
2920 		/* Set WTHRESH to 8, burst writeback */
2921 		txdctl |= (8 << 16);
2922 		/*
2923 		 * When the internal queue falls below PTHRESH (32),
2924 		 * start prefetching as long as there are at least
2925 		 * HTHRESH (1) buffers ready. The values are taken
2926 		 * from the Intel linux driver 3.8.21.
2927 		 * Prefetching enables tx line rate even with 1 queue.
2928 		 */
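		/*
		 * For reference: TXDCTL packs PTHRESH in bits [6:0],
		 * HTHRESH in bits [14:8] and WTHRESH in bits [22:16],
		 * which is why WTHRESH is shifted by 16 above and
		 * PTHRESH/HTHRESH by 0/8 below.
		 */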
2929 		txdctl |= (32 << 0) | (1 << 8);
2930 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2931 	}
2932 
2933 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2934 		struct rx_ring *rxr = &rx_que->rxr;
2935 
2936 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2937 		if (hw->mac.type == ixgbe_mac_82598EB) {
2938 			/*
2939 			 * PTHRESH = 32 (0x20), HTHRESH = 4, WTHRESH = 8;
2940 			 * 0x080420 packs these into bits [6:0], [14:8]
2941 			 * and [22:16] respectively.
2942 			 */
2943 			rxdctl &= ~0x3FFFFF;
2944 			rxdctl |= 0x080420;
2945 		}
2946 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2947 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2948 		for (j = 0; j < 10; j++) {
2949 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2950 			    IXGBE_RXDCTL_ENABLE)
2951 				break;
2952 			else
2953 				msec_delay(1);
2954 		}
2955 		wmb();
2956 	}
2957 
2958 	/* Enable Receive engine */
2959 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2960 	if (hw->mac.type == ixgbe_mac_82598EB)
2961 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2962 	rxctrl |= IXGBE_RXCTRL_RXEN;
2963 	ixgbe_enable_rx_dma(hw, rxctrl);
2964 
2965 	/* Set up MSI/MSI-X routing */
2966 	if (ixgbe_enable_msix)  {
2967 		ixgbe_configure_ivars(adapter);
2968 		/* Set up auto-mask */
2969 		if (hw->mac.type == ixgbe_mac_82598EB)
2970 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2971 		else {
2972 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2973 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2974 		}
2975 	} else {  /* Simple settings for Legacy/MSI */
2976 		ixgbe_set_ivar(adapter, 0, 0, 0);
2977 		ixgbe_set_ivar(adapter, 0, 0, 1);
2978 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2979 	}
2980 
2981 	ixgbe_init_fdir(adapter);
2982 
2983 	/*
2984 	 * Check on any SFP devices that
2985 	 * need to be kick-started
2986 	 */
2987 	if (hw->phy.type == ixgbe_phy_none) {
2988 		err = hw->phy.ops.identify(hw);
2989 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2990 			device_printf(dev,
2991 			    "Unsupported SFP+ module type was detected.\n");
2992 			return;
2993 		}
2994 	}
2995 
2996 	/* Set moderation on the Link interrupt */
2997 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
2998 
2999 	/* Enable power to the phy. */
3000 	ixgbe_set_phy_power(hw, TRUE);
3001 
3002 	/* Config/Enable Link */
3003 	ixgbe_config_link(adapter);
3004 
3005 	/* Hardware Packet Buffer & Flow Control setup */
3006 	ixgbe_config_delay_values(adapter);
3007 
3008 	/* Initialize the FC settings */
3009 	ixgbe_start_hw(hw);
3010 
3011 	/* Set up VLAN support and filter */
3012 	ixgbe_setup_vlan_hw_support(ctx);
3013 
3014 	/* Setup DMA Coalescing */
3015 	ixgbe_config_dmac(adapter);
3016 
3017 	/* And now turn on interrupts */
3018 	ixgbe_if_enable_intr(ctx);
3019 
3020 	/* Enable the use of the MBX by the VF's */
3021 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3022 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3023 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3024 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3025 	}
3026 
3027 } /* ixgbe_if_init */
3028 
3029 /************************************************************************
3030  * ixgbe_set_ivar
3031  *
3032  *   Setup the correct IVAR register for a particular MSI-X interrupt
3033  *     (yes this is all very magic and confusing :)
3034  *    - entry is the register array entry
3035  *    - vector is the MSI-X vector for this queue
3036  *    - type is RX/TX/MISC
3037  ************************************************************************/
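/*
 * A rough sketch of the layout handled here: on 82598 each 32-bit IVAR
 * register holds four 8-bit entries (TX causes offset by 64 from RX),
 * while on 82599 and later each IVAR register covers a queue pair, with
 * the RX/TX vectors of the even queue in bytes 0/1 and of the odd queue
 * in bytes 2/3; the "other causes" vector lives in IVAR_MISC.
 */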
3038 static void
3039 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3040 {
3041 	struct ixgbe_hw *hw = &adapter->hw;
3042 	u32 ivar, index;
3043 
3044 	vector |= IXGBE_IVAR_ALLOC_VAL;
3045 
3046 	switch (hw->mac.type) {
3047 	case ixgbe_mac_82598EB:
3048 		if (type == -1)
3049 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3050 		else
3051 			entry += (type * 64);
3052 		index = (entry >> 2) & 0x1F;
3053 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3054 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3055 		ivar |= (vector << (8 * (entry & 0x3)));
3056 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3057 		break;
3058 	case ixgbe_mac_82599EB:
3059 	case ixgbe_mac_X540:
3060 	case ixgbe_mac_X550:
3061 	case ixgbe_mac_X550EM_x:
3062 	case ixgbe_mac_X550EM_a:
3063 		if (type == -1) { /* MISC IVAR */
3064 			index = (entry & 1) * 8;
3065 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3066 			ivar &= ~(0xFF << index);
3067 			ivar |= (vector << index);
3068 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3069 		} else {          /* RX/TX IVARS */
3070 			index = (16 * (entry & 1)) + (8 * type);
3071 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3072 			ivar &= ~(0xFF << index);
3073 			ivar |= (vector << index);
3074 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3075 		}
3076 	default:
3077 		break;
3078 	}
3079 } /* ixgbe_set_ivar */
3080 
3081 /************************************************************************
3082  * ixgbe_configure_ivars
3083  ************************************************************************/
3084 static void
3085 ixgbe_configure_ivars(struct adapter *adapter)
3086 {
3087 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3088 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3089 	u32                newitr;
3090 
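	/*
	 * Note on the arithmetic below: the EITR interval field occupies
	 * bits [11:3] and counts in ~2 usec units, so a target of R
	 * interrupts/sec maps to 4000000 / R with the low three bits
	 * masked off (0x0FF8).  For example, a limit of 8000 ints/sec
	 * gives 500, masked to 496 (62 units, ~124 usec between interrupts).
	 */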
3091 	if (ixgbe_max_interrupt_rate > 0)
3092 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3093 	else {
3094 		/*
3095 		 * Disable DMA coalescing if interrupt moderation is
3096 		 * disabled.
3097 		 */
3098 		adapter->dmac = 0;
3099 		newitr = 0;
3100 	}
3101 
3102 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3103 		struct rx_ring *rxr = &rx_que->rxr;
3104 
3105 		/* First the RX queue entry */
3106 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3107 
3108 		/* Set an Initial EITR value */
3109 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3110 	}
3111 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3112 		struct tx_ring *txr = &tx_que->txr;
3113 
3114 		/* ... and the TX */
3115 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3116 	}
3117 	/* For the Link interrupt */
3118 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3119 } /* ixgbe_configure_ivars */
3120 
3121 /************************************************************************
3122  * ixgbe_config_gpie
3123  ************************************************************************/
3124 static void
3125 ixgbe_config_gpie(struct adapter *adapter)
3126 {
3127 	struct ixgbe_hw *hw = &adapter->hw;
3128 	u32             gpie;
3129 
3130 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3131 
3132 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3133 		/* Enable Enhanced MSI-X mode */
3134 		gpie |= IXGBE_GPIE_MSIX_MODE
3135 		     |  IXGBE_GPIE_EIAME
3136 		     |  IXGBE_GPIE_PBA_SUPPORT
3137 		     |  IXGBE_GPIE_OCD;
3138 	}
3139 
3140 	/* Fan Failure Interrupt */
3141 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3142 		gpie |= IXGBE_SDP1_GPIEN;
3143 
3144 	/* Thermal Sensor Interrupt */
3145 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3146 		gpie |= IXGBE_SDP0_GPIEN_X540;
3147 
3148 	/* Link detection */
3149 	switch (hw->mac.type) {
3150 	case ixgbe_mac_82599EB:
3151 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3152 		break;
3153 	case ixgbe_mac_X550EM_x:
3154 	case ixgbe_mac_X550EM_a:
3155 		gpie |= IXGBE_SDP0_GPIEN_X540;
3156 		break;
3157 	default:
3158 		break;
3159 	}
3160 
3161 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3162 
3163 } /* ixgbe_config_gpie */
3164 
3165 /************************************************************************
3166  * ixgbe_config_delay_values
3167  *
3168  *   Requires adapter->max_frame_size to be set.
3169  ************************************************************************/
3170 static void
3171 ixgbe_config_delay_values(struct adapter *adapter)
3172 {
3173 	struct ixgbe_hw *hw = &adapter->hw;
3174 	u32             rxpb, frame, size, tmp;
3175 
3176 	frame = adapter->max_frame_size;
3177 
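	/*
	 * Note on units: the IXGBE_DV*() macros yield a delay value in
	 * bit times, IXGBE_BT2KB() rounds that up to KB, and RXPBSIZE
	 * shifted down by 10 is the packet buffer size in KB, so the
	 * high/low water marks are kept in KB as the shared flow
	 * control code expects.
	 */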
3178 	/* Calculate High Water */
3179 	switch (hw->mac.type) {
3180 	case ixgbe_mac_X540:
3181 	case ixgbe_mac_X550:
3182 	case ixgbe_mac_X550EM_x:
3183 	case ixgbe_mac_X550EM_a:
3184 		tmp = IXGBE_DV_X540(frame, frame);
3185 		break;
3186 	default:
3187 		tmp = IXGBE_DV(frame, frame);
3188 		break;
3189 	}
3190 	size = IXGBE_BT2KB(tmp);
3191 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3192 	hw->fc.high_water[0] = rxpb - size;
3193 
3194 	/* Now calculate Low Water */
3195 	switch (hw->mac.type) {
3196 	case ixgbe_mac_X540:
3197 	case ixgbe_mac_X550:
3198 	case ixgbe_mac_X550EM_x:
3199 	case ixgbe_mac_X550EM_a:
3200 		tmp = IXGBE_LOW_DV_X540(frame);
3201 		break;
3202 	default:
3203 		tmp = IXGBE_LOW_DV(frame);
3204 		break;
3205 	}
3206 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3207 
3208 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3209 	hw->fc.send_xon = TRUE;
3210 } /* ixgbe_config_delay_values */
3211 
3212 /************************************************************************
3213  * ixgbe_if_multi_set - Multicast Update
3214  *
3215  *   Called whenever multicast address list is updated.
3216  ************************************************************************/
3217 static int
3218 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3219 {
3220 	struct adapter *adapter = arg;
3221 	struct ixgbe_mc_addr *mta = adapter->mta;
3222 
3223 	if (ifma->ifma_addr->sa_family != AF_LINK)
3224 		return (0);
3225 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3226 		return (0);
3227 	bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3228 	    mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3229 	mta[count].vmdq = adapter->pool;
3230 
3231 	return (1);
3232 } /* ixgbe_mc_filter_apply */
3233 
3234 static void
3235 ixgbe_if_multi_set(if_ctx_t ctx)
3236 {
3237 	struct adapter       *adapter = iflib_get_softc(ctx);
3238 	struct ixgbe_mc_addr *mta;
3239 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3240 	u8                   *update_ptr;
3241 	int                  mcnt = 0;
3242 	u32                  fctrl;
3243 
3244 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3245 
3246 	mta = adapter->mta;
3247 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3248 
3249 	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3250 
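	/* UPE/MPE below are the FCTRL unicast/multicast promiscuous enables */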
3251 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3252 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3253 	if (ifp->if_flags & IFF_PROMISC)
3254 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3255 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3256 	    ifp->if_flags & IFF_ALLMULTI) {
3257 		fctrl |= IXGBE_FCTRL_MPE;
3258 		fctrl &= ~IXGBE_FCTRL_UPE;
3259 	} else
3260 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3261 
3262 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3263 
3264 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3265 		update_ptr = (u8 *)mta;
3266 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3267 		    ixgbe_mc_array_itr, TRUE);
3268 	}
3269 
3270 } /* ixgbe_if_multi_set */
3271 
3272 /************************************************************************
3273  * ixgbe_mc_array_itr
3274  *
3275  *   An iterator function needed by the multicast shared code.
3276  *   It feeds the shared code routine the addresses in the
3277  *   array of ixgbe_if_multi_set() one by one.
3278  ************************************************************************/
3279 static u8 *
3280 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3281 {
3282 	struct ixgbe_mc_addr *mta;
3283 
3284 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3285 	*vmdq = mta->vmdq;
3286 
3287 	*update_ptr = (u8*)(mta + 1);
3288 
3289 	return (mta->addr);
3290 } /* ixgbe_mc_array_itr */
3291 
3292 /************************************************************************
3293  * ixgbe_if_timer - Timer routine
3294  *
3295  *   Checks for link status, updates statistics,
3296  *   and runs the watchdog check.
3297  ************************************************************************/
3298 static void
3299 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3300 {
3301 	struct adapter *adapter = iflib_get_softc(ctx);
3302 
3303 	if (qid != 0)
3304 		return;
3305 
3306 	/* Check for pluggable optics */
3307 	if (adapter->sfp_probe)
3308 		if (!ixgbe_sfp_probe(ctx))
3309 			return; /* Nothing to do */
3310 
3311 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3312 	    &adapter->link_up, 0);
3313 
3314 	/* Fire off the adminq task */
3315 	iflib_admin_intr_deferred(ctx);
3316 
3317 } /* ixgbe_if_timer */
3318 
3319 /************************************************************************
3320  * ixgbe_sfp_probe
3321  *
3322  *   Determine if a port had optics inserted.
3323  ************************************************************************/
3324 static bool
3325 ixgbe_sfp_probe(if_ctx_t ctx)
3326 {
3327 	struct adapter  *adapter = iflib_get_softc(ctx);
3328 	struct ixgbe_hw *hw = &adapter->hw;
3329 	device_t        dev = iflib_get_dev(ctx);
3330 	bool            result = FALSE;
3331 
3332 	if ((hw->phy.type == ixgbe_phy_nl) &&
3333 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3334 		s32 ret = hw->phy.ops.identify_sfp(hw);
3335 		if (ret)
3336 			goto out;
3337 		ret = hw->phy.ops.reset(hw);
3338 		adapter->sfp_probe = FALSE;
3339 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3340 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3341 			device_printf(dev,
3342 			    "Reload driver with supported module.\n");
3343 			goto out;
3344 		} else
3345 			device_printf(dev, "SFP+ module detected!\n");
3346 		/* We now have supported optics */
3347 		result = TRUE;
3348 	}
3349 out:
3350 
3351 	return (result);
3352 } /* ixgbe_sfp_probe */
3353 
3354 /************************************************************************
3355  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3356  ************************************************************************/
3357 static void
3358 ixgbe_handle_mod(void *context)
3359 {
3360 	if_ctx_t        ctx = context;
3361 	struct adapter  *adapter = iflib_get_softc(ctx);
3362 	struct ixgbe_hw *hw = &adapter->hw;
3363 	device_t        dev = iflib_get_dev(ctx);
3364 	u32             err, cage_full = 0;
3365 
3366 	adapter->sfp_reinit = 1;
3367 	if (adapter->hw.need_crosstalk_fix) {
3368 		switch (hw->mac.type) {
3369 		case ixgbe_mac_82599EB:
3370 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3371 			    IXGBE_ESDP_SDP2;
3372 			break;
3373 		case ixgbe_mac_X550EM_x:
3374 		case ixgbe_mac_X550EM_a:
3375 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3376 			    IXGBE_ESDP_SDP0;
3377 			break;
3378 		default:
3379 			break;
3380 		}
3381 
3382 		if (!cage_full)
3383 			goto handle_mod_out;
3384 	}
3385 
3386 	err = hw->phy.ops.identify_sfp(hw);
3387 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3388 		device_printf(dev,
3389 		    "Unsupported SFP+ module type was detected.\n");
3390 		goto handle_mod_out;
3391 	}
3392 
3393 	if (hw->mac.type == ixgbe_mac_82598EB)
3394 		err = hw->phy.ops.reset(hw);
3395 	else
3396 		err = hw->mac.ops.setup_sfp(hw);
3397 
3398 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3399 		device_printf(dev,
3400 		    "Setup failure - unsupported SFP+ module type.\n");
3401 		goto handle_mod_out;
3402 	}
3403 	GROUPTASK_ENQUEUE(&adapter->msf_task);
3404 	return;
3405 
3406 handle_mod_out:
3407 	adapter->sfp_reinit = 0;
3408 } /* ixgbe_handle_mod */
3409 
3410 
3411 /************************************************************************
3412  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3413  ************************************************************************/
3414 static void
3415 ixgbe_handle_msf(void *context)
3416 {
3417 	if_ctx_t        ctx = context;
3418 	struct adapter  *adapter = iflib_get_softc(ctx);
3419 	struct ixgbe_hw *hw = &adapter->hw;
3420 	u32             autoneg;
3421 	bool            negotiate;
3422 
3423 	if (adapter->sfp_reinit != 1)
3424 		return;
3425 
3426 	/* ixgbe_get_supported_physical_layer() calls hw->phy.ops.identify_sfp() */
3427 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3428 
3429 	autoneg = hw->phy.autoneg_advertised;
3430 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3431 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3432 	if (hw->mac.ops.setup_link)
3433 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3434 
3435 	/* Adjust media types shown in ifconfig */
3436 	ifmedia_removeall(adapter->media);
3437 	ixgbe_add_media_types(adapter->ctx);
3438 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3439 
3440 	adapter->sfp_reinit = 0;
3441 } /* ixgbe_handle_msf */
3442 
3443 /************************************************************************
3444  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3445  ************************************************************************/
3446 static void
3447 ixgbe_handle_phy(void *context)
3448 {
3449 	if_ctx_t        ctx = context;
3450 	struct adapter  *adapter = iflib_get_softc(ctx);
3451 	struct ixgbe_hw *hw = &adapter->hw;
3452 	int             error;
3453 
3454 	error = hw->phy.ops.handle_lasi(hw);
3455 	if (error == IXGBE_ERR_OVERTEMP)
3456 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3457 	else if (error)
3458 		device_printf(adapter->dev,
3459 		    "Error handling LASI interrupt: %d\n", error);
3460 } /* ixgbe_handle_phy */
3461 
3462 /************************************************************************
3463  * ixgbe_if_stop - Stop the hardware
3464  *
3465  *   Disables all traffic on the adapter by issuing a
3466  *   global reset on the MAC and deallocates TX/RX buffers.
3467  ************************************************************************/
3468 static void
3469 ixgbe_if_stop(if_ctx_t ctx)
3470 {
3471 	struct adapter  *adapter = iflib_get_softc(ctx);
3472 	struct ixgbe_hw *hw = &adapter->hw;
3473 
3474 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3475 
3476 	ixgbe_reset_hw(hw);
3477 	hw->adapter_stopped = FALSE;
3478 	ixgbe_stop_adapter(hw);
3479 	if (hw->mac.type == ixgbe_mac_82599EB)
3480 		ixgbe_stop_mac_link_on_d3_82599(hw);
3481 	/* Turn off the laser - noop with no optics */
3482 	ixgbe_disable_tx_laser(hw);
3483 
3484 	/* Update the stack */
3485 	adapter->link_up = FALSE;
3486 	ixgbe_if_update_admin_status(ctx);
3487 
3488 	/* reprogram the RAR[0] in case user changed it. */
3489 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3490 
3491 	return;
3492 } /* ixgbe_if_stop */
3493 
3494 /************************************************************************
3495  * ixgbe_if_update_admin_status - Update OS on link state
3496  *
3497  * Note: Only updates the OS on the cached link state.
3498  *       The real check of the hardware only happens with
3499  *       a link interrupt.
3500  ************************************************************************/
3501 static void
3502 ixgbe_if_update_admin_status(if_ctx_t ctx)
3503 {
3504 	struct adapter *adapter = iflib_get_softc(ctx);
3505 	device_t       dev = iflib_get_dev(ctx);
3506 
3507 	if (adapter->link_up) {
3508 		if (adapter->link_active == FALSE) {
3509 			if (bootverbose)
3510 				device_printf(dev, "Link is up %d Gbps %s \n",
3511 				    ((adapter->link_speed == 128) ? 10 : 1),
3512 				    "Full Duplex");
3513 			adapter->link_active = TRUE;
3514 			/* Update any Flow Control changes */
3515 			ixgbe_fc_enable(&adapter->hw);
3516 			/* Update DMA coalescing config */
3517 			ixgbe_config_dmac(adapter);
3518 			/* should actually be negotiated value */
3519 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3520 
3521 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3522 				ixgbe_ping_all_vfs(adapter);
3523 		}
3524 	} else { /* Link down */
3525 		if (adapter->link_active == TRUE) {
3526 			if (bootverbose)
3527 				device_printf(dev, "Link is Down\n");
3528 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3529 			adapter->link_active = FALSE;
3530 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3531 				ixgbe_ping_all_vfs(adapter);
3532 		}
3533 	}
3534 
3535 	ixgbe_update_stats_counters(adapter);
3536 
3537 	/* Re-enable link interrupts */
3538 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3539 } /* ixgbe_if_update_admin_status */
3540 
3541 /************************************************************************
3542  * ixgbe_config_dmac - Configure DMA Coalescing
3543  ************************************************************************/
3544 static void
3545 ixgbe_config_dmac(struct adapter *adapter)
3546 {
3547 	struct ixgbe_hw          *hw = &adapter->hw;
3548 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3549 
3550 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3551 		return;
3552 
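	/* The XORs below are simply "did it change?" tests against the cached config */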
3553 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3554 	    dcfg->link_speed ^ adapter->link_speed) {
3555 		dcfg->watchdog_timer = adapter->dmac;
3556 		dcfg->fcoe_en = FALSE;
3557 		dcfg->link_speed = adapter->link_speed;
3558 		dcfg->num_tcs = 1;
3559 
3560 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3561 		    dcfg->watchdog_timer, dcfg->link_speed);
3562 
3563 		hw->mac.ops.dmac_config(hw);
3564 	}
3565 } /* ixgbe_config_dmac */
3566 
3567 /************************************************************************
3568  * ixgbe_if_enable_intr
3569  ************************************************************************/
3570 void
3571 ixgbe_if_enable_intr(if_ctx_t ctx)
3572 {
3573 	struct adapter     *adapter = iflib_get_softc(ctx);
3574 	struct ixgbe_hw    *hw = &adapter->hw;
3575 	struct ix_rx_queue *que = adapter->rx_queues;
3576 	u32                mask, fwsm;
3577 
3578 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3579 
3580 	switch (adapter->hw.mac.type) {
3581 	case ixgbe_mac_82599EB:
3582 		mask |= IXGBE_EIMS_ECC;
3583 		/* Temperature sensor on some adapters */
3584 		mask |= IXGBE_EIMS_GPI_SDP0;
3585 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3586 		mask |= IXGBE_EIMS_GPI_SDP1;
3587 		mask |= IXGBE_EIMS_GPI_SDP2;
3588 		break;
3589 	case ixgbe_mac_X540:
3590 		/* Detect if Thermal Sensor is enabled */
3591 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3592 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3593 			mask |= IXGBE_EIMS_TS;
3594 		mask |= IXGBE_EIMS_ECC;
3595 		break;
3596 	case ixgbe_mac_X550:
3597 		/* MAC thermal sensor is automatically enabled */
3598 		mask |= IXGBE_EIMS_TS;
3599 		mask |= IXGBE_EIMS_ECC;
3600 		break;
3601 	case ixgbe_mac_X550EM_x:
3602 	case ixgbe_mac_X550EM_a:
3603 		/* Some devices use SDP0 for important information */
3604 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3605 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3606 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3607 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3608 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3609 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3610 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3611 		mask |= IXGBE_EIMS_ECC;
3612 		break;
3613 	default:
3614 		break;
3615 	}
3616 
3617 	/* Enable Fan Failure detection */
3618 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3619 		mask |= IXGBE_EIMS_GPI_SDP1;
3620 	/* Enable SR-IOV */
3621 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3622 		mask |= IXGBE_EIMS_MAILBOX;
3623 	/* Enable Flow Director */
3624 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3625 		mask |= IXGBE_EIMS_FLOW_DIR;
3626 
3627 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3628 
3629 	/* With MSI-X we use auto clear */
3630 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3631 		mask = IXGBE_EIMS_ENABLE_MASK;
3632 		/* Don't autoclear Link */
3633 		mask &= ~IXGBE_EIMS_OTHER;
3634 		mask &= ~IXGBE_EIMS_LSC;
3635 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3636 			mask &= ~IXGBE_EIMS_MAILBOX;
3637 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3638 	}
3639 
3640 	/*
3641 	 * Now enable all queues, this is done separately to
3642 	 * allow for handling the extended (beyond 32) MSI-X
3643 	 * vectors that can be used by 82599
3644 	 */
3645 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3646 		ixgbe_enable_queue(adapter, que->msix);
3647 
3648 	IXGBE_WRITE_FLUSH(hw);
3649 
3650 } /* ixgbe_if_enable_intr */
3651 
3652 /************************************************************************
3653  * ixgbe_if_disable_intr
3654  ************************************************************************/
3655 static void
3656 ixgbe_if_disable_intr(if_ctx_t ctx)
3657 {
3658 	struct adapter *adapter = iflib_get_softc(ctx);
3659 
3660 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3661 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3662 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3663 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3664 	} else {
3665 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3666 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3667 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3668 	}
3669 	IXGBE_WRITE_FLUSH(&adapter->hw);
3670 
3671 } /* ixgbe_if_disable_intr */
3672 
3673 /************************************************************************
3674  * ixgbe_if_rx_queue_intr_enable
3675  ************************************************************************/
3676 static int
3677 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3678 {
3679 	struct adapter     *adapter = iflib_get_softc(ctx);
3680 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3681 
3682 	ixgbe_enable_queue(adapter, que->rxr.me);
3683 
3684 	return (0);
3685 } /* ixgbe_if_rx_queue_intr_enable */
3686 
3687 /************************************************************************
3688  * ixgbe_enable_queue
3689  ************************************************************************/
3690 static void
3691 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3692 {
3693 	struct ixgbe_hw *hw = &adapter->hw;
3694 	u64             queue = (u64)1 << vector;
3695 	u32             mask;
3696 
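	/*
	 * 82598 has a single EIMS register with the queue bits folded
	 * into EIMS_RTX_QUEUE; later MACs support up to 64 queue vectors,
	 * split across EIMS_EX(0) (vectors 0-31) and EIMS_EX(1)
	 * (vectors 32-63), hence the 64-bit queue mask.
	 */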
3697 	if (hw->mac.type == ixgbe_mac_82598EB) {
3698 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3699 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3700 	} else {
3701 		mask = (queue & 0xFFFFFFFF);
3702 		if (mask)
3703 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3704 		mask = (queue >> 32);
3705 		if (mask)
3706 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3707 	}
3708 } /* ixgbe_enable_queue */
3709 
3710 /************************************************************************
3711  * ixgbe_disable_queue
3712  ************************************************************************/
3713 static void
3714 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3715 {
3716 	struct ixgbe_hw *hw = &adapter->hw;
3717 	u64             queue = (u64)1 << vector;
3718 	u32             mask;
3719 
3720 	if (hw->mac.type == ixgbe_mac_82598EB) {
3721 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3722 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3723 	} else {
3724 		mask = (queue & 0xFFFFFFFF);
3725 		if (mask)
3726 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3727 		mask = (queue >> 32);
3728 		if (mask)
3729 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3730 	}
3731 } /* ixgbe_disable_queue */
3732 
3733 /************************************************************************
3734  * ixgbe_intr - Legacy Interrupt Service Routine
3735  ************************************************************************/
3736 int
3737 ixgbe_intr(void *arg)
3738 {
3739 	struct adapter     *adapter = arg;
3740 	struct ix_rx_queue *que = adapter->rx_queues;
3741 	struct ixgbe_hw    *hw = &adapter->hw;
3742 	if_ctx_t           ctx = adapter->ctx;
3743 	u32                eicr, eicr_mask;
3744 
3745 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3746 
3747 	++que->irqs;
3748 	if (eicr == 0) {
3749 		ixgbe_if_enable_intr(ctx);
3750 		return (FILTER_HANDLED);
3751 	}
3752 
3753 	/* Check for fan failure */
3754 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3755 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3756 		device_printf(adapter->dev,
3757 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3758 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3759 	}
3760 
3761 	/* Link status change */
3762 	if (eicr & IXGBE_EICR_LSC) {
3763 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3764 		iflib_admin_intr_deferred(ctx);
3765 	}
3766 
3767 	if (ixgbe_is_sfp(hw)) {
3768 		/* Pluggable optics-related interrupt */
3769 		if (hw->mac.type >= ixgbe_mac_X540)
3770 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3771 		else
3772 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3773 
3774 		if (eicr & eicr_mask) {
3775 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3776 			GROUPTASK_ENQUEUE(&adapter->mod_task);
3777 		}
3778 
3779 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3780 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3781 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3782 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3783 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
3784 				GROUPTASK_ENQUEUE(&adapter->msf_task);
3785 		}
3786 	}
3787 
3788 	/* External PHY interrupt */
3789 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3790 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3791 		GROUPTASK_ENQUEUE(&adapter->phy_task);
3792 
3793 	return (FILTER_SCHEDULE_THREAD);
3794 } /* ixgbe_intr */
3795 
3796 /************************************************************************
3797  * ixgbe_free_pci_resources
3798  ************************************************************************/
3799 static void
3800 ixgbe_free_pci_resources(if_ctx_t ctx)
3801 {
3802 	struct adapter *adapter = iflib_get_softc(ctx);
3803 	struct         ix_rx_queue *que = adapter->rx_queues;
3804 	device_t       dev = iflib_get_dev(ctx);
3805 
3806 	/* Release all msix queue resources */
3807 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3808 		iflib_irq_free(ctx, &adapter->irq);
3809 
3810 	if (que != NULL) {
3811 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3812 			iflib_irq_free(ctx, &que->que_irq);
3813 		}
3814 	}
3815 
3816 	/*
3817 	 * Release the mapped register space (BAR 0)
3818 	 */
3819 	if (adapter->pci_mem != NULL)
3820 		bus_release_resource(dev, SYS_RES_MEMORY,
3821 		                     PCIR_BAR(0), adapter->pci_mem);
3822 
3823 } /* ixgbe_free_pci_resources */
3824 
3825 /************************************************************************
3826  * ixgbe_sysctl_flowcntl
3827  *
3828  *   SYSCTL wrapper around setting Flow Control
3829  ************************************************************************/
3830 static int
3831 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3832 {
3833 	struct adapter *adapter;
3834 	int            error, fc;
3835 
3836 	adapter = (struct adapter *)arg1;
3837 	fc = adapter->hw.fc.current_mode;
3838 
3839 	error = sysctl_handle_int(oidp, &fc, 0, req);
3840 	if ((error) || (req->newptr == NULL))
3841 		return (error);
3842 
3843 	/* Don't bother if it's not changed */
3844 	if (fc == adapter->hw.fc.current_mode)
3845 		return (0);
3846 
3847 	return ixgbe_set_flowcntl(adapter, fc);
3848 } /* ixgbe_sysctl_flowcntl */
3849 
3850 /************************************************************************
3851  * ixgbe_set_flowcntl - Set flow control
3852  *
3853  *   Flow control values:
3854  *     0 - off
3855  *     1 - rx pause
3856  *     2 - tx pause
3857  *     3 - full
3858  ************************************************************************/
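/*
 * These values normally arrive via the per-device flow control sysctl
 * handled by ixgbe_sysctl_flowcntl() above; assuming the usual "fc"
 * sysctl name, something like:
 *
 *	sysctl dev.ix.0.fc=3	# request full rx/tx pause
 */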
3859 static int
3860 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3861 {
3862 	switch (fc) {
3863 	case ixgbe_fc_rx_pause:
3864 	case ixgbe_fc_tx_pause:
3865 	case ixgbe_fc_full:
3866 		adapter->hw.fc.requested_mode = fc;
3867 		if (adapter->num_rx_queues > 1)
3868 			ixgbe_disable_rx_drop(adapter);
3869 		break;
3870 	case ixgbe_fc_none:
3871 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3872 		if (adapter->num_rx_queues > 1)
3873 			ixgbe_enable_rx_drop(adapter);
3874 		break;
3875 	default:
3876 		return (EINVAL);
3877 	}
3878 
3879 	/* Don't autoneg if forcing a value */
3880 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3881 	ixgbe_fc_enable(&adapter->hw);
3882 
3883 	return (0);
3884 } /* ixgbe_set_flowcntl */
3885 
3886 /************************************************************************
3887  * ixgbe_enable_rx_drop
3888  *
3889  *   Enable the hardware to drop packets when the buffer is
3890  *   full. This is useful with multiqueue, so that no single
3891  *   queue being full stalls the entire RX engine. We only
3892  *   enable this when Multiqueue is enabled AND Flow Control
3893  *   is disabled.
3894  ************************************************************************/
3895 static void
3896 ixgbe_enable_rx_drop(struct adapter *adapter)
3897 {
3898 	struct ixgbe_hw *hw = &adapter->hw;
3899 	struct rx_ring  *rxr;
3900 	u32             srrctl;
3901 
3902 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3903 		rxr = &adapter->rx_queues[i].rxr;
3904 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3905 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3906 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3907 	}
3908 
3909 	/* enable drop for each vf */
3910 	for (int i = 0; i < adapter->num_vfs; i++) {
3911 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3912 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3913 		                IXGBE_QDE_ENABLE));
3914 	}
3915 } /* ixgbe_enable_rx_drop */
3916 
3917 /************************************************************************
3918  * ixgbe_disable_rx_drop
3919  ************************************************************************/
3920 static void
3921 ixgbe_disable_rx_drop(struct adapter *adapter)
3922 {
3923 	struct ixgbe_hw *hw = &adapter->hw;
3924 	struct rx_ring  *rxr;
3925 	u32             srrctl;
3926 
3927 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3928 		rxr = &adapter->rx_queues[i].rxr;
3929 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3930 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3931 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3932 	}
3933 
3934 	/* disable drop for each vf */
3935 	for (int i = 0; i < adapter->num_vfs; i++) {
3936 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3937 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3938 	}
3939 } /* ixgbe_disable_rx_drop */
3940 
3941 /************************************************************************
3942  * ixgbe_sysctl_advertise
3943  *
3944  *   SYSCTL wrapper around setting advertised speed
3945  ************************************************************************/
3946 static int
3947 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3948 {
3949 	struct adapter *adapter;
3950 	int            error, advertise;
3951 
3952 	adapter = (struct adapter *)arg1;
3953 	advertise = adapter->advertise;
3954 
3955 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3956 	if ((error) || (req->newptr == NULL))
3957 		return (error);
3958 
3959 	return ixgbe_set_advertise(adapter, advertise);
3960 } /* ixgbe_sysctl_advertise */
3961 
3962 /************************************************************************
3963  * ixgbe_set_advertise - Control advertised link speed
3964  *
3965  *   Flags:
3966  *     0x1 - advertise 100 Mb
3967  *     0x2 - advertise 1G
3968  *     0x4 - advertise 10G
3969  *     0x8 - advertise 10 Mb (yes, Mb)
3970  ************************************************************************/
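/*
 * The flags OR together; for example, advertise = 0x6 (0x2 | 0x4)
 * requests that only 1G and 10G be advertised, subject to the link
 * capability checks below.
 */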
3971 static int
3972 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3973 {
3974 	device_t         dev = iflib_get_dev(adapter->ctx);
3975 	struct ixgbe_hw  *hw;
3976 	ixgbe_link_speed speed = 0;
3977 	ixgbe_link_speed link_caps = 0;
3978 	s32              err = IXGBE_NOT_IMPLEMENTED;
3979 	bool             negotiate = FALSE;
3980 
3981 	/* Checks to validate new value */
3982 	if (adapter->advertise == advertise) /* no change */
3983 		return (0);
3984 
3985 	hw = &adapter->hw;
3986 
3987 	/* No speed changes for backplane media */
3988 	if (hw->phy.media_type == ixgbe_media_type_backplane)
3989 		return (ENODEV);
3990 
3991 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
3992 	      (hw->phy.multispeed_fiber))) {
3993 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
3994 		return (EINVAL);
3995 	}
3996 
3997 	if (advertise < 0x1 || advertise > 0xF) {
3998 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
3999 		return (EINVAL);
4000 	}
4001 
4002 	if (hw->mac.ops.get_link_capabilities) {
4003 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4004 		    &negotiate);
4005 		if (err != IXGBE_SUCCESS) {
4006 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4007 			return (ENODEV);
4008 		}
4009 	}
4010 
4011 	/* Set new value and report new advertised mode */
4012 	if (advertise & 0x1) {
4013 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4014 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4015 			return (EINVAL);
4016 		}
4017 		speed |= IXGBE_LINK_SPEED_100_FULL;
4018 	}
4019 	if (advertise & 0x2) {
4020 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4021 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4022 			return (EINVAL);
4023 		}
4024 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4025 	}
4026 	if (advertise & 0x4) {
4027 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4028 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4029 			return (EINVAL);
4030 		}
4031 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4032 	}
4033 	if (advertise & 0x8) {
4034 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4035 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4036 			return (EINVAL);
4037 		}
4038 		speed |= IXGBE_LINK_SPEED_10_FULL;
4039 	}
4040 
4041 	hw->mac.autotry_restart = TRUE;
4042 	hw->mac.ops.setup_link(hw, speed, TRUE);
4043 	adapter->advertise = advertise;
4044 
4045 	return (0);
4046 } /* ixgbe_set_advertise */
4047 
4048 /************************************************************************
4049  * ixgbe_get_advertise - Get current advertised speed settings
4050  *
4051  *   Formatted for sysctl usage.
4052  *   Flags:
4053  *     0x1 - advertise 100 Mb
4054  *     0x2 - advertise 1G
4055  *     0x4 - advertise 10G
4056  *     0x8 - advertise 10 Mb (yes, Mb)
4057  ************************************************************************/
4058 static int
4059 ixgbe_get_advertise(struct adapter *adapter)
4060 {
4061 	struct ixgbe_hw  *hw = &adapter->hw;
4062 	int              speed;
4063 	ixgbe_link_speed link_caps = 0;
4064 	s32              err;
4065 	bool             negotiate = FALSE;
4066 
4067 	/*
4068 	 * Advertised speed means nothing unless it's copper or
4069 	 * multi-speed fiber
4070 	 */
4071 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4072 	    !(hw->phy.multispeed_fiber))
4073 		return (0);
4074 
4075 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4076 	if (err != IXGBE_SUCCESS)
4077 		return (0);
4078 
4079 	speed =
4080 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4081 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4082 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4083 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4084 
4085 	return speed;
4086 } /* ixgbe_get_advertise */
4087 
4088 /************************************************************************
4089  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4090  *
4091  *   Control values:
4092  *     0/1 - off / on (use default value of 1000)
4093  *
4094  *     Legal timer values are:
4095  *     50,100,250,500,1000,2000,5000,10000
4096  *
4097  *     Turning off interrupt moderation will also turn this off.
4098  ************************************************************************/
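/*
 * A quick usage sketch, assuming the usual "dmac" sysctl name:
 *
 *	sysctl dev.ix.0.dmac=1000	# enable with the default watchdog value
 *	sysctl dev.ix.0.dmac=0		# disable DMA coalescing
 */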
4099 static int
4100 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4101 {
4102 	struct adapter *adapter = (struct adapter *)arg1;
4103 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4104 	int            error;
4105 	u16            newval;
4106 
4107 	newval = adapter->dmac;
4108 	error = sysctl_handle_16(oidp, &newval, 0, req);
4109 	if ((error) || (req->newptr == NULL))
4110 		return (error);
4111 
4112 	switch (newval) {
4113 	case 0:
4114 		/* Disabled */
4115 		adapter->dmac = 0;
4116 		break;
4117 	case 1:
4118 		/* Enable and use default */
4119 		adapter->dmac = 1000;
4120 		break;
4121 	case 50:
4122 	case 100:
4123 	case 250:
4124 	case 500:
4125 	case 1000:
4126 	case 2000:
4127 	case 5000:
4128 	case 10000:
4129 		/* Legal values - allow */
4130 		adapter->dmac = newval;
4131 		break;
4132 	default:
4133 		/* Do nothing, illegal value */
4134 		return (EINVAL);
4135 	}
4136 
4137 	/* Re-initialize hardware if it's already running */
4138 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4139 		ifp->if_init(ifp);
4140 
4141 	return (0);
4142 } /* ixgbe_sysctl_dmac */
4143 
4144 #ifdef IXGBE_DEBUG
4145 /************************************************************************
4146  * ixgbe_sysctl_power_state
4147  *
4148  *   Sysctl to test power states
4149  *   Values:
4150  *     0      - set device to D0
4151  *     3      - set device to D3
4152  *     (none) - get current device power state
4153  ************************************************************************/
4154 static int
4155 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4156 {
4157 	struct adapter *adapter = (struct adapter *)arg1;
4158 	device_t       dev = adapter->dev;
4159 	int            curr_ps, new_ps, error = 0;
4160 
4161 	curr_ps = new_ps = pci_get_powerstate(dev);
4162 
4163 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4164 	if ((error) || (req->newptr == NULL))
4165 		return (error);
4166 
4167 	if (new_ps == curr_ps)
4168 		return (0);
4169 
4170 	if (new_ps == 3 && curr_ps == 0)
4171 		error = DEVICE_SUSPEND(dev);
4172 	else if (new_ps == 0 && curr_ps == 3)
4173 		error = DEVICE_RESUME(dev);
4174 	else
4175 		return (EINVAL);
4176 
4177 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4178 
4179 	return (error);
4180 } /* ixgbe_sysctl_power_state */
4181 #endif
4182 
4183 /************************************************************************
4184  * ixgbe_sysctl_wol_enable
4185  *
4186  *   Sysctl to enable/disable the WoL capability,
4187  *   if supported by the adapter.
4188  *
4189  *   Values:
4190  *     0 - disabled
4191  *     1 - enabled
4192  ************************************************************************/
4193 static int
4194 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4195 {
4196 	struct adapter  *adapter = (struct adapter *)arg1;
4197 	struct ixgbe_hw *hw = &adapter->hw;
4198 	int             new_wol_enabled;
4199 	int             error = 0;
4200 
4201 	new_wol_enabled = hw->wol_enabled;
4202 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4203 	if ((error) || (req->newptr == NULL))
4204 		return (error);
4205 	new_wol_enabled = !!(new_wol_enabled);
4206 	if (new_wol_enabled == hw->wol_enabled)
4207 		return (0);
4208 
4209 	if (new_wol_enabled > 0 && !adapter->wol_support)
4210 		return (ENODEV);
4211 	else
4212 		hw->wol_enabled = new_wol_enabled;
4213 
4214 	return (0);
4215 } /* ixgbe_sysctl_wol_enable */
4216 
4217 /************************************************************************
4218  * ixgbe_sysctl_wufc - Wake Up Filter Control
4219  *
4220  *   Sysctl to enable/disable the types of packets that the
4221  *   adapter will wake up on upon receipt.
4222  *   Flags:
4223  *     0x1  - Link Status Change
4224  *     0x2  - Magic Packet
4225  *     0x4  - Direct Exact
4226  *     0x8  - Directed Multicast
4227  *     0x10 - Broadcast
4228  *     0x20 - ARP/IPv4 Request Packet
4229  *     0x40 - Direct IPv4 Packet
4230  *     0x80 - Direct IPv6 Packet
4231  *
4232  *   Settings not listed above will cause the sysctl to return an error.
4233  ************************************************************************/
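/*
 * The flags OR together; for example, a value of 0x3 arms the adapter
 * to wake on either a link status change or a magic packet.
 */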
4234 static int
4235 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4236 {
4237 	struct adapter *adapter = (struct adapter *)arg1;
4238 	int            error = 0;
4239 	u32            new_wufc;
4240 
4241 	new_wufc = adapter->wufc;
4242 
4243 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4244 	if ((error) || (req->newptr == NULL))
4245 		return (error);
4246 	if (new_wufc == adapter->wufc)
4247 		return (0);
4248 
4249 	if (new_wufc & 0xffffff00)
4250 		return (EINVAL);
4251 
4252 	new_wufc &= 0xff;
4253 	new_wufc |= (0xffffff & adapter->wufc);
4254 	adapter->wufc = new_wufc;
4255 
4256 	return (0);
4257 } /* ixgbe_sysctl_wufc */
4258 
4259 #ifdef IXGBE_DEBUG
4260 /************************************************************************
4261  * ixgbe_sysctl_print_rss_config
4262  ************************************************************************/
4263 static int
4264 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4265 {
4266 	struct adapter  *adapter = (struct adapter *)arg1;
4267 	struct ixgbe_hw *hw = &adapter->hw;
4268 	device_t        dev = adapter->dev;
4269 	struct sbuf     *buf;
4270 	int             error = 0, reta_size;
4271 	u32             reg;
4272 
4273 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4274 	if (!buf) {
4275 		device_printf(dev, "Could not allocate sbuf for output.\n");
4276 		return (ENOMEM);
4277 	}
4278 
4279 	// TODO: use sbufs to make a string to print out
4280 	/* Set multiplier for RETA setup and table size based on MAC */
4281 	switch (adapter->hw.mac.type) {
4282 	case ixgbe_mac_X550:
4283 	case ixgbe_mac_X550EM_x:
4284 	case ixgbe_mac_X550EM_a:
4285 		reta_size = 128;
4286 		break;
4287 	default:
4288 		reta_size = 32;
4289 		break;
4290 	}
4291 
4292 	/* Print out the redirection table */
4293 	sbuf_cat(buf, "\n");
4294 	for (int i = 0; i < reta_size; i++) {
4295 		if (i < 32) {
4296 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4297 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4298 		} else {
4299 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4300 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4301 		}
4302 	}
4303 
4304 	// TODO: print more config
4305 
4306 	error = sbuf_finish(buf);
4307 	if (error)
4308 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4309 
4310 	sbuf_delete(buf);
4311 
4312 	return (0);
4313 } /* ixgbe_sysctl_print_rss_config */
4314 #endif /* IXGBE_DEBUG */
4315 
4316 /************************************************************************
4317  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4318  *
4319  *   For X552/X557-AT devices using an external PHY
4320  ************************************************************************/
4321 static int
4322 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4323 {
4324 	struct adapter  *adapter = (struct adapter *)arg1;
4325 	struct ixgbe_hw *hw = &adapter->hw;
4326 	u16             reg;
4327 
4328 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4329 		device_printf(iflib_get_dev(adapter->ctx),
4330 		    "Device has no supported external thermal sensor.\n");
4331 		return (ENODEV);
4332 	}
4333 
4334 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4335 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4336 		device_printf(iflib_get_dev(adapter->ctx),
4337 		    "Error reading from PHY's current temperature register\n");
4338 		return (EAGAIN);
4339 	}
4340 
4341 	/* Shift temp for output */
4342 	reg = reg >> 8;
4343 
4344 	return (sysctl_handle_16(oidp, NULL, reg, req));
4345 } /* ixgbe_sysctl_phy_temp */
4346 
4347 /************************************************************************
4348  * ixgbe_sysctl_phy_overtemp_occurred
4349  *
4350  *   Reports (directly from the PHY) whether the current PHY
4351  *   temperature is over the overtemp threshold.
4352  ************************************************************************/
4353 static int
4354 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4355 {
4356 	struct adapter  *adapter = (struct adapter *)arg1;
4357 	struct ixgbe_hw *hw = &adapter->hw;
4358 	u16             reg;
4359 
4360 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4361 		device_printf(iflib_get_dev(adapter->ctx),
4362 		    "Device has no supported external thermal sensor.\n");
4363 		return (ENODEV);
4364 	}
4365 
4366 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4367 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4368 		device_printf(iflib_get_dev(adapter->ctx),
4369 		    "Error reading from PHY's temperature status register\n");
4370 		return (EAGAIN);
4371 	}
4372 
4373 	/* Get occurrence bit */
4374 	reg = !!(reg & 0x4000);
4375 
4376 	return (sysctl_handle_16(oidp, NULL, reg, req));
4377 } /* ixgbe_sysctl_phy_overtemp_occurred */
4378 
4379 /************************************************************************
4380  * ixgbe_sysctl_eee_state
4381  *
4382  *   Sysctl to set EEE power saving feature
4383  *   Values:
4384  *     0      - disable EEE
4385  *     1      - enable EEE
4386  *     (none) - get current device EEE state
4387  ************************************************************************/
4388 static int
4389 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4390 {
4391 	struct adapter *adapter = (struct adapter *)arg1;
4392 	device_t       dev = adapter->dev;
4393 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4394 	int            curr_eee, new_eee, error = 0;
4395 	s32            retval;
4396 
4397 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4398 
4399 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4400 	if ((error) || (req->newptr == NULL))
4401 		return (error);
4402 
4403 	/* Nothing to do */
4404 	if (new_eee == curr_eee)
4405 		return (0);
4406 
4407 	/* Not supported */
4408 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4409 		return (EINVAL);
4410 
4411 	/* Bounds checking */
4412 	if ((new_eee < 0) || (new_eee > 1))
4413 		return (EINVAL);
4414 
4415 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4416 	if (retval) {
4417 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4418 		return (EINVAL);
4419 	}
4420 
4421 	/* Restart auto-neg */
4422 	ifp->if_init(ifp);
4423 
4424 	device_printf(dev, "New EEE state: %d\n", new_eee);
4425 
4426 	/* Cache new value */
4427 	if (new_eee)
4428 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4429 	else
4430 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4431 
4432 	return (error);
4433 } /* ixgbe_sysctl_eee_state */
4434 
4435 /************************************************************************
4436  * ixgbe_init_device_features
4437  ************************************************************************/
4438 static void
4439 ixgbe_init_device_features(struct adapter *adapter)
4440 {
4441 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4442 	                  | IXGBE_FEATURE_RSS
4443 	                  | IXGBE_FEATURE_MSI
4444 	                  | IXGBE_FEATURE_MSIX
4445 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4446 
4447 	/* Set capabilities first... */
4448 	switch (adapter->hw.mac.type) {
4449 	case ixgbe_mac_82598EB:
4450 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4451 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4452 		break;
4453 	case ixgbe_mac_X540:
4454 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4455 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4456 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4457 		    (adapter->hw.bus.func == 0))
4458 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4459 		break;
4460 	case ixgbe_mac_X550:
4461 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4462 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4463 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4464 		break;
4465 	case ixgbe_mac_X550EM_x:
4466 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4467 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4468 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4469 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4470 		break;
4471 	case ixgbe_mac_X550EM_a:
4472 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4473 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4474 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4475 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4476 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4477 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4478 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4479 		}
4480 		break;
4481 	case ixgbe_mac_82599EB:
4482 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4483 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4484 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4485 		    (adapter->hw.bus.func == 0))
4486 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4487 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4488 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4489 		break;
4490 	default:
4491 		break;
4492 	}
4493 
4494 	/* Enabled by default... */
4495 	/* Fan failure detection */
4496 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4497 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4498 	/* Netmap */
4499 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4500 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4501 	/* EEE */
4502 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4503 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4504 	/* Thermal Sensor */
4505 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4506 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4507 
4508 	/* Enabled via global sysctl... */
4509 	/* Flow Director */
4510 	if (ixgbe_enable_fdir) {
4511 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4512 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4513 		else
4514 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4515 	}
4516 	/*
4517 	 * Message Signal Interrupts - Extended (MSI-X)
4518 	 * Normal MSI is only enabled if MSI-X calls fail.
4519 	 */
4520 	if (!ixgbe_enable_msix)
4521 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4522 	/* Receive-Side Scaling (RSS) */
4523 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4524 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4525 
4526 	/* Disable features with unmet dependencies... */
4527 	/* No MSI-X */
4528 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4529 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4530 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4531 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4532 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4533 	}
4534 } /* ixgbe_init_device_features */
4535 
4536 /************************************************************************
4537  * ixgbe_check_fan_failure
4538  ************************************************************************/
4539 static void
4540 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4541 {
4542 	u32 mask;
4543 
4544 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4545 	    IXGBE_ESDP_SDP1;
4546 
4547 	if (reg & mask)
4548 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4549 } /* ixgbe_check_fan_failure */
4550 
4551