xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision d0ba1baed3f6e4936a0c1b89c25f6c59168ef6de)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select devices to load on
57  *   Last field stores an index into ixgbe_strings
58  *   Last entry must be all 0s
59  *
60  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
124 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
125 static int  ixgbe_if_media_change(if_ctx_t ctx);
126 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
127 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
128 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
129 static void ixgbe_if_multi_set(if_ctx_t ctx);
130 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
131 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
132                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
133 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
134                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
135 static void ixgbe_if_queues_free(if_ctx_t ctx);
136 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
137 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
138 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
139 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
140 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
141 int ixgbe_intr(void *arg);
142 
143 /************************************************************************
144  * Function prototypes
145  ************************************************************************/
146 #if __FreeBSD_version >= 1100036
147 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
148 #endif
149 
150 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
151 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
153 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
154 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
155 
156 static void ixgbe_config_dmac(struct adapter *adapter);
157 static void ixgbe_configure_ivars(struct adapter *adapter);
158 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
159                            s8 type);
160 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
161 static bool ixgbe_sfp_probe(if_ctx_t ctx);
162 
163 static void ixgbe_free_pci_resources(if_ctx_t ctx);
164 
165 static int  ixgbe_msix_link(void *arg);
166 static int  ixgbe_msix_que(void *arg);
167 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
168 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
169 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
170 
171 static int  ixgbe_setup_interface(if_ctx_t ctx);
172 static void ixgbe_init_device_features(struct adapter *adapter);
173 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
174 static void ixgbe_add_media_types(if_ctx_t ctx);
175 static void ixgbe_update_stats_counters(struct adapter *adapter);
176 static void ixgbe_config_link(struct adapter *adapter);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static void ixgbe_check_wol_support(struct adapter *adapter);
179 static void ixgbe_enable_rx_drop(struct adapter *);
180 static void ixgbe_disable_rx_drop(struct adapter *);
181 
182 static void ixgbe_add_hw_stats(struct adapter *adapter);
183 static int  ixgbe_set_flowcntl(struct adapter *, int);
184 static int  ixgbe_set_advertise(struct adapter *, int);
185 static int  ixgbe_get_advertise(struct adapter *);
186 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
187 static void ixgbe_config_gpie(struct adapter *adapter);
188 static void ixgbe_config_delay_values(struct adapter *adapter);
189 
190 /* Sysctl handlers */
191 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
192 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
197 #ifdef IXGBE_DEBUG
198 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
199 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
200 #endif
201 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
202 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
208 
209 /* Deferred interrupt tasklets */
210 static void ixgbe_handle_msf(void *);
211 static void ixgbe_handle_mod(void *);
212 static void ixgbe_handle_phy(void *);
213 
214 /************************************************************************
215  *  FreeBSD Device Interface Entry Points
216  ************************************************************************/
217 static device_method_t ix_methods[] = {
218 	/* Device interface */
219 	DEVMETHOD(device_register, ixgbe_register),
220 	DEVMETHOD(device_probe, iflib_device_probe),
221 	DEVMETHOD(device_attach, iflib_device_attach),
222 	DEVMETHOD(device_detach, iflib_device_detach),
223 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
224 	DEVMETHOD(device_suspend, iflib_device_suspend),
225 	DEVMETHOD(device_resume, iflib_device_resume),
226 #ifdef PCI_IOV
227 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
228 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
229 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
230 #endif /* PCI_IOV */
231 	DEVMETHOD_END
232 };
233 
234 static driver_t ix_driver = {
235 	"ix", ix_methods, sizeof(struct adapter),
236 };
237 
238 devclass_t ix_devclass;
239 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
240 
241 MODULE_DEPEND(ix, pci, 1, 1, 1);
242 MODULE_DEPEND(ix, ether, 1, 1, 1);
243 MODULE_DEPEND(ix, iflib, 1, 1, 1);
244 
245 static device_method_t ixgbe_if_methods[] = {
246 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
247 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
248 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
249 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
250 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
251 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
252 	DEVMETHOD(ifdi_init, ixgbe_if_init),
253 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
254 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
255 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
256 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
257 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
259 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
260 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
261 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
262 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
263 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
264 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
265 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
266 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
267 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
268 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
269 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
270 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
271 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
272 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
273 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
274 #ifdef PCI_IOV
275 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
276 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
277 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
278 #endif /* PCI_IOV */
279 	DEVMETHOD_END
280 };
281 
282 /*
283  * TUNEABLE PARAMETERS:
284  */
285 
286 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
287 static driver_t ixgbe_if_driver = {
288   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
289 };
290 
291 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
292 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
293     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
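/*
 * Illustrative arithmetic, assuming IXGBE_LOW_LATENCY is 128:
 * 4000000 / 128 = 31250, i.e. a default cap of roughly 31250
 * interrupts per second.
 */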
294 
295 /* Flow control setting, default to full */
296 static int ixgbe_flow_control = ixgbe_fc_full;
297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
299 
300 /* Advertise Speed, default to 0 (auto) */
301 static int ixgbe_advertise_speed = 0;
302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
304 
305 /*
306  * Smart speed setting, default to on.
307  * This currently works only as a compile-time
308  * option because it is applied during attach; set
309  * this to 'ixgbe_smart_speed_off' to
310  * disable.
311  */
312 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
313 
314 /*
315  * MSI-X should be the default for best performance,
316  * but this allows it to be forced off for testing.
317  */
318 static int ixgbe_enable_msix = 1;
319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320     "Enable MSI-X interrupts");
321 
322 /*
323  * Defining this allows the use of
324  * unsupported SFP+ modules; note that
325  * in doing so you are on your own :)
326  */
327 static int allow_unsupported_sfp = FALSE;
328 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
329     &allow_unsupported_sfp, 0,
330     "Allow unsupported SFP modules...use at your own risk");
331 
332 /*
333  * Not sure if Flow Director is fully baked,
334  * so we'll default to turning it off.
335  */
336 static int ixgbe_enable_fdir = 0;
337 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
338     "Enable Flow Director");
339 
340 /* Receive-Side Scaling */
341 static int ixgbe_enable_rss = 1;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
343     "Enable Receive-Side Scaling (RSS)");
344 
345 #if 0
346 /* Keep running tab on them for sanity check */
347 static int ixgbe_total_ports;
348 #endif
349 
350 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
351 
352 /*
353  * For Flow Director: this is the rate at which we sample TX packets
354  * for the filter pool; with the default of 20, every 20th packet is probed.
355  *
356  * This feature can be disabled by setting this to 0.
357  */
358 static int atr_sample_rate = 20;
359 
360 extern struct if_txrx ixgbe_txrx;
361 
362 static struct if_shared_ctx ixgbe_sctx_init = {
363 	.isc_magic = IFLIB_MAGIC,
364 	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
365 	.isc_tx_maxsize = IXGBE_TSO_SIZE,
366 
367 	.isc_tx_maxsegsize = PAGE_SIZE,
368 
369 	.isc_rx_maxsize = PAGE_SIZE*4,
370 	.isc_rx_nsegments = 1,
371 	.isc_rx_maxsegsize = PAGE_SIZE*4,
372 	.isc_nfl = 1,
373 	.isc_ntxqs = 1,
374 	.isc_nrxqs = 1,
375 
376 	.isc_admin_intrcnt = 1,
377 	.isc_vendor_info = ixgbe_vendor_info_array,
378 	.isc_driver_version = ixgbe_driver_version,
379 	.isc_driver = &ixgbe_if_driver,
380 
381 	.isc_nrxd_min = {MIN_RXD},
382 	.isc_ntxd_min = {MIN_TXD},
383 	.isc_nrxd_max = {MAX_RXD},
384 	.isc_ntxd_max = {MAX_TXD},
385 	.isc_nrxd_default = {DEFAULT_RXD},
386 	.isc_ntxd_default = {DEFAULT_TXD},
387 };
388 
389 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
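/*
 * iflib obtains this shared context template through ixgbe_register()
 * (see DEVMETHOD(device_register) above) and consults the isc_* limits
 * and defaults when it sizes and allocates the descriptor rings before
 * invoking the ifdi_* methods.
 */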
390 
391 /************************************************************************
392  * ixgbe_if_tx_queues_alloc
393  ************************************************************************/
394 static int
395 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
396                          int ntxqs, int ntxqsets)
397 {
398 	struct adapter     *adapter = iflib_get_softc(ctx);
399 	if_softc_ctx_t     scctx = adapter->shared;
400 	struct ix_tx_queue *que;
401 	int                i, j, error;
402 
403 	MPASS(adapter->num_tx_queues > 0);
404 	MPASS(adapter->num_tx_queues == ntxqsets);
405 	MPASS(ntxqs == 1);
406 
407 	/* Allocate queue structure memory */
408 	adapter->tx_queues =
409 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
410 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
411 	if (!adapter->tx_queues) {
412 		device_printf(iflib_get_dev(ctx),
413 		    "Unable to allocate TX ring memory\n");
414 		return (ENOMEM);
415 	}
416 
417 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
418 		struct tx_ring *txr = &que->txr;
419 
420 		/* In case SR-IOV is enabled, align the index properly */
421 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
422 		    i);
423 
424 		txr->adapter = que->adapter = adapter;
425 		adapter->active_queues |= (u64)1 << txr->me;
426 
427 		/* Allocate report status array */
428 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
429 		if (txr->tx_rsq == NULL) {
430 			error = ENOMEM;
431 			goto fail;
432 		}
433 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
434 			txr->tx_rsq[j] = QIDX_INVALID;
435 		/* get the virtual and physical address of the hardware queues */
436 		txr->tail = IXGBE_TDT(txr->me);
437 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
438 		txr->tx_paddr = paddrs[i];
439 
440 		txr->bytes = 0;
441 		txr->total_packets = 0;
442 
443 		/* Set the rate at which we sample packets */
444 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
445 			txr->atr_sample = atr_sample_rate;
446 
447 	}
448 
449 	iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod,
450 	    "mod_task");
451 	iflib_config_gtask_init(ctx, &adapter->msf_task, ixgbe_handle_msf,
452 	    "msf_task");
453 	iflib_config_gtask_init(ctx, &adapter->phy_task, ixgbe_handle_phy,
454 	    "phy_task");
455 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
456 		iflib_config_gtask_init(ctx, &adapter->mbx_task,
457 		    ixgbe_handle_mbx, "mbx_task");
458 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
459 		iflib_config_gtask_init(ctx, &adapter->fdir_task,
460 		    ixgbe_reinit_fdir, "fdir_task");
461 
462 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
463 	    adapter->num_tx_queues);
464 
465 	return (0);
466 
467 fail:
468 	ixgbe_if_queues_free(ctx);
469 
470 	return (error);
471 } /* ixgbe_if_tx_queues_alloc */
472 
473 /************************************************************************
474  * ixgbe_if_rx_queues_alloc
475  ************************************************************************/
476 static int
477 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
478                          int nrxqs, int nrxqsets)
479 {
480 	struct adapter     *adapter = iflib_get_softc(ctx);
481 	struct ix_rx_queue *que;
482 	int                i;
483 
484 	MPASS(adapter->num_rx_queues > 0);
485 	MPASS(adapter->num_rx_queues == nrxqsets);
486 	MPASS(nrxqs == 1);
487 
488 	/* Allocate queue structure memory */
489 	adapter->rx_queues =
490 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
491 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
492 	if (!adapter->rx_queues) {
493 		device_printf(iflib_get_dev(ctx),
494 		    "Unable to allocate RX ring memory\n");
495 		return (ENOMEM);
496 	}
497 
498 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
499 		struct rx_ring *rxr = &que->rxr;
500 
501 		/* In case SR-IOV is enabled, align the index properly */
502 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
503 		    i);
504 
505 		rxr->adapter = que->adapter = adapter;
506 
507 		/* get the virtual and physical address of the hw queues */
508 		rxr->tail = IXGBE_RDT(rxr->me);
509 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
510 		rxr->rx_paddr = paddrs[i];
511 		rxr->bytes = 0;
512 		rxr->que = que;
513 	}
514 
515 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
516 	    adapter->num_rx_queues);
517 
518 	return (0);
519 } /* ixgbe_if_rx_queues_alloc */
520 
521 /************************************************************************
522  * ixgbe_if_queues_free
523  ************************************************************************/
524 static void
525 ixgbe_if_queues_free(if_ctx_t ctx)
526 {
527 	struct adapter     *adapter = iflib_get_softc(ctx);
528 	struct ix_tx_queue *tx_que = adapter->tx_queues;
529 	struct ix_rx_queue *rx_que = adapter->rx_queues;
530 	int                i;
531 
532 	if (tx_que != NULL) {
533 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
534 			struct tx_ring *txr = &tx_que->txr;
535 			if (txr->tx_rsq == NULL)
536 				break;
537 
538 			free(txr->tx_rsq, M_IXGBE);
539 			txr->tx_rsq = NULL;
540 		}
541 
542 		free(adapter->tx_queues, M_IXGBE);
543 		adapter->tx_queues = NULL;
544 	}
545 	if (rx_que != NULL) {
546 		free(adapter->rx_queues, M_IXGBE);
547 		adapter->rx_queues = NULL;
548 	}
549 } /* ixgbe_if_queues_free */
550 
551 /************************************************************************
552  * ixgbe_initialize_rss_mapping
553  ************************************************************************/
554 static void
555 ixgbe_initialize_rss_mapping(struct adapter *adapter)
556 {
557 	struct ixgbe_hw *hw = &adapter->hw;
558 	u32             reta = 0, mrqc, rss_key[10];
559 	int             queue_id, table_size, index_mult;
560 	int             i, j;
561 	u32             rss_hash_config;
562 
563 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
564 		/* Fetch the configured RSS key */
565 		rss_getkey((uint8_t *)&rss_key);
566 	} else {
567 		/* set up random bits */
568 		arc4rand(&rss_key, sizeof(rss_key), 0);
569 	}
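	/*
	 * Either way, rss_key[] now holds 10 32-bit words (40 bytes), which
	 * are programmed into the ten RSSRK registers further below.
	 */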
570 
571 	/* Set multiplier for RETA setup and table size based on MAC */
572 	index_mult = 0x1;
573 	table_size = 128;
574 	switch (adapter->hw.mac.type) {
575 	case ixgbe_mac_82598EB:
576 		index_mult = 0x11;
577 		break;
578 	case ixgbe_mac_X550:
579 	case ixgbe_mac_X550EM_x:
580 	case ixgbe_mac_X550EM_a:
581 		table_size = 512;
582 		break;
583 	default:
584 		break;
585 	}
586 
587 	/* Set up the redirection table */
588 	for (i = 0, j = 0; i < table_size; i++, j++) {
589 		if (j == adapter->num_rx_queues)
590 			j = 0;
591 
592 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
593 			/*
594 			 * Fetch the RSS bucket id for the given indirection
595 			 * entry. Cap it at the number of configured buckets
596 			 * (which is num_rx_queues.)
597 			 */
598 			queue_id = rss_get_indirection_to_bucket(i);
599 			queue_id = queue_id % adapter->num_rx_queues;
600 		} else
601 			queue_id = (j * index_mult);
602 
603 		/*
604 		 * The low 8 bits are for hash value (n+0);
605 		 * The next 8 bits are for hash value (n+1), etc.
606 		 */
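		/*
		 * Worked example (illustrative, assuming 4 RX queues and an
		 * index multiplier of 1): entries 0..3 pack into the first
		 * 32-bit RETA register as 0x03020100, i.e. queue 0 in bits
		 * 7:0, queue 1 in bits 15:8, and so on.  Entries 128 and up
		 * are written to the extended ERETA registers on MACs with
		 * a 512-entry table.
		 */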
607 		reta = reta >> 8;
608 		reta = reta | (((uint32_t)queue_id) << 24);
609 		if ((i & 3) == 3) {
610 			if (i < 128)
611 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
612 			else
613 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
614 				    reta);
615 			reta = 0;
616 		}
617 	}
618 
619 	/* Now fill our hash function seeds */
620 	for (i = 0; i < 10; i++)
621 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
622 
623 	/* Perform hash on these packet types */
624 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
625 		rss_hash_config = rss_gethashconfig();
626 	else {
627 		/*
628 		 * Disable UDP - IP fragments aren't currently being handled
629 		 * and so we end up with a mix of 2-tuple and 4-tuple
630 		 * traffic.
631 		 */
632 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
633 		                | RSS_HASHTYPE_RSS_TCP_IPV4
634 		                | RSS_HASHTYPE_RSS_IPV6
635 		                | RSS_HASHTYPE_RSS_TCP_IPV6
636 		                | RSS_HASHTYPE_RSS_IPV6_EX
637 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
638 	}
639 
640 	mrqc = IXGBE_MRQC_RSSEN;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
649 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
650 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
651 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
652 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
653 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
654 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
655 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
656 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
657 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
658 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
659 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
660 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
661 } /* ixgbe_initialize_rss_mapping */
662 
663 /************************************************************************
664  * ixgbe_initialize_receive_units - Setup receive registers and features.
665  ************************************************************************/
666 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
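/*
 * Illustrative arithmetic (assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10):
 * BSIZEPKT_ROUNDUP is 1023, so a 2048-byte receive buffer yields
 * (2048 + 1023) >> 10 = 2, i.e. SRRCTL is programmed with a 2 KB packet
 * buffer size.
 */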
667 
668 static void
669 ixgbe_initialize_receive_units(if_ctx_t ctx)
670 {
671 	struct adapter     *adapter = iflib_get_softc(ctx);
672 	if_softc_ctx_t     scctx = adapter->shared;
673 	struct ixgbe_hw    *hw = &adapter->hw;
674 	struct ifnet       *ifp = iflib_get_ifp(ctx);
675 	struct ix_rx_queue *que;
676 	int                i, j;
677 	u32                bufsz, fctrl, srrctl, rxcsum;
678 	u32                hlreg;
679 
680 	/*
681 	 * Make sure receives are disabled while
682 	 * setting up the descriptor ring
683 	 */
684 	ixgbe_disable_rx(hw);
685 
686 	/* Enable broadcasts */
687 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
688 	fctrl |= IXGBE_FCTRL_BAM;
689 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
690 		fctrl |= IXGBE_FCTRL_DPF;
691 		fctrl |= IXGBE_FCTRL_PMCF;
692 	}
693 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
694 
695 	/* Set for Jumbo Frames? */
696 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
697 	if (ifp->if_mtu > ETHERMTU)
698 		hlreg |= IXGBE_HLREG0_JUMBOEN;
699 	else
700 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
701 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
702 
703 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
704 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
705 
706 	/* Setup the Base and Length of the Rx Descriptor Ring */
707 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
708 		struct rx_ring *rxr = &que->rxr;
709 		u64            rdba = rxr->rx_paddr;
710 
711 		j = rxr->me;
712 
713 		/* Setup the Base and Length of the Rx Descriptor Ring */
714 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
715 		    (rdba & 0x00000000ffffffffULL));
716 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
717 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
718 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
719 
720 		/* Set up the SRRCTL register */
721 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
722 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
723 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
724 		srrctl |= bufsz;
725 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
726 
727 		/*
728 		 * Set DROP_EN iff we have no flow control and >1 queue.
729 		 * Note that srrctl was cleared shortly before, during the reset,
730 		 * so we do not need to clear the bit, but do it just in case
731 		 * this code is moved elsewhere.
732 		 */
733 		if (adapter->num_rx_queues > 1 &&
734 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
735 			srrctl |= IXGBE_SRRCTL_DROP_EN;
736 		} else {
737 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
738 		}
739 
740 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
741 
742 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
743 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
744 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
745 
746 		/* Set the driver rx tail address */
747 		rxr->tail =  IXGBE_RDT(rxr->me);
748 	}
749 
750 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
751 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
752 		            | IXGBE_PSRTYPE_UDPHDR
753 		            | IXGBE_PSRTYPE_IPV4HDR
754 		            | IXGBE_PSRTYPE_IPV6HDR;
755 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
756 	}
757 
758 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
759 
760 	ixgbe_initialize_rss_mapping(adapter);
761 
762 	if (adapter->num_rx_queues > 1) {
763 		/* RSS and RX IPP Checksum are mutually exclusive */
764 		rxcsum |= IXGBE_RXCSUM_PCSD;
765 	}
766 
767 	if (ifp->if_capenable & IFCAP_RXCSUM)
768 		rxcsum |= IXGBE_RXCSUM_PCSD;
769 
770 	/* This is useful for calculating UDP/IP fragment checksums */
771 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
772 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
773 
774 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
775 
776 } /* ixgbe_initialize_receive_units */
777 
778 /************************************************************************
779  * ixgbe_initialize_transmit_units - Enable transmit units.
780  ************************************************************************/
781 static void
782 ixgbe_initialize_transmit_units(if_ctx_t ctx)
783 {
784 	struct adapter     *adapter = iflib_get_softc(ctx);
785 	struct ixgbe_hw    *hw = &adapter->hw;
786 	if_softc_ctx_t     scctx = adapter->shared;
787 	struct ix_tx_queue *que;
788 	int i;
789 
790 	/* Setup the Base and Length of the Tx Descriptor Ring */
791 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
792 	    i++, que++) {
793 		struct tx_ring	   *txr = &que->txr;
794 		u64 tdba = txr->tx_paddr;
795 		u32 txctrl = 0;
796 		int j = txr->me;
797 
798 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
799 		    (tdba & 0x00000000ffffffffULL));
800 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
801 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
802 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
803 
804 		/* Setup the HW Tx Head and Tail descriptor pointers */
805 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
806 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
807 
808 		/* Reset the ring indices and the report-status array */
809 		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
810 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
811 			txr->tx_rsq[k] = QIDX_INVALID;
812 
813 		/* Disable Head Writeback */
814 		/*
815 		 * Note: for X550 series devices, these registers are actually
816 		 * prefixed with TPH_ instead of DCA_, but the addresses and
817 		 * fields remain the same.
818 		 */
819 		switch (hw->mac.type) {
820 		case ixgbe_mac_82598EB:
821 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
822 			break;
823 		default:
824 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
825 			break;
826 		}
827 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
828 		switch (hw->mac.type) {
829 		case ixgbe_mac_82598EB:
830 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
831 			break;
832 		default:
833 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
834 			break;
835 		}
836 
837 	}
838 
839 	if (hw->mac.type != ixgbe_mac_82598EB) {
840 		u32 dmatxctl, rttdcs;
841 
842 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
843 		dmatxctl |= IXGBE_DMATXCTL_TE;
844 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
845 		/* Disable arbiter to set MTQC */
846 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
847 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
848 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
849 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
850 		    ixgbe_get_mtqc(adapter->iov_mode));
851 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
852 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
853 	}
854 
855 } /* ixgbe_initialize_transmit_units */
856 
857 /************************************************************************
858  * ixgbe_register
859  ************************************************************************/
860 static void *
861 ixgbe_register(device_t dev)
862 {
863 	return (ixgbe_sctx);
864 } /* ixgbe_register */
865 
866 /************************************************************************
867  * ixgbe_if_attach_pre - Device initialization routine, part 1
868  *
869  *   Called when the driver is being loaded.
870  *   Identifies the type of hardware, initializes the hardware,
871  *   and initializes iflib structures.
872  *
873  *   return 0 on success, positive on failure
874  ************************************************************************/
875 static int
876 ixgbe_if_attach_pre(if_ctx_t ctx)
877 {
878 	struct adapter  *adapter;
879 	device_t        dev;
880 	if_softc_ctx_t  scctx;
881 	struct ixgbe_hw *hw;
882 	int             error = 0;
883 	u32             ctrl_ext;
884 
885 	INIT_DEBUGOUT("ixgbe_attach: begin");
886 
887 	/* Allocate, clear, and link in our adapter structure */
888 	dev = iflib_get_dev(ctx);
889 	adapter = iflib_get_softc(ctx);
890 	adapter->hw.back = adapter;
891 	adapter->ctx = ctx;
892 	adapter->dev = dev;
893 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
894 	adapter->media = iflib_get_media(ctx);
895 	hw = &adapter->hw;
896 
897 	/* Determine hardware revision */
898 	hw->vendor_id = pci_get_vendor(dev);
899 	hw->device_id = pci_get_device(dev);
900 	hw->revision_id = pci_get_revid(dev);
901 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
902 	hw->subsystem_device_id = pci_get_subdevice(dev);
903 
904 	/* Do base PCI setup - map BAR0 */
905 	if (ixgbe_allocate_pci_resources(ctx)) {
906 		device_printf(dev, "Allocation of PCI resources failed\n");
907 		return (ENXIO);
908 	}
909 
910 	/* let hardware know driver is loaded */
911 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
912 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
913 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
914 
915 	/*
916 	 * Initialize the shared code
917 	 */
918 	if (ixgbe_init_shared_code(hw) != 0) {
919 		device_printf(dev, "Unable to initialize the shared code\n");
920 		error = ENXIO;
921 		goto err_pci;
922 	}
923 
924 	if (hw->mbx.ops.init_params)
925 		hw->mbx.ops.init_params(hw);
926 
927 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
928 
929 	if (hw->mac.type != ixgbe_mac_82598EB)
930 		hw->phy.smart_speed = ixgbe_smart_speed;
931 
932 	ixgbe_init_device_features(adapter);
933 
934 	/* Enable WoL (if supported) */
935 	ixgbe_check_wol_support(adapter);
936 
937 	/* Verify adapter fan is still functional (if applicable) */
938 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
939 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
940 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
941 	}
942 
943 	/* Ensure SW/FW semaphore is free */
944 	ixgbe_init_swfw_semaphore(hw);
945 
946 	/* Set an initial default flow control value */
947 	hw->fc.requested_mode = ixgbe_flow_control;
948 
949 	hw->phy.reset_if_overtemp = TRUE;
950 	error = ixgbe_reset_hw(hw);
951 	hw->phy.reset_if_overtemp = FALSE;
952 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
953 		/*
954 		 * No optics in this port; set up
955 		 * so the timer routine will probe
956 		 * for later insertion.
957 		 */
958 		adapter->sfp_probe = TRUE;
959 		error = 0;
960 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
961 		device_printf(dev, "Unsupported SFP+ module detected!\n");
962 		error = EIO;
963 		goto err_pci;
964 	} else if (error) {
965 		device_printf(dev, "Hardware initialization failed\n");
966 		error = EIO;
967 		goto err_pci;
968 	}
969 
970 	/* Make sure we have a good EEPROM before we read from it */
971 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
972 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
973 		error = EIO;
974 		goto err_pci;
975 	}
976 
977 	error = ixgbe_start_hw(hw);
978 	switch (error) {
979 	case IXGBE_ERR_EEPROM_VERSION:
980 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
981 		break;
982 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
983 		device_printf(dev, "Unsupported SFP+ Module\n");
984 		error = EIO;
985 		goto err_pci;
986 	case IXGBE_ERR_SFP_NOT_PRESENT:
987 		device_printf(dev, "No SFP+ Module found\n");
988 		/* falls thru */
989 	default:
990 		break;
991 	}
992 
993 	/* Most of the iflib initialization... */
994 
995 	iflib_set_mac(ctx, hw->mac.addr);
996 	switch (adapter->hw.mac.type) {
997 	case ixgbe_mac_X550:
998 	case ixgbe_mac_X550EM_x:
999 	case ixgbe_mac_X550EM_a:
1000 		scctx->isc_rss_table_size = 512;
1001 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1002 		break;
1003 	default:
1004 		scctx->isc_rss_table_size = 128;
1005 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1006 	}
1007 
1008 	/* Allow legacy interrupts */
1009 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1010 
1011 	scctx->isc_txqsizes[0] =
1012 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1013 	    sizeof(u32), DBA_ALIGN),
1014 	scctx->isc_rxqsizes[0] =
1015 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1016 	    DBA_ALIGN);
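	/*
	 * Illustrative sizing (hypothetical descriptor count): with 1024 TX
	 * descriptors of 16 bytes each plus a 4-byte status word,
	 * 1024 * 16 + 4 = 16388, which roundup2() pads to 16512 assuming
	 * DBA_ALIGN is 128.
	 */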
1017 
1018 	/* XXX */
1019 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1020 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1021 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1022 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1023 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
1024 	} else {
1025 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1026 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1027 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
1028 	}
1029 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1030 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1031 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1032 
1033 	scctx->isc_txrx = &ixgbe_txrx;
1034 
1035 	scctx->isc_capenable = IXGBE_CAPS;
1036 
1037 	return (0);
1038 
1039 err_pci:
1040 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1041 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1042 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1043 	ixgbe_free_pci_resources(ctx);
1044 
1045 	return (error);
1046 } /* ixgbe_if_attach_pre */
1047 
1048  /*********************************************************************
1049  * ixgbe_if_attach_post - Device initialization routine, part 2
1050  *
1051  *   Called during driver load, but after interrupts and
1052  *   resources have been allocated and configured.
1053  *   Sets up some data structures not relevant to iflib.
1054  *
1055  *   return 0 on success, positive on failure
1056  *********************************************************************/
1057 static int
1058 ixgbe_if_attach_post(if_ctx_t ctx)
1059 {
1060 	device_t dev;
1061 	struct adapter  *adapter;
1062 	struct ixgbe_hw *hw;
1063 	int             error = 0;
1064 
1065 	dev = iflib_get_dev(ctx);
1066 	adapter = iflib_get_softc(ctx);
1067 	hw = &adapter->hw;
1068 
1069 
1070 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1071 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1072 		device_printf(dev, "Device does not support legacy interrupts\n");
1073 		error = ENXIO;
1074 		goto err;
1075 	}
1076 
1077 	/* Allocate multicast array memory. */
1078 	adapter->mta = malloc(sizeof(*adapter->mta) *
1079 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1080 	if (adapter->mta == NULL) {
1081 		device_printf(dev, "Can not allocate multicast setup array\n");
1082 		error = ENOMEM;
1083 		goto err;
1084 	}
1085 
1086 	/* hw.ix defaults init */
1087 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1088 
1089 	/* Enable the optics for 82599 SFP+ fiber */
1090 	ixgbe_enable_tx_laser(hw);
1091 
1092 	/* Enable power to the phy. */
1093 	ixgbe_set_phy_power(hw, TRUE);
1094 
1095 	ixgbe_initialize_iov(adapter);
1096 
1097 	error = ixgbe_setup_interface(ctx);
1098 	if (error) {
1099 		device_printf(dev, "Interface setup failed: %d\n", error);
1100 		goto err;
1101 	}
1102 
1103 	ixgbe_if_update_admin_status(ctx);
1104 
1105 	/* Initialize statistics */
1106 	ixgbe_update_stats_counters(adapter);
1107 	ixgbe_add_hw_stats(adapter);
1108 
1109 	/* Check PCIE slot type/speed/width */
1110 	ixgbe_get_slot_info(adapter);
1111 
1112 	/*
1113 	 * Do time init and sysctl init here, but
1114 	 * only on the first port of a bypass adapter.
1115 	 */
1116 	ixgbe_bypass_init(adapter);
1117 
1118 	/* Set an initial dmac value */
1119 	adapter->dmac = 0;
1120 	/* Set initial advertised speeds (if applicable) */
1121 	adapter->advertise = ixgbe_get_advertise(adapter);
1122 
1123 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1124 		ixgbe_define_iov_schemas(dev, &error);
1125 
1126 	/* Add sysctls */
1127 	ixgbe_add_device_sysctls(ctx);
1128 
1129 	return (0);
1130 err:
1131 	return (error);
1132 } /* ixgbe_if_attach_post */
1133 
1134 /************************************************************************
1135  * ixgbe_check_wol_support
1136  *
1137  *   Checks whether the adapter's ports are capable of
1138  *   Wake On LAN by reading the adapter's NVM.
1139  *
1140  *   Sets each port's hw->wol_enabled value depending
1141  *   on the value read here.
1142  ************************************************************************/
1143 static void
1144 ixgbe_check_wol_support(struct adapter *adapter)
1145 {
1146 	struct ixgbe_hw *hw = &adapter->hw;
1147 	u16             dev_caps = 0;
1148 
1149 	/* Find out WoL support for port */
1150 	adapter->wol_support = hw->wol_enabled = 0;
1151 	ixgbe_get_device_caps(hw, &dev_caps);
1152 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1153 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1154 	     hw->bus.func == 0))
1155 		adapter->wol_support = hw->wol_enabled = 1;
1156 
1157 	/* Save initial wake up filter configuration */
1158 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1159 
1160 	return;
1161 } /* ixgbe_check_wol_support */
1162 
1163 /************************************************************************
1164  * ixgbe_setup_interface
1165  *
1166  *   Setup networking device structure and register an interface.
1167  ************************************************************************/
1168 static int
1169 ixgbe_setup_interface(if_ctx_t ctx)
1170 {
1171 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1172 	struct adapter *adapter = iflib_get_softc(ctx);
1173 
1174 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1175 
1176 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
1177 	if_setbaudrate(ifp, IF_Gbps(10));
1178 
1179 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
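	/*
	 * For example, with the standard 1500-byte MTU this works out to
	 * 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes.
	 */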
1180 
1181 	/*
1182 	 * Don't turn this on by default. If VLANs are
1183 	 * created on another pseudo device (e.g. lagg),
1184 	 * VLAN events are not passed through, breaking
1185 	 * operation, but with HW FILTER off it works. If
1186 	 * you use VLANs directly on the ixgbe driver you
1187 	 * can enable this and get full hardware tag filtering.
1188 	 */
1189 	if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWFILTER);
1190 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1191 
1192 	ixgbe_add_media_types(ctx);
1193 
1194 	/* Autoselect media by default */
1195 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1196 
1197 	return (0);
1198 } /* ixgbe_setup_interface */
1199 
1200 /************************************************************************
1201  * ixgbe_if_get_counter
1202  ************************************************************************/
1203 static uint64_t
1204 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1205 {
1206 	struct adapter *adapter = iflib_get_softc(ctx);
1207 	if_t           ifp = iflib_get_ifp(ctx);
1208 
1209 	switch (cnt) {
1210 	case IFCOUNTER_IPACKETS:
1211 		return (adapter->ipackets);
1212 	case IFCOUNTER_OPACKETS:
1213 		return (adapter->opackets);
1214 	case IFCOUNTER_IBYTES:
1215 		return (adapter->ibytes);
1216 	case IFCOUNTER_OBYTES:
1217 		return (adapter->obytes);
1218 	case IFCOUNTER_IMCASTS:
1219 		return (adapter->imcasts);
1220 	case IFCOUNTER_OMCASTS:
1221 		return (adapter->omcasts);
1222 	case IFCOUNTER_COLLISIONS:
1223 		return (0);
1224 	case IFCOUNTER_IQDROPS:
1225 		return (adapter->iqdrops);
1226 	case IFCOUNTER_OQDROPS:
1227 		return (0);
1228 	case IFCOUNTER_IERRORS:
1229 		return (adapter->ierrors);
1230 	default:
1231 		return (if_get_counter_default(ifp, cnt));
1232 	}
1233 } /* ixgbe_if_get_counter */
1234 
1235 /************************************************************************
1236  * ixgbe_if_i2c_req
1237  ************************************************************************/
1238 static int
1239 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1240 {
1241 	struct adapter		*adapter = iflib_get_softc(ctx);
1242 	struct ixgbe_hw 	*hw = &adapter->hw;
1243 	int 			i;
1244 
1245 
1246 	if (hw->phy.ops.read_i2c_byte == NULL)
1247 		return (ENXIO);
1248 	for (i = 0; i < req->len; i++)
1249 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1250 		    req->dev_addr, &req->data[i]);
1251 	return (0);
1252 } /* ixgbe_if_i2c_req */
1253 
1254 /************************************************************************
1255  * ixgbe_add_media_types
1256  ************************************************************************/
1257 static void
1258 ixgbe_add_media_types(if_ctx_t ctx)
1259 {
1260 	struct adapter  *adapter = iflib_get_softc(ctx);
1261 	struct ixgbe_hw *hw = &adapter->hw;
1262 	device_t        dev = iflib_get_dev(ctx);
1263 	u64             layer;
1264 
1265 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1266 
1267 	/* Media types with matching FreeBSD media defines */
1268 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1269 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1270 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1271 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1272 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1273 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1274 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1275 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1276 
1277 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1278 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1279 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1280 		    NULL);
1281 
1282 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1283 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1284 		if (hw->phy.multispeed_fiber)
1285 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1286 			    NULL);
1287 	}
1288 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1289 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1290 		if (hw->phy.multispeed_fiber)
1291 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1292 			    NULL);
1293 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1294 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1295 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1296 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1297 
1298 #ifdef IFM_ETH_XTYPE
1299 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1300 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1301 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1302 		ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1303 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1304 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1305 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1306 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1307 #else
1308 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1309 		device_printf(dev, "Media supported: 10GbaseKR\n");
1310 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1311 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1312 	}
1313 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1314 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1315 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1316 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1317 	}
1318 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1319 		device_printf(dev, "Media supported: 1000baseKX\n");
1320 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1321 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1322 	}
1323 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1324 		device_printf(dev, "Media supported: 2500baseKX\n");
1325 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1326 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1327 	}
1328 #endif
1329 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1330 		device_printf(dev, "Media supported: 1000baseBX\n");
1331 
1332 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1333 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1334 		    0, NULL);
1335 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1336 	}
1337 
1338 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1339 } /* ixgbe_add_media_types */
1340 
1341 /************************************************************************
1342  * ixgbe_is_sfp
1343  ************************************************************************/
1344 static inline bool
1345 ixgbe_is_sfp(struct ixgbe_hw *hw)
1346 {
1347 	switch (hw->mac.type) {
1348 	case ixgbe_mac_82598EB:
1349 		if (hw->phy.type == ixgbe_phy_nl)
1350 			return (TRUE);
1351 		return (FALSE);
1352 	case ixgbe_mac_82599EB:
1353 		switch (hw->mac.ops.get_media_type(hw)) {
1354 		case ixgbe_media_type_fiber:
1355 		case ixgbe_media_type_fiber_qsfp:
1356 			return (TRUE);
1357 		default:
1358 			return (FALSE);
1359 		}
1360 	case ixgbe_mac_X550EM_x:
1361 	case ixgbe_mac_X550EM_a:
1362 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1363 			return (TRUE);
1364 		return (FALSE);
1365 	default:
1366 		return (FALSE);
1367 	}
1368 } /* ixgbe_is_sfp */
1369 
1370 /************************************************************************
1371  * ixgbe_config_link
1372  ************************************************************************/
1373 static void
1374 ixgbe_config_link(struct adapter *adapter)
1375 {
1376 	struct ixgbe_hw *hw = &adapter->hw;
1377 	u32             autoneg, err = 0;
1378 	bool            sfp, negotiate;
1379 
1380 	sfp = ixgbe_is_sfp(hw);
1381 
1382 	if (sfp) {
1383 		GROUPTASK_ENQUEUE(&adapter->mod_task);
1384 	} else {
1385 		if (hw->mac.ops.check_link)
1386 			err = ixgbe_check_link(hw, &adapter->link_speed,
1387 			    &adapter->link_up, FALSE);
1388 		if (err)
1389 			return;
1390 		autoneg = hw->phy.autoneg_advertised;
1391 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1392 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1393 			    &negotiate);
1394 		if (err)
1395 			return;
1396 		if (hw->mac.ops.setup_link)
1397 			err = hw->mac.ops.setup_link(hw, autoneg,
1398 			    adapter->link_up);
1399 	}
1400 
1401 } /* ixgbe_config_link */
1402 
1403 /************************************************************************
1404  * ixgbe_update_stats_counters - Update board statistics counters.
1405  ************************************************************************/
1406 static void
1407 ixgbe_update_stats_counters(struct adapter *adapter)
1408 {
1409 	struct ixgbe_hw       *hw = &adapter->hw;
1410 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1411 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1412 	u64                   total_missed_rx = 0;
1413 
1414 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1415 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1416 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1417 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1418 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1419 
1420 	for (int i = 0; i < 16; i++) {
1421 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1422 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1423 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1424 	}
1425 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1426 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1427 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1428 
1429 	/* Hardware workaround, gprc counts missed packets */
1430 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1431 	stats->gprc -= missed_rx;
1432 
1433 	if (hw->mac.type != ixgbe_mac_82598EB) {
1434 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1435 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1436 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1437 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1438 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1439 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1440 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1441 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1442 	} else {
1443 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1444 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1445 		/* 82598 only has a counter in the high register */
1446 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1447 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1448 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1449 	}
1450 
1451 	/*
1452 	 * Workaround: mprc hardware is incorrectly counting
1453 	 * broadcasts, so for now we subtract those.
1454 	 */
1455 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1456 	stats->bprc += bprc;
1457 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1458 	if (hw->mac.type == ixgbe_mac_82598EB)
1459 		stats->mprc -= bprc;
1460 
1461 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1462 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1463 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1464 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1465 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1466 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1467 
1468 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1469 	stats->lxontxc += lxon;
1470 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1471 	stats->lxofftxc += lxoff;
1472 	total = lxon + lxoff;
1473 
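	/*
	 * Transmitted pause frames are included in the good/multicast
	 * TX packet counters, so back them (and their minimum-length
	 * octets) out of the totals below.
	 */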
1474 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1475 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1476 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1477 	stats->gptc -= total;
1478 	stats->mptc -= total;
1479 	stats->ptc64 -= total;
1480 	stats->gotc -= total * ETHER_MIN_LEN;
1481 
1482 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1483 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1484 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1485 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1486 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1487 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1488 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1489 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1490 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1491 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1492 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1493 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1494 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1495 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1496 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1497 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1498 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1499 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1500 	/* Only read FCoE counters on 82599 and newer */
1501 	if (hw->mac.type != ixgbe_mac_82598EB) {
1502 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1503 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1504 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1505 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1506 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1507 	}
1508 
1509 	/* Fill out the OS statistics structure */
1510 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1511 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1512 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1513 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1514 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1515 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1516 	IXGBE_SET_COLLISIONS(adapter, 0);
1517 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1518 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1519 } /* ixgbe_update_stats_counters */
1520 
1521 /************************************************************************
1522  * ixgbe_add_hw_stats
1523  *
1524  *   Add sysctl variables, one per statistic, to the system.
1525  ************************************************************************/
1526 static void
1527 ixgbe_add_hw_stats(struct adapter *adapter)
1528 {
1529 	device_t               dev = iflib_get_dev(adapter->ctx);
1530 	struct ix_rx_queue     *rx_que;
1531 	struct ix_tx_queue     *tx_que;
1532 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1533 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1534 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1535 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1536 	struct sysctl_oid      *stat_node, *queue_node;
1537 	struct sysctl_oid_list *stat_list, *queue_list;
1538 	int                    i;
1539 
1540 #define QUEUE_NAME_LEN 32
1541 	char                   namebuf[QUEUE_NAME_LEN];
1542 
1543 	/* Driver Statistics */
1544 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1545 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1546 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1547 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1548 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1549 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1550 
1551 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1552 		struct tx_ring *txr = &tx_que->txr;
1553 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1554 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1555 		    CTLFLAG_RD, NULL, "Queue Name");
1556 		queue_list = SYSCTL_CHILDREN(queue_node);
1557 
1558 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1559 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1560 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1561 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1562 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1563 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1564 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1565 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1566 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1567 		    CTLFLAG_RD, &txr->total_packets,
1568 		    "Queue Packets Transmitted");
1569 	}
1570 
1571 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1572 		struct rx_ring *rxr = &rx_que->rxr;
1573 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1574 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1575 		    CTLFLAG_RD, NULL, "Queue Name");
1576 		queue_list = SYSCTL_CHILDREN(queue_node);
1577 
1578 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1579 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1580 		    sizeof(&adapter->rx_queues[i]),
1581 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1582 		    "Interrupt Rate");
1583 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1584 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1585 		    "irqs on this queue");
1586 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1587 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1588 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1589 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1590 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1591 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1592 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1593 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1594 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1595 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1596 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1597 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1598 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1599 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1600 	}
1601 
1602 	/* MAC stats get their own sub node */
1603 
1604 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1605 	    CTLFLAG_RD, NULL, "MAC Statistics");
1606 	stat_list = SYSCTL_CHILDREN(stat_node);
1607 
1608 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1609 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1610 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1611 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1612 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1613 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1614 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1615 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1616 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1617 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1618 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1619 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1620 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1621 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1622 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1623 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1624 
1625 	/* Flow Control stats */
1626 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1627 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1628 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1629 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1630 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1631 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1632 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1633 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1634 
1635 	/* Packet Reception Stats */
1636 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1637 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1638 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1639 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1640 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1641 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1642 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1643 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1644 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1645 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1646 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1647 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1648 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1649 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1650 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1651 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1652 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1653 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1654 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1655 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1656 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1657 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1658 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1659 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1660 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1661 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1662 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1663 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1664 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1665 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1666 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1667 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1668 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1669 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1670 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1671 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1672 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1673 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1674 
1675 	/* Packet Transmission Stats */
1676 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1677 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1678 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1679 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1680 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1681 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1682 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1683 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1684 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1685 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1686 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1687 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1688 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1689 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1690 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1691 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1692 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1693 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1694 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1695 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1696 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1697 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1698 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1699 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1700 } /* ixgbe_add_hw_stats */
1701 
1702 /************************************************************************
1703  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1704  *
1705  *   Retrieves the TDH value from the hardware
1706  ************************************************************************/
1707 static int
1708 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1709 {
1710 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1711 	int            error;
1712 	unsigned int   val;
1713 
1714 	if (!txr)
1715 		return (0);
1716 
1717 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1718 	error = sysctl_handle_int(oidp, &val, 0, req);
1719 	if (error || !req->newptr)
1720 		return error;
1721 
1722 	return (0);
1723 } /* ixgbe_sysctl_tdh_handler */
1724 
1725 /************************************************************************
1726  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1727  *
1728  *   Retrieves the TDT value from the hardware
1729  ************************************************************************/
1730 static int
1731 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1732 {
1733 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1734 	int            error;
1735 	unsigned int   val;
1736 
1737 	if (!txr)
1738 		return (0);
1739 
1740 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1741 	error = sysctl_handle_int(oidp, &val, 0, req);
1742 	if (error || !req->newptr)
1743 		return error;
1744 
1745 	return (0);
1746 } /* ixgbe_sysctl_tdt_handler */
1747 
1748 /************************************************************************
1749  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1750  *
1751  *   Retrieves the RDH value from the hardware
1752  ************************************************************************/
1753 static int
1754 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1755 {
1756 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1757 	int            error;
1758 	unsigned int   val;
1759 
1760 	if (!rxr)
1761 		return (0);
1762 
1763 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1764 	error = sysctl_handle_int(oidp, &val, 0, req);
1765 	if (error || !req->newptr)
1766 		return error;
1767 
1768 	return (0);
1769 } /* ixgbe_sysctl_rdh_handler */
1770 
1771 /************************************************************************
1772  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1773  *
1774  *   Retrieves the RDT value from the hardware
1775  ************************************************************************/
1776 static int
1777 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1778 {
1779 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1780 	int            error;
1781 	unsigned int   val;
1782 
1783 	if (!rxr)
1784 		return (0);
1785 
1786 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1787 	error = sysctl_handle_int(oidp, &val, 0, req);
1788 	if (error || !req->newptr)
1789 		return error;
1790 
1791 	return (0);
1792 } /* ixgbe_sysctl_rdt_handler */
1793 
1794 /************************************************************************
1795  * ixgbe_if_vlan_register
1796  *
1797  *   Run via the vlan config EVENT; it enables us to use the
1798  *   HW filter table since we can get the vlan id. This
1799  *   just creates the entry in the soft version of the
1800  *   VFTA; init will repopulate the real table.
1801  ************************************************************************/
1802 static void
1803 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1804 {
1805 	struct adapter *adapter = iflib_get_softc(ctx);
1806 	u16            index, bit;
1807 
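	/*
	 * The VFTA is 128 32-bit entries covering all 4096 VLAN IDs:
	 * bits [11:5] of the tag select the entry, bits [4:0] select
	 * the bit within it.
	 */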
1808 	index = (vtag >> 5) & 0x7F;
1809 	bit = vtag & 0x1F;
1810 	adapter->shadow_vfta[index] |= (1 << bit);
1811 	++adapter->num_vlans;
1812 	ixgbe_setup_vlan_hw_support(ctx);
1813 } /* ixgbe_if_vlan_register */
1814 
1815 /************************************************************************
1816  * ixgbe_if_vlan_unregister
1817  *
1818  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1819  ************************************************************************/
1820 static void
1821 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1822 {
1823 	struct adapter *adapter = iflib_get_softc(ctx);
1824 	u16            index, bit;
1825 
1826 	index = (vtag >> 5) & 0x7F;
1827 	bit = vtag & 0x1F;
1828 	adapter->shadow_vfta[index] &= ~(1 << bit);
1829 	--adapter->num_vlans;
1830 	/* Re-init to load the changes */
1831 	ixgbe_setup_vlan_hw_support(ctx);
1832 } /* ixgbe_if_vlan_unregister */
1833 
1834 /************************************************************************
1835  * ixgbe_setup_vlan_hw_support
1836  ************************************************************************/
1837 static void
1838 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1839 {
1840 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1841 	struct adapter  *adapter = iflib_get_softc(ctx);
1842 	struct ixgbe_hw *hw = &adapter->hw;
1843 	struct rx_ring  *rxr;
1844 	int             i;
1845 	u32             ctrl;
1846 
1847 
1848 	/*
1849 	 * We get here through init_locked, meaning
1850 	 * a soft reset, which has already cleared
1851 	 * the VFTA and other state, so if no VLANs
1852 	 * have been registered, do nothing.
1853 	 */
1854 	if (adapter->num_vlans == 0)
1855 		return;
1856 
1857 	/* Setup the queues for vlans */
1858 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1859 		for (i = 0; i < adapter->num_rx_queues; i++) {
1860 			rxr = &adapter->rx_queues[i].rxr;
1861 			/* On 82599 the VLAN enable is per-queue in RXDCTL */
1862 			if (hw->mac.type != ixgbe_mac_82598EB) {
1863 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1864 				ctrl |= IXGBE_RXDCTL_VME;
1865 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1866 			}
1867 			rxr->vtag_strip = TRUE;
1868 		}
1869 	}
1870 
1871 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1872 		return;
1873 	/*
1874 	 * A soft reset zeroes out the VFTA, so
1875 	 * we need to repopulate it now.
1876 	 */
1877 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1878 		if (adapter->shadow_vfta[i] != 0)
1879 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1880 			    adapter->shadow_vfta[i]);
1881 
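	/*
	 * VLNCTRL.VFE turns on the VLAN filter table; CFIEN is cleared so
	 * frames are not filtered on the CFI bit. On 82598 tag stripping
	 * is global (VLNCTRL.VME) rather than per-queue in RXDCTL.
	 */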
1882 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1883 	/* Enable the VLAN filter table if HW filtering is enabled */
1884 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1885 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1886 		ctrl |= IXGBE_VLNCTRL_VFE;
1887 	}
1888 	if (hw->mac.type == ixgbe_mac_82598EB)
1889 		ctrl |= IXGBE_VLNCTRL_VME;
1890 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1891 } /* ixgbe_setup_vlan_hw_support */
1892 
1893 /************************************************************************
1894  * ixgbe_get_slot_info
1895  *
1896  *   Get the width and transaction speed of
1897  *   the slot this adapter is plugged into.
1898  ************************************************************************/
1899 static void
1900 ixgbe_get_slot_info(struct adapter *adapter)
1901 {
1902 	device_t        dev = iflib_get_dev(adapter->ctx);
1903 	struct ixgbe_hw *hw = &adapter->hw;
1904 	int             bus_info_valid = TRUE;
1905 	u32             offset;
1906 	u16             link;
1907 
1908 	/* Some devices are behind an internal bridge */
1909 	switch (hw->device_id) {
1910 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1911 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1912 		goto get_parent_info;
1913 	default:
1914 		break;
1915 	}
1916 
1917 	ixgbe_get_bus_info(hw);
1918 
1919 	/*
1920 	 * Some devices don't use PCI-E, so there is no need
1921 	 * to display "Unknown" for bus speed and width.
1922 	 */
1923 	switch (hw->mac.type) {
1924 	case ixgbe_mac_X550EM_x:
1925 	case ixgbe_mac_X550EM_a:
1926 		return;
1927 	default:
1928 		goto display;
1929 	}
1930 
1931 get_parent_info:
1932 	/*
1933 	 * For the Quad port adapter we need to parse back
1934 	 * up the PCI tree to find the speed of the expansion
1935 	 * slot into which this adapter is plugged. A bit more work.
1936 	 */
1937 	dev = device_get_parent(device_get_parent(dev));
1938 #ifdef IXGBE_DEBUG
1939 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1940 	    pci_get_slot(dev), pci_get_function(dev));
1941 #endif
1942 	dev = device_get_parent(device_get_parent(dev));
1943 #ifdef IXGBE_DEBUG
1944 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1945 	    pci_get_slot(dev), pci_get_function(dev));
1946 #endif
1947 	/* Now get the PCI Express Capabilities offset */
1948 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1949 		/*
1950 		 * Hmm...can't get PCI-Express capabilities.
1951 		 * Falling back to default method.
1952 		 */
1953 		bus_info_valid = FALSE;
1954 		ixgbe_get_bus_info(hw);
1955 		goto display;
1956 	}
1957 	/* ...and read the Link Status Register */
1958 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1959 	ixgbe_set_pci_config_data_generic(hw, link);
1960 
1961 display:
1962 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1963 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1964 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1965 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1966 	     "Unknown"),
1967 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1968 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1969 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1970 	     "Unknown"));
1971 
1972 	if (bus_info_valid) {
1973 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1974 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1975 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1976 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1977 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1978 		}
1979 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1980 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1981 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1982 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1983 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1984 		}
1985 	} else
1986 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1987 
1988 	return;
1989 } /* ixgbe_get_slot_info */
1990 
1991 /************************************************************************
1992  * ixgbe_if_msix_intr_assign
1993  *
1994  *   Setup MSI-X Interrupt resources and handlers
1995  ************************************************************************/
1996 static int
1997 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1998 {
1999 	struct adapter     *adapter = iflib_get_softc(ctx);
2000 	struct ix_rx_queue *rx_que = adapter->rx_queues;
2001 	struct ix_tx_queue *tx_que;
2002 	int                error, rid, vector = 0;
2003 	int                cpu_id = 0;
2004 	char               buf[16];
2005 
2006 	/* MSI-X rids are 1-based; queue vectors come first, the admin/link vector last */
2007 	rid = vector + 1;
2008 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2009 		rid = vector + 1;
2010 
2011 		snprintf(buf, sizeof(buf), "rxq%d", i);
2012 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2013 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2014 
2015 		if (error) {
2016 			device_printf(iflib_get_dev(ctx),
2017 			    "Failed to allocate que int %d err: %d", i, error);
2018 			adapter->num_rx_queues = i + 1;
2019 			goto fail;
2020 		}
2021 
2022 		rx_que->msix = vector;
2023 		adapter->active_queues |= (u64)1 << rx_que->msix;
2024 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2025 			/*
2026 			 * The queue ID is used as the RSS layer bucket ID.
2027 			 * We look up the queue ID -> RSS CPU ID and select
2028 			 * that.
2029 			 */
2030 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2031 		} else {
2032 			/*
2033 			 * Bind the msix vector, and thus the
2034 			 * rings to the corresponding cpu.
2035 			 *
2036 			 * This just happens to match the default RSS
2037 			 * round-robin bucket -> queue -> CPU allocation.
2038 			 */
2039 			if (adapter->num_rx_queues > 1)
2040 				cpu_id = i;
2041 		}
2042 
2043 	}
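	/*
	 * TX queues do not get their own MSI-X vectors; each one shares
	 * the interrupt of the RX queue it is paired with below.
	 */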
2044 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2045 		snprintf(buf, sizeof(buf), "txq%d", i);
2046 		tx_que = &adapter->tx_queues[i];
2047 		tx_que->msix = i % adapter->num_rx_queues;
2048 		iflib_softirq_alloc_generic(ctx,
2049 		    &adapter->rx_queues[tx_que->msix].que_irq,
2050 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2051 	}
2052 	rid = vector + 1;
2053 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2054 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2055 	if (error) {
2056 		device_printf(iflib_get_dev(ctx),
2057 		    "Failed to register admin handler");
2058 		return (error);
2059 	}
2060 
2061 	adapter->vector = vector;
2062 
2063 	return (0);
2064 fail:
2065 	iflib_irq_free(ctx, &adapter->irq);
2066 	rx_que = adapter->rx_queues;
2067 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2068 		iflib_irq_free(ctx, &rx_que->que_irq);
2069 
2070 	return (error);
2071 } /* ixgbe_if_msix_intr_assign */
2072 
2073 /*********************************************************************
2074  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2075  **********************************************************************/
2076 static int
2077 ixgbe_msix_que(void *arg)
2078 {
2079 	struct ix_rx_queue *que = arg;
2080 	struct adapter     *adapter = que->adapter;
2081 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2082 
2083 	/* Protect against spurious interrupts */
2084 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2085 		return 0;
2086 
2087 	ixgbe_disable_queue(adapter, que->msix);
2088 	++que->irqs;
2089 
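	/* Hand the actual RX/TX work off to the iflib queue task. */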
2090 	return (FILTER_SCHEDULE_THREAD);
2091 } /* ixgbe_msix_que */
2092 
2093 /************************************************************************
2094  * ixgbe_if_media_status - Media Ioctl callback
2095  *
2096  *   Called whenever the user queries the status of
2097  *   the interface using ifconfig.
2098  ************************************************************************/
2099 static void
2100 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2101 {
2102 	struct adapter  *adapter = iflib_get_softc(ctx);
2103 	struct ixgbe_hw *hw = &adapter->hw;
2104 	int             layer;
2105 
2106 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2107 
2108 	iflib_admin_intr_deferred(ctx);
2109 
2110 	ifmr->ifm_status = IFM_AVALID;
2111 	ifmr->ifm_active = IFM_ETHER;
2112 
2113 	if (!adapter->link_active)
2114 		return;
2115 
2116 	ifmr->ifm_status |= IFM_ACTIVE;
2117 	layer = adapter->phy_layer;
2118 
2119 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2120 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2121 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2122 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2123 		switch (adapter->link_speed) {
2124 		case IXGBE_LINK_SPEED_10GB_FULL:
2125 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2126 			break;
2127 		case IXGBE_LINK_SPEED_1GB_FULL:
2128 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2129 			break;
2130 		case IXGBE_LINK_SPEED_100_FULL:
2131 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2132 			break;
2133 		case IXGBE_LINK_SPEED_10_FULL:
2134 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2135 			break;
2136 		}
2137 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2138 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2139 		switch (adapter->link_speed) {
2140 		case IXGBE_LINK_SPEED_10GB_FULL:
2141 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2142 			break;
2143 		}
2144 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2145 		switch (adapter->link_speed) {
2146 		case IXGBE_LINK_SPEED_10GB_FULL:
2147 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2148 			break;
2149 		case IXGBE_LINK_SPEED_1GB_FULL:
2150 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2151 			break;
2152 		}
2153 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2154 		switch (adapter->link_speed) {
2155 		case IXGBE_LINK_SPEED_10GB_FULL:
2156 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2157 			break;
2158 		case IXGBE_LINK_SPEED_1GB_FULL:
2159 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2160 			break;
2161 		}
2162 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2163 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2164 		switch (adapter->link_speed) {
2165 		case IXGBE_LINK_SPEED_10GB_FULL:
2166 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2167 			break;
2168 		case IXGBE_LINK_SPEED_1GB_FULL:
2169 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2170 			break;
2171 		}
2172 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2173 		switch (adapter->link_speed) {
2174 		case IXGBE_LINK_SPEED_10GB_FULL:
2175 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2176 			break;
2177 		}
2178 	/*
2179 	 * XXX: These need to use the proper media types once
2180 	 * they're added.
2181 	 */
2182 #ifndef IFM_ETH_XTYPE
2183 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2184 		switch (adapter->link_speed) {
2185 		case IXGBE_LINK_SPEED_10GB_FULL:
2186 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2187 			break;
2188 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2189 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2190 			break;
2191 		case IXGBE_LINK_SPEED_1GB_FULL:
2192 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2193 			break;
2194 		}
2195 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2196 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2197 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2198 		switch (adapter->link_speed) {
2199 		case IXGBE_LINK_SPEED_10GB_FULL:
2200 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2201 			break;
2202 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2203 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2204 			break;
2205 		case IXGBE_LINK_SPEED_1GB_FULL:
2206 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2207 			break;
2208 		}
2209 #else
2210 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2211 		switch (adapter->link_speed) {
2212 		case IXGBE_LINK_SPEED_10GB_FULL:
2213 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2214 			break;
2215 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2216 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2217 			break;
2218 		case IXGBE_LINK_SPEED_1GB_FULL:
2219 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2220 			break;
2221 		}
2222 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2223 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2224 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2225 		switch (adapter->link_speed) {
2226 		case IXGBE_LINK_SPEED_10GB_FULL:
2227 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2228 			break;
2229 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2230 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2231 			break;
2232 		case IXGBE_LINK_SPEED_1GB_FULL:
2233 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2234 			break;
2235 		}
2236 #endif
2237 
2238 	/* If nothing is recognized... */
2239 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2240 		ifmr->ifm_active |= IFM_UNKNOWN;
2241 
2242 	/* Display current flow control setting used on link */
2243 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2244 	    hw->fc.current_mode == ixgbe_fc_full)
2245 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2246 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2247 	    hw->fc.current_mode == ixgbe_fc_full)
2248 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2249 } /* ixgbe_if_media_status */
2250 
2251 /************************************************************************
2252  * ixgbe_if_media_change - Media Ioctl callback
2253  *
2254  *   Called when the user changes speed/duplex using
2255  *   media/mediaopt option with ifconfig.
2256  ************************************************************************/
2257 static int
2258 ixgbe_if_media_change(if_ctx_t ctx)
2259 {
2260 	struct adapter   *adapter = iflib_get_softc(ctx);
2261 	struct ifmedia   *ifm = iflib_get_media(ctx);
2262 	struct ixgbe_hw  *hw = &adapter->hw;
2263 	ixgbe_link_speed speed = 0;
2264 
2265 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2266 
2267 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2268 		return (EINVAL);
2269 
2270 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2271 		return (EPERM);
2272 
2273 	/*
2274 	 * We don't actually need to check against the supported
2275 	 * media types of the adapter; ifmedia will take care of
2276 	 * that for us.
2277 	 */
2278 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2279 	case IFM_AUTO:
2280 	case IFM_10G_T:
2281 		speed |= IXGBE_LINK_SPEED_100_FULL;
2282 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2283 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2284 		break;
2285 	case IFM_10G_LRM:
2286 	case IFM_10G_LR:
2287 #ifndef IFM_ETH_XTYPE
2288 	case IFM_10G_SR: /* KR, too */
2289 	case IFM_10G_CX4: /* KX4 */
2290 #else
2291 	case IFM_10G_KR:
2292 	case IFM_10G_KX4:
2293 #endif
2294 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2295 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2296 		break;
2297 #ifndef IFM_ETH_XTYPE
2298 	case IFM_1000_CX: /* KX */
2299 #else
2300 	case IFM_1000_KX:
2301 #endif
2302 	case IFM_1000_LX:
2303 	case IFM_1000_SX:
2304 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2305 		break;
2306 	case IFM_1000_T:
2307 		speed |= IXGBE_LINK_SPEED_100_FULL;
2308 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2309 		break;
2310 	case IFM_10G_TWINAX:
2311 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2312 		break;
2313 	case IFM_100_TX:
2314 		speed |= IXGBE_LINK_SPEED_100_FULL;
2315 		break;
2316 	case IFM_10_T:
2317 		speed |= IXGBE_LINK_SPEED_10_FULL;
2318 		break;
2319 	default:
2320 		goto invalid;
2321 	}
2322 
2323 	hw->mac.autotry_restart = TRUE;
2324 	hw->mac.ops.setup_link(hw, speed, TRUE);
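	/*
	 * Record the advertised speeds as a bitmask (0x1 = 100M,
	 * 0x2 = 1G, 0x4 = 10G, 0x8 = 10M), matching the encoding
	 * used by the advertise_speed sysctl.
	 */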
2325 	adapter->advertise =
2326 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2327 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2328 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2329 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2330 
2331 	return (0);
2332 
2333 invalid:
2334 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2335 
2336 	return (EINVAL);
2337 } /* ixgbe_if_media_change */
2338 
2339 /************************************************************************
2340  * ixgbe_if_promisc_set
2341  ************************************************************************/
2342 static int
2343 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2344 {
2345 	struct adapter *adapter = iflib_get_softc(ctx);
2346 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2347 	u32            rctl;
2348 	int            mcnt = 0;
2349 
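	/*
	 * FCTRL.UPE enables unicast promiscuous mode and FCTRL.MPE
	 * multicast promiscuous mode; start from the current filter
	 * setting with unicast promiscuity cleared.
	 */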
2350 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2351 	rctl &= (~IXGBE_FCTRL_UPE);
2352 	if (ifp->if_flags & IFF_ALLMULTI)
2353 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2354 	else {
2355 		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2356 	}
2357 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2358 		rctl &= (~IXGBE_FCTRL_MPE);
2359 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2360 
2361 	if (ifp->if_flags & IFF_PROMISC) {
2362 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2363 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2364 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2365 		rctl |= IXGBE_FCTRL_MPE;
2366 		rctl &= ~IXGBE_FCTRL_UPE;
2367 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2368 	}
2369 	return (0);
2370 } /* ixgbe_if_promisc_set */
2371 
2372 /************************************************************************
2373  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2374  ************************************************************************/
2375 static int
2376 ixgbe_msix_link(void *arg)
2377 {
2378 	struct adapter  *adapter = arg;
2379 	struct ixgbe_hw *hw = &adapter->hw;
2380 	u32             eicr, eicr_mask;
2381 	s32             retval;
2382 
2383 	++adapter->link_irq;
2384 
2385 	/* Pause other interrupts */
2386 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2387 
2388 	/* First get the cause */
2389 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2390 	/* Be sure the queue bits are not cleared */
2391 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2392 	/* Clear interrupt with write */
2393 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2394 
2395 	/* Link status change */
2396 	if (eicr & IXGBE_EICR_LSC) {
2397 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2398 		iflib_admin_intr_deferred(adapter->ctx);
2399 	}
2400 
2401 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2402 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2403 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2404 			/* This is probably overkill :) */
2405 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2406 				return (FILTER_HANDLED);
2407 			/* Disable the interrupt */
2408 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2409 			GROUPTASK_ENQUEUE(&adapter->fdir_task);
2410 		} else
2411 			if (eicr & IXGBE_EICR_ECC) {
2412 				device_printf(iflib_get_dev(adapter->ctx),
2413 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2414 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2415 			}
2416 
2417 		/* Check for over temp condition */
2418 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2419 			switch (adapter->hw.mac.type) {
2420 			case ixgbe_mac_X550EM_a:
2421 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2422 					break;
2423 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2424 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2425 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2426 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2427 				retval = hw->phy.ops.check_overtemp(hw);
2428 				if (retval != IXGBE_ERR_OVERTEMP)
2429 					break;
2430 				device_printf(iflib_get_dev(adapter->ctx),
2431 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2432 				device_printf(iflib_get_dev(adapter->ctx),
2433 				    "System shutdown required!\n");
2434 				break;
2435 			default:
2436 				if (!(eicr & IXGBE_EICR_TS))
2437 					break;
2438 				retval = hw->phy.ops.check_overtemp(hw);
2439 				if (retval != IXGBE_ERR_OVERTEMP)
2440 					break;
2441 				device_printf(iflib_get_dev(adapter->ctx),
2442 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2443 				device_printf(iflib_get_dev(adapter->ctx),
2444 				    "System shutdown required!\n");
2445 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2446 				break;
2447 			}
2448 		}
2449 
2450 		/* Check for VF message */
2451 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2452 		    (eicr & IXGBE_EICR_MAILBOX))
2453 			GROUPTASK_ENQUEUE(&adapter->mbx_task);
2454 	}
2455 
2456 	if (ixgbe_is_sfp(hw)) {
2457 		/* Pluggable optics-related interrupt */
2458 		if (hw->mac.type >= ixgbe_mac_X540)
2459 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2460 		else
2461 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2462 
2463 		if (eicr & eicr_mask) {
2464 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2465 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2466 				GROUPTASK_ENQUEUE(&adapter->mod_task);
2467 		}
2468 
2469 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2470 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2471 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2472 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2473 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2474 				GROUPTASK_ENQUEUE(&adapter->msf_task);
2475 		}
2476 	}
2477 
2478 	/* Check for fan failure */
2479 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2480 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2481 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2482 	}
2483 
2484 	/* External PHY interrupt */
2485 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2486 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2487 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2488 		GROUPTASK_ENQUEUE(&adapter->phy_task);
2489 	}
2490 
2491 	/* Re-enable other interrupts */
2492 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2493 
2494 	return (FILTER_HANDLED);
2495 } /* ixgbe_msix_link */
2496 
2497 /************************************************************************
2498  * ixgbe_sysctl_interrupt_rate_handler
2499  ************************************************************************/
2500 static int
2501 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2502 {
2503 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2504 	int                error;
2505 	unsigned int       reg, usec, rate;
2506 
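	/*
	 * The ITR interval occupies bits [11:3] of EITR and is treated
	 * here as 2-usec units: 500000 / interval gives interrupts per
	 * second, and 4000000 / rate is the interval already shifted
	 * into place for the register write.
	 */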
2507 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2508 	usec = ((reg & 0x0FF8) >> 3);
2509 	if (usec > 0)
2510 		rate = 500000 / usec;
2511 	else
2512 		rate = 0;
2513 	error = sysctl_handle_int(oidp, &rate, 0, req);
2514 	if (error || !req->newptr)
2515 		return error;
2516 	reg &= ~0xfff; /* default, no limitation */
2517 	ixgbe_max_interrupt_rate = 0;
2518 	if (rate > 0 && rate < 500000) {
2519 		if (rate < 1000)
2520 			rate = 1000;
2521 		ixgbe_max_interrupt_rate = rate;
2522 		reg |= ((4000000/rate) & 0xff8);
2523 	}
2524 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2525 
2526 	return (0);
2527 } /* ixgbe_sysctl_interrupt_rate_handler */
2528 
2529 /************************************************************************
2530  * ixgbe_add_device_sysctls
2531  ************************************************************************/
2532 static void
2533 ixgbe_add_device_sysctls(if_ctx_t ctx)
2534 {
2535 	struct adapter         *adapter = iflib_get_softc(ctx);
2536 	device_t               dev = iflib_get_dev(ctx);
2537 	struct ixgbe_hw        *hw = &adapter->hw;
2538 	struct sysctl_oid_list *child;
2539 	struct sysctl_ctx_list *ctx_list;
2540 
2541 	ctx_list = device_get_sysctl_ctx(dev);
2542 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2543 
2544 	/* Sysctls for all devices */
2545 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2546 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2547 	    IXGBE_SYSCTL_DESC_SET_FC);
2548 
2549 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2550 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2551 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2552 
2553 #ifdef IXGBE_DEBUG
2554 	/* testing sysctls (for all devices) */
2555 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2556 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2557 	    "I", "PCI Power State");
2558 
2559 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2560 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2561 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2562 #endif
2563 	/* for X550 series devices */
2564 	if (hw->mac.type >= ixgbe_mac_X550)
2565 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2566 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2567 		    "I", "DMA Coalesce");
2568 
2569 	/* for WoL-capable devices */
2570 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2571 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2572 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2573 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2574 
2575 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2576 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2577 		    "I", "Enable/Disable Wake Up Filters");
2578 	}
2579 
2580 	/* for X552/X557-AT devices */
2581 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2582 		struct sysctl_oid *phy_node;
2583 		struct sysctl_oid_list *phy_list;
2584 
2585 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2586 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2587 		phy_list = SYSCTL_CHILDREN(phy_node);
2588 
2589 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2590 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2591 		    "I", "Current External PHY Temperature (Celsius)");
2592 
2593 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2594 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2595 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2596 		    "External PHY High Temperature Event Occurred");
2597 	}
2598 
2599 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2600 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2601 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2602 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2603 	}
2604 } /* ixgbe_add_device_sysctls */
2605 
2606 /************************************************************************
2607  * ixgbe_allocate_pci_resources
2608  ************************************************************************/
2609 static int
2610 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2611 {
2612 	struct adapter *adapter = iflib_get_softc(ctx);
2613 	device_t        dev = iflib_get_dev(ctx);
2614 	int             rid;
2615 
2616 	rid = PCIR_BAR(0);
2617 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2618 	    RF_ACTIVE);
2619 
2620 	if (!(adapter->pci_mem)) {
2621 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2622 		return (ENXIO);
2623 	}
2624 
2625 	/* Save bus_space values for READ/WRITE_REG macros */
2626 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2627 	adapter->osdep.mem_bus_space_handle =
2628 	    rman_get_bushandle(adapter->pci_mem);
2629 	/* Set hw values for shared code */
2630 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2631 
2632 	return (0);
2633 } /* ixgbe_allocate_pci_resources */
2634 
2635 /************************************************************************
2636  * ixgbe_detach - Device removal routine
2637  *
2638  *   Called when the driver is being removed.
2639  *   Stops the adapter and deallocates all the resources
2640  *   that were allocated for driver operation.
2641  *
2642  *   return 0 on success, positive on failure
2643  ************************************************************************/
2644 static int
2645 ixgbe_if_detach(if_ctx_t ctx)
2646 {
2647 	struct adapter *adapter = iflib_get_softc(ctx);
2648 	device_t       dev = iflib_get_dev(ctx);
2649 	u32            ctrl_ext;
2650 
2651 	INIT_DEBUGOUT("ixgbe_detach: begin");
2652 
2653 	if (ixgbe_pci_iov_detach(dev) != 0) {
2654 		device_printf(dev, "SR-IOV in use; detach first.\n");
2655 		return (EBUSY);
2656 	}
2657 
2658 	iflib_config_gtask_deinit(&adapter->mod_task);
2659 	iflib_config_gtask_deinit(&adapter->msf_task);
2660 	iflib_config_gtask_deinit(&adapter->phy_task);
2661 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2662 		iflib_config_gtask_deinit(&adapter->mbx_task);
2663 
2664 	ixgbe_setup_low_power_mode(ctx);
2665 
2666 	/* let hardware know driver is unloading */
2667 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2668 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2669 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2670 
2671 	ixgbe_free_pci_resources(ctx);
2672 	free(adapter->mta, M_IXGBE);
2673 
2674 	return (0);
2675 } /* ixgbe_if_detach */
2676 
2677 /************************************************************************
2678  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2679  *
2680  *   Prepare the adapter/port for LPLU and/or WoL
2681  ************************************************************************/
2682 static int
2683 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2684 {
2685 	struct adapter  *adapter = iflib_get_softc(ctx);
2686 	struct ixgbe_hw *hw = &adapter->hw;
2687 	device_t        dev = iflib_get_dev(ctx);
2688 	s32             error = 0;
2689 
2690 	if (!hw->wol_enabled)
2691 		ixgbe_set_phy_power(hw, FALSE);
2692 
2693 	/* Limit power management flow to X550EM baseT */
2694 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2695 	    hw->phy.ops.enter_lplu) {
2696 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2697 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2698 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2699 
2700 		/*
2701 		 * Clear Wake Up Status register to prevent any previous wakeup
2702 		 * events from waking us up immediately after we suspend.
2703 		 */
2704 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2705 
2706 		/*
2707 		 * Program the Wakeup Filter Control register with user filter
2708 		 * settings
2709 		 */
2710 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2711 
2712 		/* Enable wakeups and power management in Wakeup Control */
2713 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2714 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2715 
2716 		/* X550EM baseT adapters need a special LPLU flow */
2717 		hw->phy.reset_disable = TRUE;
2718 		ixgbe_if_stop(ctx);
2719 		error = hw->phy.ops.enter_lplu(hw);
2720 		if (error)
2721 			device_printf(dev, "Error entering LPLU: %d\n", error);
2722 		hw->phy.reset_disable = FALSE;
2723 	} else {
2724 		/* Just stop for other adapters */
2725 		ixgbe_if_stop(ctx);
2726 	}
2727 
2728 	return error;
2729 } /* ixgbe_setup_low_power_mode */
2730 
2731 /************************************************************************
2732  * ixgbe_shutdown - Shutdown entry point
2733  ************************************************************************/
2734 static int
2735 ixgbe_if_shutdown(if_ctx_t ctx)
2736 {
2737 	int error = 0;
2738 
2739 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2740 
2741 	error = ixgbe_setup_low_power_mode(ctx);
2742 
2743 	return (error);
2744 } /* ixgbe_if_shutdown */
2745 
2746 /************************************************************************
2747  * ixgbe_suspend
2748  *
2749  *   From D0 to D3
2750  ************************************************************************/
2751 static int
2752 ixgbe_if_suspend(if_ctx_t ctx)
2753 {
2754 	int error = 0;
2755 
2756 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2757 
2758 	error = ixgbe_setup_low_power_mode(ctx);
2759 
2760 	return (error);
2761 } /* ixgbe_if_suspend */
2762 
2763 /************************************************************************
2764  * ixgbe_resume
2765  *
2766  *   From D3 to D0
2767  ************************************************************************/
2768 static int
2769 ixgbe_if_resume(if_ctx_t ctx)
2770 {
2771 	struct adapter  *adapter = iflib_get_softc(ctx);
2772 	device_t        dev = iflib_get_dev(ctx);
2773 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2774 	struct ixgbe_hw *hw = &adapter->hw;
2775 	u32             wus;
2776 
2777 	INIT_DEBUGOUT("ixgbe_resume: begin");
2778 
2779 	/* Read & clear WUS register */
2780 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2781 	if (wus)
2782 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2783 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2784 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2785 	/* And clear WUFC until next low-power transition */
2786 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2787 
2788 	/*
2789 	 * Required after D3->D0 transition;
2790 	 * will re-advertise all previous advertised speeds
2791 	 */
2792 	if (ifp->if_flags & IFF_UP)
2793 		ixgbe_if_init(ctx);
2794 
2795 	return (0);
2796 } /* ixgbe_if_resume */
2797 
2798 /************************************************************************
2799  * ixgbe_if_mtu_set - Ioctl mtu entry point
2800  *
2801  *   Return 0 on success, EINVAL on failure
2802  ************************************************************************/
2803 static int
2804 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2805 {
2806 	struct adapter *adapter = iflib_get_softc(ctx);
2807 	int error = 0;
2808 
2809 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2810 
2811 	if (mtu > IXGBE_MAX_MTU) {
2812 		error = EINVAL;
2813 	} else {
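		/*
		 * max_frame_size is the MTU plus the link-layer framing
		 * overhead accounted for by IXGBE_MTU_HDR.
		 */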
2814 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2815 	}
2816 
2817 	return error;
2818 } /* ixgbe_if_mtu_set */
2819 
2820 /************************************************************************
2821  * ixgbe_if_crcstrip_set
2822  ************************************************************************/
2823 static void
2824 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2825 {
2826 	struct adapter *sc = iflib_get_softc(ctx);
2827 	struct ixgbe_hw *hw = &sc->hw;
2828 	/* crc stripping is set in two places:
2829 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2830 	 * IXGBE_RDRXCTL (set by the original driver in
2831 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2832 	 *	We disable the setting when netmap is compiled in).
2833 	 * We update the values here, but also in ixgbe.c because
2834 	 * init_locked sometimes is called outside our control.
2835 	 */
2836 	uint32_t hl, rxc;
2837 
2838 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2839 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2840 #ifdef NETMAP
2841 	if (netmap_verbose)
2842 		D("%s read  HLREG 0x%x rxc 0x%x",
2843 			onoff ? "enter" : "exit", hl, rxc);
2844 #endif
2845 	/* hw requirements ... */
2846 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2847 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2848 	if (onoff && !crcstrip) {
2849 		/* keep the crc. Fast rx */
2850 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2851 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2852 	} else {
2853 		/* reset default mode */
2854 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2855 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2856 	}
2857 #ifdef NETMAP
2858 	if (netmap_verbose)
2859 		D("%s write HLREG 0x%x rxc 0x%x",
2860 			onoff ? "enter" : "exit", hl, rxc);
2861 #endif
2862 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2863 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2864 } /* ixgbe_if_crcstrip_set */
2865 
2866 /*********************************************************************
2867  * ixgbe_if_init - Init entry point
2868  *
2869  *   Used in two ways: It is used by the stack as an init
2870  *   entry point in network interface structure. It is also
2871  *   used by the driver as a hw/sw initialization routine to
2872  *   get to a consistent state.
2873  *
2874  *   Return 0 on success, positive on failure
2875  **********************************************************************/
2876 void
2877 ixgbe_if_init(if_ctx_t ctx)
2878 {
2879 	struct adapter     *adapter = iflib_get_softc(ctx);
2880 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2881 	device_t           dev = iflib_get_dev(ctx);
2882 	struct ixgbe_hw *hw = &adapter->hw;
2883 	struct ix_rx_queue *rx_que;
2884 	struct ix_tx_queue *tx_que;
2885 	u32             txdctl, mhadd;
2886 	u32             rxdctl, rxctrl;
2887 	u32             ctrl_ext;
2888 
2889 	int             i, j, err;
2890 
2891 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2892 
2893 	/* Queue indices may change with IOV mode */
2894 	ixgbe_align_all_queue_indices(adapter);
2895 
2896 	/* reprogram the RAR[0] in case user changed it. */
2897 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2898 
2899 	/* Get the latest mac address; the user may have set a LAA */
2900 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2901 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2902 	hw->addr_ctrl.rar_used_count = 1;
2903 
2904 	ixgbe_init_hw(hw);
2905 
2906 	ixgbe_initialize_iov(adapter);
2907 
2908 	ixgbe_initialize_transmit_units(ctx);
2909 
2910 	/* Setup Multicast table */
2911 	ixgbe_if_multi_set(ctx);
2912 
2913 	/* Determine the correct mbuf pool, based on frame size */
2914 	if (adapter->max_frame_size <= MCLBYTES)
2915 		adapter->rx_mbuf_sz = MCLBYTES;
2916 	else
2917 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
2918 
2919 	/* Configure RX settings */
2920 	ixgbe_initialize_receive_units(ctx);
2921 
2922 	/* Enable SDP & MSI-X interrupts based on adapter */
2923 	ixgbe_config_gpie(adapter);
2924 
2925 	/* Set MTU size */
2926 	if (ifp->if_mtu > ETHERMTU) {
2927 		/* aka IXGBE_MAXFRS on 82599 and newer */
2928 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2929 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2930 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2931 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2932 	}
2933 
2934 	/* Now enable all the queues */
2935 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2936 		struct tx_ring *txr = &tx_que->txr;
2937 
2938 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2939 		txdctl |= IXGBE_TXDCTL_ENABLE;
2940 		/* Set WTHRESH to 8, burst writeback */
2941 		txdctl |= (8 << 16);
2942 		/*
2943 		 * When the internal queue falls below PTHRESH (32),
2944 		 * start prefetching as long as there are at least
2945 		 * HTHRESH (1) buffers ready. The values are taken
2946 		 * from the Intel linux driver 3.8.21.
2947 		 * Prefetching enables tx line rate even with 1 queue.
2948 		 */
2949 		txdctl |= (32 << 0) | (1 << 8);
2950 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2951 	}
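
/*
 * For reference, the OR operations above compose the TXDCTL thresholds as
 * PTHRESH = 32 (shift 0), HTHRESH = 1 (shift 8) and WTHRESH = 8 (shift 16),
 * i.e. txdctl |= 0x00080120 in addition to IXGBE_TXDCTL_ENABLE.
 */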
2952 
2953 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2954 		struct rx_ring *rxr = &rx_que->rxr;
2955 
2956 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2957 		if (hw->mac.type == ixgbe_mac_82598EB) {
2958 			/*
2959 			 * PTHRESH = 32 (0x20)
2960 			 * HTHRESH = 4
2961 			 * WTHRESH = 8
2962 			 */
2963 			rxdctl &= ~0x3FFFFF;
2964 			rxdctl |= 0x080420;
2965 		}
2966 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2967 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2968 		for (j = 0; j < 10; j++) {
2969 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2970 			    IXGBE_RXDCTL_ENABLE)
2971 				break;
2972 			else
2973 				msec_delay(1);
2974 		}
2975 		wmb();
2976 	}
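
/*
 * For reference, the 82598-only value 0x080420 written above decodes
 * (assuming byte-aligned RXDCTL threshold fields) as PTHRESH = 0x20 (32),
 * HTHRESH = 0x04 (4) and WTHRESH = 0x08 (8).
 */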
2977 
2978 	/* Enable Receive engine */
2979 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2980 	if (hw->mac.type == ixgbe_mac_82598EB)
2981 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2982 	rxctrl |= IXGBE_RXCTRL_RXEN;
2983 	ixgbe_enable_rx_dma(hw, rxctrl);
2984 
2985 	/* Set up MSI/MSI-X routing */
2986 	if (ixgbe_enable_msix)  {
2987 		ixgbe_configure_ivars(adapter);
2988 		/* Set up auto-mask */
2989 		if (hw->mac.type == ixgbe_mac_82598EB)
2990 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2991 		else {
2992 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2993 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2994 		}
2995 	} else {  /* Simple settings for Legacy/MSI */
2996 		ixgbe_set_ivar(adapter, 0, 0, 0);
2997 		ixgbe_set_ivar(adapter, 0, 0, 1);
2998 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2999 	}
3000 
3001 	ixgbe_init_fdir(adapter);
3002 
3003 	/*
3004 	 * Check on any SFP devices that
3005 	 * need to be kick-started
3006 	 */
3007 	if (hw->phy.type == ixgbe_phy_none) {
3008 		err = hw->phy.ops.identify(hw);
3009 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3010 			device_printf(dev,
3011 			    "Unsupported SFP+ module type was detected.\n");
3012 			return;
3013 		}
3014 	}
3015 
3016 	/* Set moderation on the Link interrupt */
3017 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3018 
3019 	/* Enable power to the phy. */
3020 	ixgbe_set_phy_power(hw, TRUE);
3021 
3022 	/* Config/Enable Link */
3023 	ixgbe_config_link(adapter);
3024 
3025 	/* Hardware Packet Buffer & Flow Control setup */
3026 	ixgbe_config_delay_values(adapter);
3027 
3028 	/* Initialize the FC settings */
3029 	ixgbe_start_hw(hw);
3030 
3031 	/* Set up VLAN support and filter */
3032 	ixgbe_setup_vlan_hw_support(ctx);
3033 
3034 	/* Setup DMA Coalescing */
3035 	ixgbe_config_dmac(adapter);
3036 
3037 	/* And now turn on interrupts */
3038 	ixgbe_if_enable_intr(ctx);
3039 
3040 	/* Enable use of the mailbox (MBX) by the VFs */
3041 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3042 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3043 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3044 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3045 	}
3046 
3047 } /* ixgbe_if_init */
3048 
3049 /************************************************************************
3050  * ixgbe_set_ivar
3051  *
3052  *   Setup the correct IVAR register for a particular MSI-X interrupt
3053  *     (yes this is all very magic and confusing :)
3054  *    - entry is the register array entry
3055  *    - vector is the MSI-X vector for this queue
3056  *    - type is RX/TX/MISC
3057  ************************************************************************/
3058 static void
3059 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3060 {
3061 	struct ixgbe_hw *hw = &adapter->hw;
3062 	u32 ivar, index;
3063 
3064 	vector |= IXGBE_IVAR_ALLOC_VAL;
3065 
3066 	switch (hw->mac.type) {
3067 	case ixgbe_mac_82598EB:
3068 		if (type == -1)
3069 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3070 		else
3071 			entry += (type * 64);
3072 		index = (entry >> 2) & 0x1F;
3073 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3074 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3075 		ivar |= (vector << (8 * (entry & 0x3)));
3076 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3077 		break;
3078 	case ixgbe_mac_82599EB:
3079 	case ixgbe_mac_X540:
3080 	case ixgbe_mac_X550:
3081 	case ixgbe_mac_X550EM_x:
3082 	case ixgbe_mac_X550EM_a:
3083 		if (type == -1) { /* MISC IVAR */
3084 			index = (entry & 1) * 8;
3085 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3086 			ivar &= ~(0xFF << index);
3087 			ivar |= (vector << index);
3088 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3089 		} else {          /* RX/TX IVARS */
3090 			index = (16 * (entry & 1)) + (8 * type);
3091 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3092 			ivar &= ~(0xFF << index);
3093 			ivar |= (vector << index);
3094 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3095 		}
3096 	default:
3097 		break;
3098 	}
3099 } /* ixgbe_set_ivar */
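
/*
 * Worked example (82599 and newer): mapping the RX cause of queue 3 to
 * MSI-X vector 5 is ixgbe_set_ivar(adapter, 3, 5, 0).  The vector is OR'd
 * with IXGBE_IVAR_ALLOC_VAL, index = (16 * (3 & 1)) + (8 * 0) = 16, and
 * byte 2 of IVAR(3 >> 1) = IVAR(1) is rewritten with the new vector.
 */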
3100 
3101 /************************************************************************
3102  * ixgbe_configure_ivars
3103  ************************************************************************/
3104 static void
3105 ixgbe_configure_ivars(struct adapter *adapter)
3106 {
3107 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3108 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3109 	u32                newitr;
3110 
3111 	if (ixgbe_max_interrupt_rate > 0)
3112 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3113 	else {
3114 		/*
3115 		 * Disable DMA coalescing if interrupt moderation is
3116 		 * disabled.
3117 		 */
3118 		adapter->dmac = 0;
3119 		newitr = 0;
3120 	}
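
	/*
	 * Illustration: with ixgbe_max_interrupt_rate set to 31250, newitr
	 * becomes (4000000 / 31250) & 0x0FF8 = 128 (0x80); the mask keeps
	 * only the bits that land in the EITR interval field.
	 */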
3121 
3122 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3123 		struct rx_ring *rxr = &rx_que->rxr;
3124 
3125 		/* First the RX queue entry */
3126 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3127 
3128 		/* Set an Initial EITR value */
3129 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3130 	}
3131 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3132 		struct tx_ring *txr = &tx_que->txr;
3133 
3134 		/* ... and the TX */
3135 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3136 	}
3137 	/* For the Link interrupt */
3138 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3139 } /* ixgbe_configure_ivars */
3140 
3141 /************************************************************************
3142  * ixgbe_config_gpie
3143  ************************************************************************/
3144 static void
3145 ixgbe_config_gpie(struct adapter *adapter)
3146 {
3147 	struct ixgbe_hw *hw = &adapter->hw;
3148 	u32             gpie;
3149 
3150 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3151 
3152 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3153 		/* Enable Enhanced MSI-X mode */
3154 		gpie |= IXGBE_GPIE_MSIX_MODE
3155 		     |  IXGBE_GPIE_EIAME
3156 		     |  IXGBE_GPIE_PBA_SUPPORT
3157 		     |  IXGBE_GPIE_OCD;
3158 	}
3159 
3160 	/* Fan Failure Interrupt */
3161 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3162 		gpie |= IXGBE_SDP1_GPIEN;
3163 
3164 	/* Thermal Sensor Interrupt */
3165 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3166 		gpie |= IXGBE_SDP0_GPIEN_X540;
3167 
3168 	/* Link detection */
3169 	switch (hw->mac.type) {
3170 	case ixgbe_mac_82599EB:
3171 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3172 		break;
3173 	case ixgbe_mac_X550EM_x:
3174 	case ixgbe_mac_X550EM_a:
3175 		gpie |= IXGBE_SDP0_GPIEN_X540;
3176 		break;
3177 	default:
3178 		break;
3179 	}
3180 
3181 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3182 
3183 } /* ixgbe_config_gpie */
3184 
3185 /************************************************************************
3186  * ixgbe_config_delay_values
3187  *
3188  *   Requires adapter->max_frame_size to be set.
3189  ************************************************************************/
3190 static void
3191 ixgbe_config_delay_values(struct adapter *adapter)
3192 {
3193 	struct ixgbe_hw *hw = &adapter->hw;
3194 	u32             rxpb, frame, size, tmp;
3195 
3196 	frame = adapter->max_frame_size;
3197 
3198 	/* Calculate High Water */
3199 	switch (hw->mac.type) {
3200 	case ixgbe_mac_X540:
3201 	case ixgbe_mac_X550:
3202 	case ixgbe_mac_X550EM_x:
3203 	case ixgbe_mac_X550EM_a:
3204 		tmp = IXGBE_DV_X540(frame, frame);
3205 		break;
3206 	default:
3207 		tmp = IXGBE_DV(frame, frame);
3208 		break;
3209 	}
3210 	size = IXGBE_BT2KB(tmp);
3211 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3212 	hw->fc.high_water[0] = rxpb - size;
3213 
3214 	/* Now calculate Low Water */
3215 	switch (hw->mac.type) {
3216 	case ixgbe_mac_X540:
3217 	case ixgbe_mac_X550:
3218 	case ixgbe_mac_X550EM_x:
3219 	case ixgbe_mac_X550EM_a:
3220 		tmp = IXGBE_LOW_DV_X540(frame);
3221 		break;
3222 	default:
3223 		tmp = IXGBE_LOW_DV(frame);
3224 		break;
3225 	}
3226 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3227 
3228 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3229 	hw->fc.send_xon = TRUE;
3230 } /* ixgbe_config_delay_values */
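
/*
 * Sketch of the intent behind the calculation above: the DV()/DV_X540()
 * macros estimate the worst-case data in flight between sending an XOFF
 * pause frame and the link partner actually stopping, for a given frame
 * size.  high_water is the RX packet buffer fill level (in KB) at which
 * XOFF is sent; low_water is the level at which XON resumes traffic.
 */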
3231 
3232 /************************************************************************
3233  * ixgbe_mc_filter_apply - Multicast list iterator
3234  *
3235  *   Called via if_multi_apply() whenever the multicast list is updated.
3236  ************************************************************************/
3237 static int
3238 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3239 {
3240 	struct adapter *adapter = arg;
3241 	struct ixgbe_mc_addr *mta = adapter->mta;
3242 
3243 	if (ifma->ifma_addr->sa_family != AF_LINK)
3244 		return (0);
3245 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3246 		return (0);
3247 	bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3248 	    mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3249 	mta[count].vmdq = adapter->pool;
3250 
3251 	return (1);
3252 } /* ixgbe_mc_filter_apply */
3253 
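/************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 *   Called by iflib whenever the multicast address list changes;
 *   rebuilds the MTA via ixgbe_mc_filter_apply() and updates FCTRL.
 ************************************************************************/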
3254 static void
3255 ixgbe_if_multi_set(if_ctx_t ctx)
3256 {
3257 	struct adapter       *adapter = iflib_get_softc(ctx);
3258 	struct ixgbe_mc_addr *mta;
3259 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3260 	u8                   *update_ptr;
3261 	int                  mcnt = 0;
3262 	u32                  fctrl;
3263 
3264 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3265 
3266 	mta = adapter->mta;
3267 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3268 
3269 	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3270 
3271 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3272 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3273 	if (ifp->if_flags & IFF_PROMISC)
3274 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3275 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3276 	    ifp->if_flags & IFF_ALLMULTI) {
3277 		fctrl |= IXGBE_FCTRL_MPE;
3278 		fctrl &= ~IXGBE_FCTRL_UPE;
3279 	} else
3280 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3281 
3282 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3283 
3284 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3285 		update_ptr = (u8 *)mta;
3286 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3287 		    ixgbe_mc_array_itr, TRUE);
3288 	}
3289 
3290 } /* ixgbe_if_multi_set */
3291 
3292 /************************************************************************
3293  * ixgbe_mc_array_itr
3294  *
3295  *   An iterator function needed by the multicast shared code.
3296  *   It feeds the shared code routine the addresses in the
3297  *   array of ixgbe_set_multi() one by one.
3298  ************************************************************************/
3299 static u8 *
3300 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3301 {
3302 	struct ixgbe_mc_addr *mta;
3303 
3304 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3305 	*vmdq = mta->vmdq;
3306 
3307 	*update_ptr = (u8*)(mta + 1);
3308 
3309 	return (mta->addr);
3310 } /* ixgbe_mc_array_itr */
3311 
3312 /************************************************************************
3313  * ixgbe_if_timer - Timer routine
3314  *
3315  *   Checks for link status, updates statistics,
3316  *   and runs the watchdog check.
3317  ************************************************************************/
3318 static void
3319 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3320 {
3321 	struct adapter *adapter = iflib_get_softc(ctx);
3322 
3323 	if (qid != 0)
3324 		return;
3325 
3326 	/* Check for pluggable optics */
3327 	if (adapter->sfp_probe)
3328 		if (!ixgbe_sfp_probe(ctx))
3329 			return; /* Nothing to do */
3330 
3331 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3332 	    &adapter->link_up, 0);
3333 
3334 	/* Fire off the adminq task */
3335 	iflib_admin_intr_deferred(ctx);
3336 
3337 } /* ixgbe_if_timer */
3338 
3339 /************************************************************************
3340  * ixgbe_sfp_probe
3341  *
3342  *   Determine if a port had optics inserted.
3343  ************************************************************************/
3344 static bool
3345 ixgbe_sfp_probe(if_ctx_t ctx)
3346 {
3347 	struct adapter  *adapter = iflib_get_softc(ctx);
3348 	struct ixgbe_hw *hw = &adapter->hw;
3349 	device_t        dev = iflib_get_dev(ctx);
3350 	bool            result = FALSE;
3351 
3352 	if ((hw->phy.type == ixgbe_phy_nl) &&
3353 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3354 		s32 ret = hw->phy.ops.identify_sfp(hw);
3355 		if (ret)
3356 			goto out;
3357 		ret = hw->phy.ops.reset(hw);
3358 		adapter->sfp_probe = FALSE;
3359 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3360 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3361 			device_printf(dev,
3362 			    "Reload driver with supported module.\n");
3363 			goto out;
3364 		} else
3365 			device_printf(dev, "SFP+ module detected!\n");
3366 		/* We now have supported optics */
3367 		result = TRUE;
3368 	}
3369 out:
3370 
3371 	return (result);
3372 } /* ixgbe_sfp_probe */
3373 
3374 /************************************************************************
3375  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3376  ************************************************************************/
3377 static void
3378 ixgbe_handle_mod(void *context)
3379 {
3380 	if_ctx_t        ctx = context;
3381 	struct adapter  *adapter = iflib_get_softc(ctx);
3382 	struct ixgbe_hw *hw = &adapter->hw;
3383 	device_t        dev = iflib_get_dev(ctx);
3384 	u32             err, cage_full = 0;
3385 
3386 	adapter->sfp_reinit = 1;
3387 	if (adapter->hw.need_crosstalk_fix) {
3388 		switch (hw->mac.type) {
3389 		case ixgbe_mac_82599EB:
3390 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3391 			    IXGBE_ESDP_SDP2;
3392 			break;
3393 		case ixgbe_mac_X550EM_x:
3394 		case ixgbe_mac_X550EM_a:
3395 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3396 			    IXGBE_ESDP_SDP0;
3397 			break;
3398 		default:
3399 			break;
3400 		}
3401 
3402 		if (!cage_full)
3403 			goto handle_mod_out;
3404 	}
3405 
3406 	err = hw->phy.ops.identify_sfp(hw);
3407 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3408 		device_printf(dev,
3409 		    "Unsupported SFP+ module type was detected.\n");
3410 		goto handle_mod_out;
3411 	}
3412 
3413 	if (hw->mac.type == ixgbe_mac_82598EB)
3414 		err = hw->phy.ops.reset(hw);
3415 	else
3416 		err = hw->mac.ops.setup_sfp(hw);
3417 
3418 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3419 		device_printf(dev,
3420 		    "Setup failure - unsupported SFP+ module type.\n");
3421 		goto handle_mod_out;
3422 	}
3423 	GROUPTASK_ENQUEUE(&adapter->msf_task);
3424 	return;
3425 
3426 handle_mod_out:
3427 	adapter->sfp_reinit = 0;
3428 } /* ixgbe_handle_mod */
3429 
3430 
3431 /************************************************************************
3432  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3433  ************************************************************************/
3434 static void
3435 ixgbe_handle_msf(void *context)
3436 {
3437 	if_ctx_t        ctx = context;
3438 	struct adapter  *adapter = iflib_get_softc(ctx);
3439 	struct ixgbe_hw *hw = &adapter->hw;
3440 	u32             autoneg;
3441 	bool            negotiate;
3442 
3443 	if (adapter->sfp_reinit != 1)
3444 		return;
3445 
3446 	/* get_supported_physical_layer will call hw->phy.ops.identify_sfp() */
3447 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3448 
3449 	autoneg = hw->phy.autoneg_advertised;
3450 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3451 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3452 	if (hw->mac.ops.setup_link)
3453 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3454 
3455 	/* Adjust media types shown in ifconfig */
3456 	ifmedia_removeall(adapter->media);
3457 	ixgbe_add_media_types(adapter->ctx);
3458 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3459 
3460 	adapter->sfp_reinit = 0;
3461 } /* ixgbe_handle_msf */
3462 
3463 /************************************************************************
3464  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3465  ************************************************************************/
3466 static void
3467 ixgbe_handle_phy(void *context)
3468 {
3469 	if_ctx_t        ctx = context;
3470 	struct adapter  *adapter = iflib_get_softc(ctx);
3471 	struct ixgbe_hw *hw = &adapter->hw;
3472 	int             error;
3473 
3474 	error = hw->phy.ops.handle_lasi(hw);
3475 	if (error == IXGBE_ERR_OVERTEMP)
3476 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3477 	else if (error)
3478 		device_printf(adapter->dev,
3479 		    "Error handling LASI interrupt: %d\n", error);
3480 } /* ixgbe_handle_phy */
3481 
3482 /************************************************************************
3483  * ixgbe_if_stop - Stop the hardware
3484  *
3485  *   Disables all traffic on the adapter by issuing a
3486  *   global reset on the MAC and deallocates TX/RX buffers.
3487  ************************************************************************/
3488 static void
3489 ixgbe_if_stop(if_ctx_t ctx)
3490 {
3491 	struct adapter  *adapter = iflib_get_softc(ctx);
3492 	struct ixgbe_hw *hw = &adapter->hw;
3493 
3494 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3495 
3496 	ixgbe_reset_hw(hw);
3497 	hw->adapter_stopped = FALSE;
3498 	ixgbe_stop_adapter(hw);
3499 	if (hw->mac.type == ixgbe_mac_82599EB)
3500 		ixgbe_stop_mac_link_on_d3_82599(hw);
3501 	/* Turn off the laser - noop with no optics */
3502 	ixgbe_disable_tx_laser(hw);
3503 
3504 	/* Update the stack */
3505 	adapter->link_up = FALSE;
3506 	ixgbe_if_update_admin_status(ctx);
3507 
3508 	/* reprogram the RAR[0] in case user changed it. */
3509 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3510 
3511 	return;
3512 } /* ixgbe_if_stop */
3513 
3514 /************************************************************************
3515  * ixgbe_if_update_admin_status - Update OS on link state
3516  *
3517  * Note: Only updates the OS on the cached link state.
3518  *       The real check of the hardware only happens with
3519  *       a link interrupt.
3520  ************************************************************************/
3521 static void
3522 ixgbe_if_update_admin_status(if_ctx_t ctx)
3523 {
3524 	struct adapter *adapter = iflib_get_softc(ctx);
3525 	device_t       dev = iflib_get_dev(ctx);
3526 
3527 	if (adapter->link_up) {
3528 		if (adapter->link_active == FALSE) {
3529 			if (bootverbose)
3530 				device_printf(dev, "Link is up %d Gbps %s \n",
3531 				    ((adapter->link_speed == 128) ? 10 : 1),
3532 				    "Full Duplex");
3533 			adapter->link_active = TRUE;
3534 			/* Update any Flow Control changes */
3535 			ixgbe_fc_enable(&adapter->hw);
3536 			/* Update DMA coalescing config */
3537 			ixgbe_config_dmac(adapter);
3538 			/* should actually be negotiated value */
3539 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3540 
3541 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3542 				ixgbe_ping_all_vfs(adapter);
3543 		}
3544 	} else { /* Link down */
3545 		if (adapter->link_active == TRUE) {
3546 			if (bootverbose)
3547 				device_printf(dev, "Link is Down\n");
3548 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3549 			adapter->link_active = FALSE;
3550 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3551 				ixgbe_ping_all_vfs(adapter);
3552 		}
3553 	}
3554 
3555 	ixgbe_update_stats_counters(adapter);
3556 
3557 	/* Re-enable link interrupts */
3558 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3559 } /* ixgbe_if_update_admin_status */
3560 
3561 /************************************************************************
3562  * ixgbe_config_dmac - Configure DMA Coalescing
3563  ************************************************************************/
3564 static void
3565 ixgbe_config_dmac(struct adapter *adapter)
3566 {
3567 	struct ixgbe_hw          *hw = &adapter->hw;
3568 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3569 
3570 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3571 		return;
3572 
3573 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3574 	    dcfg->link_speed ^ adapter->link_speed) {
3575 		dcfg->watchdog_timer = adapter->dmac;
3576 		dcfg->fcoe_en = FALSE;
3577 		dcfg->link_speed = adapter->link_speed;
3578 		dcfg->num_tcs = 1;
3579 
3580 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3581 		    dcfg->watchdog_timer, dcfg->link_speed);
3582 
3583 		hw->mac.ops.dmac_config(hw);
3584 	}
3585 } /* ixgbe_config_dmac */
3586 
3587 /************************************************************************
3588  * ixgbe_if_enable_intr
3589  ************************************************************************/
3590 void
3591 ixgbe_if_enable_intr(if_ctx_t ctx)
3592 {
3593 	struct adapter     *adapter = iflib_get_softc(ctx);
3594 	struct ixgbe_hw    *hw = &adapter->hw;
3595 	struct ix_rx_queue *que = adapter->rx_queues;
3596 	u32                mask, fwsm;
3597 
3598 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3599 
3600 	switch (adapter->hw.mac.type) {
3601 	case ixgbe_mac_82599EB:
3602 		mask |= IXGBE_EIMS_ECC;
3603 		/* Temperature sensor on some adapters */
3604 		mask |= IXGBE_EIMS_GPI_SDP0;
3605 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3606 		mask |= IXGBE_EIMS_GPI_SDP1;
3607 		mask |= IXGBE_EIMS_GPI_SDP2;
3608 		break;
3609 	case ixgbe_mac_X540:
3610 		/* Detect if Thermal Sensor is enabled */
3611 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3612 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3613 			mask |= IXGBE_EIMS_TS;
3614 		mask |= IXGBE_EIMS_ECC;
3615 		break;
3616 	case ixgbe_mac_X550:
3617 		/* MAC thermal sensor is automatically enabled */
3618 		mask |= IXGBE_EIMS_TS;
3619 		mask |= IXGBE_EIMS_ECC;
3620 		break;
3621 	case ixgbe_mac_X550EM_x:
3622 	case ixgbe_mac_X550EM_a:
3623 		/* Some devices use SDP0 for important information */
3624 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3625 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3626 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3627 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3628 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3629 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3630 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3631 		mask |= IXGBE_EIMS_ECC;
3632 		break;
3633 	default:
3634 		break;
3635 	}
3636 
3637 	/* Enable Fan Failure detection */
3638 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3639 		mask |= IXGBE_EIMS_GPI_SDP1;
3640 	/* Enable SR-IOV */
3641 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3642 		mask |= IXGBE_EIMS_MAILBOX;
3643 	/* Enable Flow Director */
3644 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3645 		mask |= IXGBE_EIMS_FLOW_DIR;
3646 
3647 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3648 
3649 	/* With MSI-X we use auto clear */
3650 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3651 		mask = IXGBE_EIMS_ENABLE_MASK;
3652 		/* Don't autoclear Link */
3653 		mask &= ~IXGBE_EIMS_OTHER;
3654 		mask &= ~IXGBE_EIMS_LSC;
3655 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3656 			mask &= ~IXGBE_EIMS_MAILBOX;
3657 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3658 	}
3659 
3660 	/*
3661 	 * Now enable all queues, this is done separately to
3662 	 * allow for handling the extended (beyond 32) MSI-X
3663 	 * vectors that can be used by 82599
3664 	 */
3665 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3666 		ixgbe_enable_queue(adapter, que->msix);
3667 
3668 	IXGBE_WRITE_FLUSH(hw);
3669 
3670 } /* ixgbe_if_enable_intr */
3671 
3672 /************************************************************************
3673  * ixgbe_if_disable_intr
3674  ************************************************************************/
3675 static void
3676 ixgbe_if_disable_intr(if_ctx_t ctx)
3677 {
3678 	struct adapter *adapter = iflib_get_softc(ctx);
3679 
3680 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3681 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3682 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3683 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3684 	} else {
3685 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3686 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3687 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3688 	}
3689 	IXGBE_WRITE_FLUSH(&adapter->hw);
3690 
3691 } /* ixgbe_if_disable_intr */
3692 
3693 /************************************************************************
3694  * ixgbe_if_rx_queue_intr_enable
3695  ************************************************************************/
3696 static int
3697 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3698 {
3699 	struct adapter     *adapter = iflib_get_softc(ctx);
3700 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3701 
3702 	ixgbe_enable_queue(adapter, que->rxr.me);
3703 
3704 	return (0);
3705 } /* ixgbe_if_rx_queue_intr_enable */
3706 
3707 /************************************************************************
3708  * ixgbe_enable_queue
3709  ************************************************************************/
3710 static void
3711 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3712 {
3713 	struct ixgbe_hw *hw = &adapter->hw;
3714 	u64             queue = (u64)1 << vector;
3715 	u32             mask;
3716 
3717 	if (hw->mac.type == ixgbe_mac_82598EB) {
3718 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3719 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3720 	} else {
3721 		mask = (queue & 0xFFFFFFFF);
3722 		if (mask)
3723 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3724 		mask = (queue >> 32);
3725 		if (mask)
3726 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3727 	}
3728 } /* ixgbe_enable_queue */
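
/*
 * Example: enabling MSI-X vector 35 sets bit 3 of EIMS_EX(1) (35 - 32),
 * while vectors 0-31 land in EIMS_EX(0); the 82598 branch above uses the
 * single 32-bit EIMS register instead.
 */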
3729 
3730 /************************************************************************
3731  * ixgbe_disable_queue
3732  ************************************************************************/
3733 static void
3734 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3735 {
3736 	struct ixgbe_hw *hw = &adapter->hw;
3737 	u64             queue = (u64)1 << vector;
3738 	u32             mask;
3739 
3740 	if (hw->mac.type == ixgbe_mac_82598EB) {
3741 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3742 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3743 	} else {
3744 		mask = (queue & 0xFFFFFFFF);
3745 		if (mask)
3746 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3747 		mask = (queue >> 32);
3748 		if (mask)
3749 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3750 	}
3751 } /* ixgbe_disable_queue */
3752 
3753 /************************************************************************
3754  * ixgbe_intr - Legacy Interrupt Service Routine
3755  ************************************************************************/
3756 int
3757 ixgbe_intr(void *arg)
3758 {
3759 	struct adapter     *adapter = arg;
3760 	struct ix_rx_queue *que = adapter->rx_queues;
3761 	struct ixgbe_hw    *hw = &adapter->hw;
3762 	if_ctx_t           ctx = adapter->ctx;
3763 	u32                eicr, eicr_mask;
3764 
3765 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3766 
3767 	++que->irqs;
3768 	if (eicr == 0) {
3769 		ixgbe_if_enable_intr(ctx);
3770 		return (FILTER_HANDLED);
3771 	}
3772 
3773 	/* Check for fan failure */
3774 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3775 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3776 		device_printf(adapter->dev,
3777 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3778 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3779 	}
3780 
3781 	/* Link status change */
3782 	if (eicr & IXGBE_EICR_LSC) {
3783 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3784 		iflib_admin_intr_deferred(ctx);
3785 	}
3786 
3787 	if (ixgbe_is_sfp(hw)) {
3788 		/* Pluggable optics-related interrupt */
3789 		if (hw->mac.type >= ixgbe_mac_X540)
3790 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3791 		else
3792 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3793 
3794 		if (eicr & eicr_mask) {
3795 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3796 			GROUPTASK_ENQUEUE(&adapter->mod_task);
3797 		}
3798 
3799 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3800 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3801 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3802 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3803 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
3804 				GROUPTASK_ENQUEUE(&adapter->msf_task);
3805 		}
3806 	}
3807 
3808 	/* External PHY interrupt */
3809 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3810 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3811 		GROUPTASK_ENQUEUE(&adapter->phy_task);
3812 
3813 	return (FILTER_SCHEDULE_THREAD);
3814 } /* ixgbe_intr */
3815 
3816 /************************************************************************
3817  * ixgbe_free_pci_resources
3818  ************************************************************************/
3819 static void
3820 ixgbe_free_pci_resources(if_ctx_t ctx)
3821 {
3822 	struct adapter *adapter = iflib_get_softc(ctx);
3823 	struct         ix_rx_queue *que = adapter->rx_queues;
3824 	device_t       dev = iflib_get_dev(ctx);
3825 
3826 	/* Release all msix queue resources */
3827 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3828 		iflib_irq_free(ctx, &adapter->irq);
3829 
3830 	if (que != NULL) {
3831 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3832 			iflib_irq_free(ctx, &que->que_irq);
3833 		}
3834 	}
3835 
3836 	/*
3837 	 * Release the memory-mapped register space (BAR 0)
3838 	 */
3839 	if (adapter->pci_mem != NULL)
3840 		bus_release_resource(dev, SYS_RES_MEMORY,
3841 		                     PCIR_BAR(0), adapter->pci_mem);
3842 
3843 } /* ixgbe_free_pci_resources */
3844 
3845 /************************************************************************
3846  * ixgbe_sysctl_flowcntl
3847  *
3848  *   SYSCTL wrapper around setting Flow Control
3849  ************************************************************************/
3850 static int
3851 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3852 {
3853 	struct adapter *adapter;
3854 	int            error, fc;
3855 
3856 	adapter = (struct adapter *)arg1;
3857 	fc = adapter->hw.fc.current_mode;
3858 
3859 	error = sysctl_handle_int(oidp, &fc, 0, req);
3860 	if ((error) || (req->newptr == NULL))
3861 		return (error);
3862 
3863 	/* Don't bother if it's not changed */
3864 	if (fc == adapter->hw.fc.current_mode)
3865 		return (0);
3866 
3867 	return ixgbe_set_flowcntl(adapter, fc);
3868 } /* ixgbe_sysctl_flowcntl */
3869 
3870 /************************************************************************
3871  * ixgbe_set_flowcntl - Set flow control
3872  *
3873  *   Flow control values:
3874  *     0 - off
3875  *     1 - rx pause
3876  *     2 - tx pause
3877  *     3 - full
3878  ************************************************************************/
3879 static int
3880 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3881 {
3882 	switch (fc) {
3883 	case ixgbe_fc_rx_pause:
3884 	case ixgbe_fc_tx_pause:
3885 	case ixgbe_fc_full:
3886 		adapter->hw.fc.requested_mode = fc;
3887 		if (adapter->num_rx_queues > 1)
3888 			ixgbe_disable_rx_drop(adapter);
3889 		break;
3890 	case ixgbe_fc_none:
3891 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3892 		if (adapter->num_rx_queues > 1)
3893 			ixgbe_enable_rx_drop(adapter);
3894 		break;
3895 	default:
3896 		return (EINVAL);
3897 	}
3898 
3899 	/* Don't autoneg if forcing a value */
3900 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3901 	ixgbe_fc_enable(&adapter->hw);
3902 
3903 	return (0);
3904 } /* ixgbe_set_flowcntl */
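
/*
 * Usage sketch, assuming the handler above is attached as "fc" under the
 * per-device sysctl tree (e.g. dev.ix.0.fc):
 *
 *   sysctl dev.ix.0.fc=3   - request full RX/TX pause
 *   sysctl dev.ix.0.fc=0   - disable flow control (with multiple RX
 *                            queues this also enables per-queue drop)
 */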
3905 
3906 /************************************************************************
3907  * ixgbe_enable_rx_drop
3908  *
3909  *   Enable the hardware to drop packets when the buffer is
3910  *   full. This is useful with multiqueue, so that no single
3911  *   queue being full stalls the entire RX engine. We only
3912  *   enable this when Multiqueue is enabled AND Flow Control
3913  *   is disabled.
3914  ************************************************************************/
3915 static void
3916 ixgbe_enable_rx_drop(struct adapter *adapter)
3917 {
3918 	struct ixgbe_hw *hw = &adapter->hw;
3919 	struct rx_ring  *rxr;
3920 	u32             srrctl;
3921 
3922 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3923 		rxr = &adapter->rx_queues[i].rxr;
3924 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3925 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3926 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3927 	}
3928 
3929 	/* enable drop for each vf */
3930 	for (int i = 0; i < adapter->num_vfs; i++) {
3931 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3932 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3933 		                IXGBE_QDE_ENABLE));
3934 	}
3935 } /* ixgbe_enable_rx_drop */
3936 
3937 /************************************************************************
3938  * ixgbe_disable_rx_drop
3939  ************************************************************************/
3940 static void
3941 ixgbe_disable_rx_drop(struct adapter *adapter)
3942 {
3943 	struct ixgbe_hw *hw = &adapter->hw;
3944 	struct rx_ring  *rxr;
3945 	u32             srrctl;
3946 
3947 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3948 		rxr = &adapter->rx_queues[i].rxr;
3949 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3950 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3951 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3952 	}
3953 
3954 	/* disable drop for each vf */
3955 	for (int i = 0; i < adapter->num_vfs; i++) {
3956 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3957 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3958 	}
3959 } /* ixgbe_disable_rx_drop */
3960 
3961 /************************************************************************
3962  * ixgbe_sysctl_advertise
3963  *
3964  *   SYSCTL wrapper around setting advertised speed
3965  ************************************************************************/
3966 static int
3967 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3968 {
3969 	struct adapter *adapter;
3970 	int            error, advertise;
3971 
3972 	adapter = (struct adapter *)arg1;
3973 	advertise = adapter->advertise;
3974 
3975 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3976 	if ((error) || (req->newptr == NULL))
3977 		return (error);
3978 
3979 	return ixgbe_set_advertise(adapter, advertise);
3980 } /* ixgbe_sysctl_advertise */
3981 
3982 /************************************************************************
3983  * ixgbe_set_advertise - Control advertised link speed
3984  *
3985  *   Flags:
3986  *     0x1 - advertise 100 Mb
3987  *     0x2 - advertise 1G
3988  *     0x4 - advertise 10G
3989  *     0x8 - advertise 10 Mb (yes, Mb)
3990  ************************************************************************/
3991 static int
3992 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3993 {
3994 	device_t         dev = iflib_get_dev(adapter->ctx);
3995 	struct ixgbe_hw  *hw;
3996 	ixgbe_link_speed speed = 0;
3997 	ixgbe_link_speed link_caps = 0;
3998 	s32              err = IXGBE_NOT_IMPLEMENTED;
3999 	bool             negotiate = FALSE;
4000 
4001 	/* Checks to validate new value */
4002 	if (adapter->advertise == advertise) /* no change */
4003 		return (0);
4004 
4005 	hw = &adapter->hw;
4006 
4007 	/* No speed changes for backplane media */
4008 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4009 		return (ENODEV);
4010 
4011 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4012 	      (hw->phy.multispeed_fiber))) {
4013 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4014 		return (EINVAL);
4015 	}
4016 
4017 	if (advertise < 0x1 || advertise > 0xF) {
4018 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4019 		return (EINVAL);
4020 	}
4021 
4022 	if (hw->mac.ops.get_link_capabilities) {
4023 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4024 		    &negotiate);
4025 		if (err != IXGBE_SUCCESS) {
4026 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4027 			return (ENODEV);
4028 		}
4029 	}
4030 
4031 	/* Set new value and report new advertised mode */
4032 	if (advertise & 0x1) {
4033 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4034 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4035 			return (EINVAL);
4036 		}
4037 		speed |= IXGBE_LINK_SPEED_100_FULL;
4038 	}
4039 	if (advertise & 0x2) {
4040 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4041 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4042 			return (EINVAL);
4043 		}
4044 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4045 	}
4046 	if (advertise & 0x4) {
4047 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4048 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4049 			return (EINVAL);
4050 		}
4051 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4052 	}
4053 	if (advertise & 0x8) {
4054 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4055 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4056 			return (EINVAL);
4057 		}
4058 		speed |= IXGBE_LINK_SPEED_10_FULL;
4059 	}
4060 
4061 	hw->mac.autotry_restart = TRUE;
4062 	hw->mac.ops.setup_link(hw, speed, TRUE);
4063 	adapter->advertise = advertise;
4064 
4065 	return (0);
4066 } /* ixgbe_set_advertise */
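
/*
 * Example: advertise = 0x6 requests 1G (0x2) and 10G (0x4), which maps to
 * speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL before
 * setup_link() is called.
 */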
4067 
4068 /************************************************************************
4069  * ixgbe_get_advertise - Get current advertised speed settings
4070  *
4071  *   Formatted for sysctl usage.
4072  *   Flags:
4073  *     0x1 - advertise 100 Mb
4074  *     0x2 - advertise 1G
4075  *     0x4 - advertise 10G
4076  *     0x8 - advertise 10 Mb (yes, Mb)
4077  ************************************************************************/
4078 static int
4079 ixgbe_get_advertise(struct adapter *adapter)
4080 {
4081 	struct ixgbe_hw  *hw = &adapter->hw;
4082 	int              speed;
4083 	ixgbe_link_speed link_caps = 0;
4084 	s32              err;
4085 	bool             negotiate = FALSE;
4086 
4087 	/*
4088 	 * Advertised speed means nothing unless it's copper or
4089 	 * multi-speed fiber
4090 	 */
4091 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4092 	    !(hw->phy.multispeed_fiber))
4093 		return (0);
4094 
4095 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4096 	if (err != IXGBE_SUCCESS)
4097 		return (0);
4098 
4099 	speed =
4100 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4101 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4102 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4103 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4104 
4105 	return speed;
4106 } /* ixgbe_get_advertise */
4107 
4108 /************************************************************************
4109  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4110  *
4111  *   Control values:
4112  *     0/1 - off / on (use default value of 1000)
4113  *
4114  *     Legal timer values are:
4115  *     50,100,250,500,1000,2000,5000,10000
4116  *
4117  *     Turning off interrupt moderation will also turn this off.
4118  ************************************************************************/
4119 static int
4120 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4121 {
4122 	struct adapter *adapter = (struct adapter *)arg1;
4123 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4124 	int            error;
4125 	u16            newval;
4126 
4127 	newval = adapter->dmac;
4128 	error = sysctl_handle_16(oidp, &newval, 0, req);
4129 	if ((error) || (req->newptr == NULL))
4130 		return (error);
4131 
4132 	switch (newval) {
4133 	case 0:
4134 		/* Disabled */
4135 		adapter->dmac = 0;
4136 		break;
4137 	case 1:
4138 		/* Enable and use default */
4139 		adapter->dmac = 1000;
4140 		break;
4141 	case 50:
4142 	case 100:
4143 	case 250:
4144 	case 500:
4145 	case 1000:
4146 	case 2000:
4147 	case 5000:
4148 	case 10000:
4149 		/* Legal values - allow */
4150 		adapter->dmac = newval;
4151 		break;
4152 	default:
4153 		/* Do nothing, illegal value */
4154 		return (EINVAL);
4155 	}
4156 
4157 	/* Re-initialize hardware if it's already running */
4158 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4159 		ifp->if_init(ifp);
4160 
4161 	return (0);
4162 } /* ixgbe_sysctl_dmac */
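
/*
 * Usage sketch, assuming the handler above is attached as "dmac" under the
 * per-device sysctl tree:
 *
 *   sysctl dev.ix.0.dmac=1    - enable with the default watchdog value (1000)
 *   sysctl dev.ix.0.dmac=250  - enable with a watchdog value of 250
 *   sysctl dev.ix.0.dmac=0    - disable DMA coalescing
 */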
4163 
4164 #ifdef IXGBE_DEBUG
4165 /************************************************************************
4166  * ixgbe_sysctl_power_state
4167  *
4168  *   Sysctl to test power states
4169  *   Values:
4170  *     0      - set device to D0
4171  *     3      - set device to D3
4172  *     (none) - get current device power state
4173  ************************************************************************/
4174 static int
4175 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4176 {
4177 	struct adapter *adapter = (struct adapter *)arg1;
4178 	device_t       dev = adapter->dev;
4179 	int            curr_ps, new_ps, error = 0;
4180 
4181 	curr_ps = new_ps = pci_get_powerstate(dev);
4182 
4183 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4184 	if ((error) || (req->newptr == NULL))
4185 		return (error);
4186 
4187 	if (new_ps == curr_ps)
4188 		return (0);
4189 
4190 	if (new_ps == 3 && curr_ps == 0)
4191 		error = DEVICE_SUSPEND(dev);
4192 	else if (new_ps == 0 && curr_ps == 3)
4193 		error = DEVICE_RESUME(dev);
4194 	else
4195 		return (EINVAL);
4196 
4197 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4198 
4199 	return (error);
4200 } /* ixgbe_sysctl_power_state */
4201 #endif
4202 
4203 /************************************************************************
4204  * ixgbe_sysctl_wol_enable
4205  *
4206  *   Sysctl to enable/disable the WoL capability,
4207  *   if supported by the adapter.
4208  *
4209  *   Values:
4210  *     0 - disabled
4211  *     1 - enabled
4212  ************************************************************************/
4213 static int
4214 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4215 {
4216 	struct adapter  *adapter = (struct adapter *)arg1;
4217 	struct ixgbe_hw *hw = &adapter->hw;
4218 	int             new_wol_enabled;
4219 	int             error = 0;
4220 
4221 	new_wol_enabled = hw->wol_enabled;
4222 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4223 	if ((error) || (req->newptr == NULL))
4224 		return (error);
4225 	new_wol_enabled = !!(new_wol_enabled);
4226 	if (new_wol_enabled == hw->wol_enabled)
4227 		return (0);
4228 
4229 	if (new_wol_enabled > 0 && !adapter->wol_support)
4230 		return (ENODEV);
4231 	else
4232 		hw->wol_enabled = new_wol_enabled;
4233 
4234 	return (0);
4235 } /* ixgbe_sysctl_wol_enable */
4236 
4237 /************************************************************************
4238  * ixgbe_sysctl_wufc - Wake Up Filter Control
4239  *
4240  *   Sysctl to enable/disable the types of packets that the
4241  *   adapter will wake up on upon receipt.
4242  *   Flags:
4243  *     0x1  - Link Status Change
4244  *     0x2  - Magic Packet
4245  *     0x4  - Direct Exact
4246  *     0x8  - Directed Multicast
4247  *     0x10 - Broadcast
4248  *     0x20 - ARP/IPv4 Request Packet
4249  *     0x40 - Direct IPv4 Packet
4250  *     0x80 - Direct IPv6 Packet
4251  *
4252  *   Settings not listed above will cause the sysctl to return an error.
4253  ************************************************************************/
4254 static int
4255 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4256 {
4257 	struct adapter *adapter = (struct adapter *)arg1;
4258 	int            error = 0;
4259 	u32            new_wufc;
4260 
4261 	new_wufc = adapter->wufc;
4262 
4263 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4264 	if ((error) || (req->newptr == NULL))
4265 		return (error);
4266 	if (new_wufc == adapter->wufc)
4267 		return (0);
4268 
4269 	if (new_wufc & 0xffffff00)
4270 		return (EINVAL);
4271 
4272 	new_wufc &= 0xff;
4273 	new_wufc |= (0xffffff & adapter->wufc);
4274 	adapter->wufc = new_wufc;
4275 
4276 	return (0);
4277 } /* ixgbe_sysctl_wufc */
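
/*
 * Example: a value of 0x3 (0x1 | 0x2) arms wake-up on both Link Status
 * Change and Magic Packet; any value with bits above 0xff set is rejected
 * with EINVAL.
 */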
4278 
4279 #ifdef IXGBE_DEBUG
4280 /************************************************************************
4281  * ixgbe_sysctl_print_rss_config
4282  ************************************************************************/
4283 static int
4284 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4285 {
4286 	struct adapter  *adapter = (struct adapter *)arg1;
4287 	struct ixgbe_hw *hw = &adapter->hw;
4288 	device_t        dev = adapter->dev;
4289 	struct sbuf     *buf;
4290 	int             error = 0, reta_size;
4291 	u32             reg;
4292 
4293 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4294 	if (!buf) {
4295 		device_printf(dev, "Could not allocate sbuf for output.\n");
4296 		return (ENOMEM);
4297 	}
4298 
4299 	// TODO: use sbufs to make a string to print out
4300 	/* Set multiplier for RETA setup and table size based on MAC */
4301 	switch (adapter->hw.mac.type) {
4302 	case ixgbe_mac_X550:
4303 	case ixgbe_mac_X550EM_x:
4304 	case ixgbe_mac_X550EM_a:
4305 		reta_size = 128;
4306 		break;
4307 	default:
4308 		reta_size = 32;
4309 		break;
4310 	}
4311 
4312 	/* Print out the redirection table */
4313 	sbuf_cat(buf, "\n");
4314 	for (int i = 0; i < reta_size; i++) {
4315 		if (i < 32) {
4316 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4317 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4318 		} else {
4319 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4320 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4321 		}
4322 	}
4323 
4324 	// TODO: print more config
4325 
4326 	error = sbuf_finish(buf);
4327 	if (error)
4328 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4329 
4330 	sbuf_delete(buf);
4331 
4332 	return (0);
4333 } /* ixgbe_sysctl_print_rss_config */
4334 #endif /* IXGBE_DEBUG */
4335 
4336 /************************************************************************
4337  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4338  *
4339  *   For X552/X557-AT devices using an external PHY
4340  ************************************************************************/
4341 static int
4342 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4343 {
4344 	struct adapter  *adapter = (struct adapter *)arg1;
4345 	struct ixgbe_hw *hw = &adapter->hw;
4346 	u16             reg;
4347 
4348 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4349 		device_printf(iflib_get_dev(adapter->ctx),
4350 		    "Device has no supported external thermal sensor.\n");
4351 		return (ENODEV);
4352 	}
4353 
4354 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4355 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4356 		device_printf(iflib_get_dev(adapter->ctx),
4357 		    "Error reading from PHY's current temperature register\n");
4358 		return (EAGAIN);
4359 	}
4360 
4361 	/* Shift temp for output */
4362 	reg = reg >> 8;
4363 
4364 	return (sysctl_handle_16(oidp, NULL, reg, req));
4365 } /* ixgbe_sysctl_phy_temp */
4366 
4367 /************************************************************************
4368  * ixgbe_sysctl_phy_overtemp_occurred
4369  *
4370  *   Reports (directly from the PHY) whether the current PHY
4371  *   temperature is over the overtemp threshold.
4372  ************************************************************************/
4373 static int
4374 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4375 {
4376 	struct adapter  *adapter = (struct adapter *)arg1;
4377 	struct ixgbe_hw *hw = &adapter->hw;
4378 	u16             reg;
4379 
4380 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4381 		device_printf(iflib_get_dev(adapter->ctx),
4382 		    "Device has no supported external thermal sensor.\n");
4383 		return (ENODEV);
4384 	}
4385 
4386 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4387 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4388 		device_printf(iflib_get_dev(adapter->ctx),
4389 		    "Error reading from PHY's temperature status register\n");
4390 		return (EAGAIN);
4391 	}
4392 
4393 	/* Get occurrence bit */
4394 	reg = !!(reg & 0x4000);
4395 
4396 	return (sysctl_handle_16(oidp, 0, reg, req));
4397 } /* ixgbe_sysctl_phy_overtemp_occurred */
4398 
4399 /************************************************************************
4400  * ixgbe_sysctl_eee_state
4401  *
4402  *   Sysctl to set EEE power saving feature
4403  *   Values:
4404  *     0      - disable EEE
4405  *     1      - enable EEE
4406  *     (none) - get current device EEE state
4407  ************************************************************************/
4408 static int
4409 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4410 {
4411 	struct adapter *adapter = (struct adapter *)arg1;
4412 	device_t       dev = adapter->dev;
4413 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4414 	int            curr_eee, new_eee, error = 0;
4415 	s32            retval;
4416 
4417 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4418 
4419 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4420 	if ((error) || (req->newptr == NULL))
4421 		return (error);
4422 
4423 	/* Nothing to do */
4424 	if (new_eee == curr_eee)
4425 		return (0);
4426 
4427 	/* Not supported */
4428 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4429 		return (EINVAL);
4430 
4431 	/* Bounds checking */
4432 	if ((new_eee < 0) || (new_eee > 1))
4433 		return (EINVAL);
4434 
4435 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4436 	if (retval) {
4437 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4438 		return (EINVAL);
4439 	}
4440 
4441 	/* Restart auto-neg */
4442 	ifp->if_init(ifp);
4443 
4444 	device_printf(dev, "New EEE state: %d\n", new_eee);
4445 
4446 	/* Cache new value */
4447 	if (new_eee)
4448 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4449 	else
4450 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4451 
4452 	return (error);
4453 } /* ixgbe_sysctl_eee_state */
4454 
4455 /************************************************************************
4456  * ixgbe_init_device_features
4457  ************************************************************************/
4458 static void
4459 ixgbe_init_device_features(struct adapter *adapter)
4460 {
4461 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4462 	                  | IXGBE_FEATURE_RSS
4463 	                  | IXGBE_FEATURE_MSI
4464 	                  | IXGBE_FEATURE_MSIX
4465 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4466 
4467 	/* Set capabilities first... */
4468 	switch (adapter->hw.mac.type) {
4469 	case ixgbe_mac_82598EB:
4470 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4471 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4472 		break;
4473 	case ixgbe_mac_X540:
4474 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4475 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4476 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4477 		    (adapter->hw.bus.func == 0))
4478 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4479 		break;
4480 	case ixgbe_mac_X550:
4481 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4482 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4483 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4484 		break;
4485 	case ixgbe_mac_X550EM_x:
4486 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4487 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4488 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4489 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4490 		break;
4491 	case ixgbe_mac_X550EM_a:
4492 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4493 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4494 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4495 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4496 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4497 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4498 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4499 		}
4500 		break;
4501 	case ixgbe_mac_82599EB:
4502 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4503 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4504 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4505 		    (adapter->hw.bus.func == 0))
4506 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4507 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4508 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4509 		break;
4510 	default:
4511 		break;
4512 	}
4513 
4514 	/* Enabled by default... */
4515 	/* Fan failure detection */
4516 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4517 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4518 	/* Netmap */
4519 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4520 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4521 	/* EEE */
4522 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4523 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4524 	/* Thermal Sensor */
4525 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4526 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4527 
4528 	/* Enabled via global sysctl... */
4529 	/* Flow Director */
4530 	if (ixgbe_enable_fdir) {
4531 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4532 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4533 		else
4534 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4535 	}
4536 	/*
4537 	 * Message Signal Interrupts - Extended (MSI-X)
4538 	 * Normal MSI is only enabled if MSI-X calls fail.
4539 	 */
4540 	if (!ixgbe_enable_msix)
4541 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4542 	/* Receive-Side Scaling (RSS) */
4543 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4544 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4545 
4546 	/* Disable features with unmet dependencies... */
4547 	/* No MSI-X */
4548 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4549 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4550 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4551 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4552 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4553 	}
4554 } /* ixgbe_init_device_features */
4555 
4556 /************************************************************************
4557  * ixgbe_check_fan_failure
4558  ************************************************************************/
4559 static void
4560 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4561 {
4562 	u32 mask;
4563 
4564 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4565 	    IXGBE_ESDP_SDP1;
4566 
4567 	if (reg & mask)
4568 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4569 } /* ixgbe_check_fan_failure */
4570