xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 0bf48626aaa33768078f5872b922b1487b3a9296)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select devices to load on
57  *   Last field is the adapter description string
58  *   Last entry must be all 0s
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 int ixgbe_intr(void *arg);
143 
144 /************************************************************************
145  * Function prototypes
146  ************************************************************************/
147 #if __FreeBSD_version >= 1100036
148 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
149 #endif
150 
151 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
154 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
155 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
156 
157 static void ixgbe_config_dmac(struct adapter *adapter);
158 static void ixgbe_configure_ivars(struct adapter *adapter);
159 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
160                            s8 type);
161 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static bool ixgbe_sfp_probe(if_ctx_t ctx);
163 
164 static void ixgbe_free_pci_resources(if_ctx_t ctx);
165 
166 static int  ixgbe_msix_link(void *arg);
167 static int  ixgbe_msix_que(void *arg);
168 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
169 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
170 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
171 
172 static int  ixgbe_setup_interface(if_ctx_t ctx);
173 static void ixgbe_init_device_features(struct adapter *adapter);
174 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
175 static void ixgbe_add_media_types(if_ctx_t ctx);
176 static void ixgbe_update_stats_counters(struct adapter *adapter);
177 static void ixgbe_config_link(if_ctx_t ctx);
178 static void ixgbe_get_slot_info(struct adapter *);
179 static void ixgbe_check_wol_support(struct adapter *adapter);
180 static void ixgbe_enable_rx_drop(struct adapter *);
181 static void ixgbe_disable_rx_drop(struct adapter *);
182 
183 static void ixgbe_add_hw_stats(struct adapter *adapter);
184 static int  ixgbe_set_flowcntl(struct adapter *, int);
185 static int  ixgbe_set_advertise(struct adapter *, int);
186 static int  ixgbe_get_advertise(struct adapter *);
187 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
188 static void ixgbe_config_gpie(struct adapter *adapter);
189 static void ixgbe_config_delay_values(struct adapter *adapter);
190 
191 /* Sysctl handlers */
192 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
198 #ifdef IXGBE_DEBUG
199 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
200 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
201 #endif
202 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
209 
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_msf(void *);
212 static void ixgbe_handle_mod(void *);
213 static void ixgbe_handle_phy(void *);
214 
215 /************************************************************************
216  *  FreeBSD Device Interface Entry Points
217  ************************************************************************/
218 static device_method_t ix_methods[] = {
219 	/* Device interface */
220 	DEVMETHOD(device_register, ixgbe_register),
221 	DEVMETHOD(device_probe, iflib_device_probe),
222 	DEVMETHOD(device_attach, iflib_device_attach),
223 	DEVMETHOD(device_detach, iflib_device_detach),
224 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
225 	DEVMETHOD(device_suspend, iflib_device_suspend),
226 	DEVMETHOD(device_resume, iflib_device_resume),
227 #ifdef PCI_IOV
228 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
229 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
230 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
231 #endif /* PCI_IOV */
232 	DEVMETHOD_END
233 };
234 
235 static driver_t ix_driver = {
236 	"ix", ix_methods, sizeof(struct adapter),
237 };
238 
239 devclass_t ix_devclass;
240 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
241 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
242 MODULE_DEPEND(ix, pci, 1, 1, 1);
243 MODULE_DEPEND(ix, ether, 1, 1, 1);
244 MODULE_DEPEND(ix, iflib, 1, 1, 1);
245 
246 static device_method_t ixgbe_if_methods[] = {
247 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
248 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
249 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
250 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
251 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
252 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
253 	DEVMETHOD(ifdi_init, ixgbe_if_init),
254 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
255 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
256 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
257 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
258 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
259 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
260 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
261 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
262 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
263 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
264 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
265 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
266 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
267 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
268 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
269 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
270 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
271 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
272 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
273 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
274 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
275 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
276 #ifdef PCI_IOV
277 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
278 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
279 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
280 #endif /* PCI_IOV */
281 	DEVMETHOD_END
282 };
283 
284 /*
285  * TUNEABLE PARAMETERS:
286  */
287 
288 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
289 static driver_t ixgbe_if_driver = {
290   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
291 };
292 
293 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
294 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
295     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
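/*
 * Illustrative note (added): assuming the stock IXGBE_LOW_LATENCY value of
 * 128, this default evaluates to 4000000 / 128 = 31250 interrupts per
 * second per queue vector.
 */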
296 
297 /* Flow control setting, default to full */
298 static int ixgbe_flow_control = ixgbe_fc_full;
299 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
300     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
301 
302 /* Advertise Speed, default to 0 (auto) */
303 static int ixgbe_advertise_speed = 0;
304 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
305     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
306 
307 /*
308  * Smart speed setting, default to on.
309  * This only works as a compile-time option
310  * right now because it is applied during attach;
311  * set this to 'ixgbe_smart_speed_off' to
312  * disable.
313  */
314 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
315 
316 /*
317  * MSI-X should be the default for best performance,
318  * but this allows it to be forced off for testing.
319  */
320 static int ixgbe_enable_msix = 1;
321 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
322     "Enable MSI-X interrupts");
323 
324 /*
325  * Setting this to on allows the use of
326  * unsupported SFP+ modules; note that if
327  * you do so, you are on your own :)
328  */
329 static int allow_unsupported_sfp = FALSE;
330 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
331     &allow_unsupported_sfp, 0,
332     "Allow unsupported SFP modules...use at your own risk");
333 
334 /*
335  * Not sure if Flow Director is fully baked,
336  * so we'll default to turning it off.
337  */
338 static int ixgbe_enable_fdir = 0;
339 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
340     "Enable Flow Director");
341 
342 /* Receive-Side Scaling */
343 static int ixgbe_enable_rss = 1;
344 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
345     "Enable Receive-Side Scaling (RSS)");
346 
347 #if 0
348 /* Keep running tab on them for sanity check */
349 static int ixgbe_total_ports;
350 #endif
351 
352 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
353 
354 /*
355  * For Flow Director: this is the sampling rate for TX packets entered
356  * into the filter pool; a rate of 20 means every 20th packet is probed.
357  *
358  * This feature can be disabled by setting this to 0.
359  */
360 static int atr_sample_rate = 20;
361 
362 extern struct if_txrx ixgbe_txrx;
363 
364 static struct if_shared_ctx ixgbe_sctx_init = {
365 	.isc_magic = IFLIB_MAGIC,
366 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
367 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
368 	.isc_tx_maxsegsize = PAGE_SIZE,
369 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
370 	.isc_tso_maxsegsize = PAGE_SIZE,
371 	.isc_rx_maxsize = PAGE_SIZE*4,
372 	.isc_rx_nsegments = 1,
373 	.isc_rx_maxsegsize = PAGE_SIZE*4,
374 	.isc_nfl = 1,
375 	.isc_ntxqs = 1,
376 	.isc_nrxqs = 1,
377 
378 	.isc_admin_intrcnt = 1,
379 	.isc_vendor_info = ixgbe_vendor_info_array,
380 	.isc_driver_version = ixgbe_driver_version,
381 	.isc_driver = &ixgbe_if_driver,
382 	.isc_flags = IFLIB_TSO_INIT_IP,
383 
384 	.isc_nrxd_min = {MIN_RXD},
385 	.isc_ntxd_min = {MIN_TXD},
386 	.isc_nrxd_max = {MAX_RXD},
387 	.isc_ntxd_max = {MAX_TXD},
388 	.isc_nrxd_default = {DEFAULT_RXD},
389 	.isc_ntxd_default = {DEFAULT_TXD},
390 };
391 
392 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
393 
394 /************************************************************************
395  * ixgbe_if_tx_queues_alloc
396  ************************************************************************/
397 static int
398 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
399                          int ntxqs, int ntxqsets)
400 {
401 	struct adapter     *adapter = iflib_get_softc(ctx);
402 	if_softc_ctx_t     scctx = adapter->shared;
403 	struct ix_tx_queue *que;
404 	int                i, j, error;
405 
406 	MPASS(adapter->num_tx_queues > 0);
407 	MPASS(adapter->num_tx_queues == ntxqsets);
408 	MPASS(ntxqs == 1);
409 
410 	/* Allocate queue structure memory */
411 	adapter->tx_queues =
412 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
413 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
414 	if (!adapter->tx_queues) {
415 		device_printf(iflib_get_dev(ctx),
416 		    "Unable to allocate TX ring memory\n");
417 		return (ENOMEM);
418 	}
419 
420 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
421 		struct tx_ring *txr = &que->txr;
422 
423 		/* In case SR-IOV is enabled, align the index properly */
424 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
425 		    i);
426 
427 		txr->adapter = que->adapter = adapter;
428 
429 		/* Allocate report status array */
430 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
431 		if (txr->tx_rsq == NULL) {
432 			error = ENOMEM;
433 			goto fail;
434 		}
435 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
436 			txr->tx_rsq[j] = QIDX_INVALID;
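		/*
		 * Descriptive note (added, based on how ix_txrx.c uses this
		 * array): tx_rsq records, in ring order, the descriptor
		 * indices at which report status (RS) was requested, so
		 * completion processing knows how far the hardware has
		 * advanced; entries stay QIDX_INVALID until used.
		 */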
437 		/* get the virtual and physical address of the hardware queues */
438 		txr->tail = IXGBE_TDT(txr->me);
439 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
440 		txr->tx_paddr = paddrs[i];
441 
442 		txr->bytes = 0;
443 		txr->total_packets = 0;
444 
445 		/* Set the rate at which we sample packets */
446 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
447 			txr->atr_sample = atr_sample_rate;
448 
449 	}
450 
451 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
452 	    adapter->num_tx_queues);
453 
454 	return (0);
455 
456 fail:
457 	ixgbe_if_queues_free(ctx);
458 
459 	return (error);
460 } /* ixgbe_if_tx_queues_alloc */
461 
462 /************************************************************************
463  * ixgbe_if_rx_queues_alloc
464  ************************************************************************/
465 static int
466 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
467                          int nrxqs, int nrxqsets)
468 {
469 	struct adapter     *adapter = iflib_get_softc(ctx);
470 	struct ix_rx_queue *que;
471 	int                i;
472 
473 	MPASS(adapter->num_rx_queues > 0);
474 	MPASS(adapter->num_rx_queues == nrxqsets);
475 	MPASS(nrxqs == 1);
476 
477 	/* Allocate queue structure memory */
478 	adapter->rx_queues =
479 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
480 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
481 	if (!adapter->rx_queues) {
482 		device_printf(iflib_get_dev(ctx),
483 		    "Unable to allocate TX ring memory\n");
484 		return (ENOMEM);
485 	}
486 
487 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
488 		struct rx_ring *rxr = &que->rxr;
489 
490 		/* In case SR-IOV is enabled, align the index properly */
491 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
492 		    i);
493 
494 		rxr->adapter = que->adapter = adapter;
495 
496 		/* get the virtual and physical address of the hw queues */
497 		rxr->tail = IXGBE_RDT(rxr->me);
498 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
499 		rxr->rx_paddr = paddrs[i];
500 		rxr->bytes = 0;
501 		rxr->que = que;
502 	}
503 
504 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
505 	    adapter->num_rx_queues);
506 
507 	return (0);
508 } /* ixgbe_if_rx_queues_alloc */
509 
510 /************************************************************************
511  * ixgbe_if_queues_free
512  ************************************************************************/
513 static void
514 ixgbe_if_queues_free(if_ctx_t ctx)
515 {
516 	struct adapter     *adapter = iflib_get_softc(ctx);
517 	struct ix_tx_queue *tx_que = adapter->tx_queues;
518 	struct ix_rx_queue *rx_que = adapter->rx_queues;
519 	int                i;
520 
521 	if (tx_que != NULL) {
522 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
523 			struct tx_ring *txr = &tx_que->txr;
524 			if (txr->tx_rsq == NULL)
525 				break;
526 
527 			free(txr->tx_rsq, M_IXGBE);
528 			txr->tx_rsq = NULL;
529 		}
530 
531 		free(adapter->tx_queues, M_IXGBE);
532 		adapter->tx_queues = NULL;
533 	}
534 	if (rx_que != NULL) {
535 		free(adapter->rx_queues, M_IXGBE);
536 		adapter->rx_queues = NULL;
537 	}
538 } /* ixgbe_if_queues_free */
539 
540 /************************************************************************
541  * ixgbe_initialize_rss_mapping
542  ************************************************************************/
543 static void
544 ixgbe_initialize_rss_mapping(struct adapter *adapter)
545 {
546 	struct ixgbe_hw *hw = &adapter->hw;
547 	u32             reta = 0, mrqc, rss_key[10];
548 	int             queue_id, table_size, index_mult;
549 	int             i, j;
550 	u32             rss_hash_config;
551 
552 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
553 		/* Fetch the configured RSS key */
554 		rss_getkey((uint8_t *)&rss_key);
555 	} else {
556 		/* set up random bits */
557 		arc4rand(&rss_key, sizeof(rss_key), 0);
558 	}
559 
560 	/* Set multiplier for RETA setup and table size based on MAC */
561 	index_mult = 0x1;
562 	table_size = 128;
563 	switch (adapter->hw.mac.type) {
564 	case ixgbe_mac_82598EB:
565 		index_mult = 0x11;
566 		break;
567 	case ixgbe_mac_X550:
568 	case ixgbe_mac_X550EM_x:
569 	case ixgbe_mac_X550EM_a:
570 		table_size = 512;
571 		break;
572 	default:
573 		break;
574 	}
575 
576 	/* Set up the redirection table */
577 	for (i = 0, j = 0; i < table_size; i++, j++) {
578 		if (j == adapter->num_rx_queues)
579 			j = 0;
580 
581 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
582 			/*
583 			 * Fetch the RSS bucket id for the given indirection
584 			 * entry. Cap it at the number of configured buckets
585 			 * (which is num_rx_queues.)
586 			 */
587 			queue_id = rss_get_indirection_to_bucket(i);
588 			queue_id = queue_id % adapter->num_rx_queues;
589 		} else
590 			queue_id = (j * index_mult);
591 
592 		/*
593 		 * The low 8 bits are for hash value (n+0);
594 		 * The next 8 bits are for hash value (n+1), etc.
595 		 */
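		/*
		 * Worked example (illustrative; assumes 4 RX queues, a
		 * non-82598 MAC, and the kernel RSS option disabled):
		 * entries i = 0..3 yield queue_ids 0,1,2,3, so reta
		 * accumulates 0x00000000 -> 0x01000000 -> 0x02010000 ->
		 * 0x03020100, and that final value is written to RETA(0),
		 * one byte per indirection entry.  Entries 128..511
		 * (table_size 512 on X550-class MACs) land in the ERETA
		 * registers below.
		 */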
596 		reta = reta >> 8;
597 		reta = reta | (((uint32_t)queue_id) << 24);
598 		if ((i & 3) == 3) {
599 			if (i < 128)
600 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
601 			else
602 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
603 				    reta);
604 			reta = 0;
605 		}
606 	}
607 
608 	/* Now fill our hash function seeds */
609 	for (i = 0; i < 10; i++)
610 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
611 
612 	/* Perform hash on these packet types */
613 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
614 		rss_hash_config = rss_gethashconfig();
615 	else {
616 		/*
617 		 * Disable UDP - IP fragments aren't currently being handled
618 		 * and so we end up with a mix of 2-tuple and 4-tuple
619 		 * traffic.
620 		 */
621 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
622 		                | RSS_HASHTYPE_RSS_TCP_IPV4
623 		                | RSS_HASHTYPE_RSS_IPV6
624 		                | RSS_HASHTYPE_RSS_TCP_IPV6
625 		                | RSS_HASHTYPE_RSS_IPV6_EX
626 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
627 	}
628 
629 	mrqc = IXGBE_MRQC_RSSEN;
630 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
631 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
632 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
633 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
634 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
635 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
636 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
637 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
638 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
639 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
640 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
641 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
642 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
643 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
644 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
645 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
646 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
647 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
648 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
649 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
650 } /* ixgbe_initialize_rss_mapping */
651 
652 /************************************************************************
653  * ixgbe_initialize_receive_units - Setup receive registers and features.
654  ************************************************************************/
655 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
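/*
 * Illustrative sketch (added): assuming the stock IXGBE_SRRCTL_BSIZEPKT_SHIFT
 * of 10, the SRRCTL packet buffer size is expressed in 1 KB units, so with a
 * 2048-byte receive cluster (2048 + BSIZEPKT_ROUNDUP) >> 10 == 2, i.e. a
 * 2 KB hardware buffer per descriptor.
 */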
656 
657 static void
658 ixgbe_initialize_receive_units(if_ctx_t ctx)
659 {
660 	struct adapter     *adapter = iflib_get_softc(ctx);
661 	if_softc_ctx_t     scctx = adapter->shared;
662 	struct ixgbe_hw    *hw = &adapter->hw;
663 	struct ifnet       *ifp = iflib_get_ifp(ctx);
664 	struct ix_rx_queue *que;
665 	int                i, j;
666 	u32                bufsz, fctrl, srrctl, rxcsum;
667 	u32                hlreg;
668 
669 	/*
670 	 * Make sure receives are disabled while
671 	 * setting up the descriptor ring
672 	 */
673 	ixgbe_disable_rx(hw);
674 
675 	/* Enable broadcasts */
676 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
677 	fctrl |= IXGBE_FCTRL_BAM;
678 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
679 		fctrl |= IXGBE_FCTRL_DPF;
680 		fctrl |= IXGBE_FCTRL_PMCF;
681 	}
682 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
683 
684 	/* Set for Jumbo Frames? */
685 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
686 	if (ifp->if_mtu > ETHERMTU)
687 		hlreg |= IXGBE_HLREG0_JUMBOEN;
688 	else
689 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
690 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
691 
692 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
693 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
694 
695 	/* Setup the Base and Length of the Rx Descriptor Ring */
696 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
697 		struct rx_ring *rxr = &que->rxr;
698 		u64            rdba = rxr->rx_paddr;
699 
700 		j = rxr->me;
701 
702 		/* Setup the Base and Length of the Rx Descriptor Ring */
703 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
704 		    (rdba & 0x00000000ffffffffULL));
705 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
706 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
707 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
708 
709 		/* Set up the SRRCTL register */
710 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
711 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
712 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
713 		srrctl |= bufsz;
714 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
715 
716 		/*
717 		 * Set DROP_EN iff we have no flow control and >1 queue.
718 		 * Note that srrctl was cleared shortly before during reset,
719 		 * so we do not need to clear the bit, but do it just in case
720 		 * this code is moved elsewhere.
721 		 */
722 		if (adapter->num_rx_queues > 1 &&
723 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
724 			srrctl |= IXGBE_SRRCTL_DROP_EN;
725 		} else {
726 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
727 		}
728 
729 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
730 
731 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
732 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
733 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
734 
735 		/* Set the driver rx tail address */
736 		rxr->tail = IXGBE_RDT(rxr->me);
737 	}
738 
739 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
740 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
741 		            | IXGBE_PSRTYPE_UDPHDR
742 		            | IXGBE_PSRTYPE_IPV4HDR
743 		            | IXGBE_PSRTYPE_IPV6HDR;
744 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
745 	}
746 
747 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
748 
749 	ixgbe_initialize_rss_mapping(adapter);
750 
751 	if (adapter->num_rx_queues > 1) {
752 		/* RSS and RX IPP Checksum are mutually exclusive */
753 		rxcsum |= IXGBE_RXCSUM_PCSD;
754 	}
755 
756 	if (ifp->if_capenable & IFCAP_RXCSUM)
757 		rxcsum |= IXGBE_RXCSUM_PCSD;
758 
759 	/* This is useful for calculating UDP/IP fragment checksums */
760 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
761 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
762 
763 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
764 
765 } /* ixgbe_initialize_receive_units */
766 
767 /************************************************************************
768  * ixgbe_initialize_transmit_units - Enable transmit units.
769  ************************************************************************/
770 static void
771 ixgbe_initialize_transmit_units(if_ctx_t ctx)
772 {
773 	struct adapter     *adapter = iflib_get_softc(ctx);
774 	struct ixgbe_hw    *hw = &adapter->hw;
775 	if_softc_ctx_t     scctx = adapter->shared;
776 	struct ix_tx_queue *que;
777 	int i;
778 
779 	/* Setup the Base and Length of the Tx Descriptor Ring */
780 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
781 	    i++, que++) {
782 		struct tx_ring	   *txr = &que->txr;
783 		u64 tdba = txr->tx_paddr;
784 		u32 txctrl = 0;
785 		int j = txr->me;
786 
787 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
788 		    (tdba & 0x00000000ffffffffULL));
789 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
790 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
791 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
792 
793 		/* Setup the HW Tx Head and Tail descriptor pointers */
794 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
795 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
796 
797 		/* Cache the tail address */
798 		txr->tail = IXGBE_TDT(txr->me);
799 
800 		txr->tx_rs_cidx = txr->tx_rs_pidx;
801 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
802 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
803 			txr->tx_rsq[k] = QIDX_INVALID;
804 
805 		/* Disable Head Writeback */
806 		/*
807 		 * Note: for X550 series devices, these registers are actually
808 		 * prefixed with TPH_ instead of DCA_, but the addresses and
809 		 * fields remain the same.
810 		 */
811 		switch (hw->mac.type) {
812 		case ixgbe_mac_82598EB:
813 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
814 			break;
815 		default:
816 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
817 			break;
818 		}
819 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
820 		switch (hw->mac.type) {
821 		case ixgbe_mac_82598EB:
822 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
823 			break;
824 		default:
825 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
826 			break;
827 		}
828 
829 	}
830 
831 	if (hw->mac.type != ixgbe_mac_82598EB) {
832 		u32 dmatxctl, rttdcs;
833 
834 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
835 		dmatxctl |= IXGBE_DMATXCTL_TE;
836 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
837 		/* Disable arbiter to set MTQC */
838 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
839 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
840 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
841 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
842 		    ixgbe_get_mtqc(adapter->iov_mode));
843 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
844 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
845 	}
846 
847 } /* ixgbe_initialize_transmit_units */
848 
849 /************************************************************************
850  * ixgbe_register
851  ************************************************************************/
852 static void *
853 ixgbe_register(device_t dev)
854 {
855 	return (ixgbe_sctx);
856 } /* ixgbe_register */
857 
858 /************************************************************************
859  * ixgbe_if_attach_pre - Device initialization routine, part 1
860  *
861  *   Called when the driver is being loaded.
862  *   Identifies the type of hardware, initializes the hardware,
863  *   and initializes iflib structures.
864  *
865  *   return 0 on success, positive on failure
866  ************************************************************************/
867 static int
868 ixgbe_if_attach_pre(if_ctx_t ctx)
869 {
870 	struct adapter  *adapter;
871 	device_t        dev;
872 	if_softc_ctx_t  scctx;
873 	struct ixgbe_hw *hw;
874 	int             error = 0;
875 	u32             ctrl_ext;
876 
877 	INIT_DEBUGOUT("ixgbe_attach: begin");
878 
879 	/* Allocate, clear, and link in our adapter structure */
880 	dev = iflib_get_dev(ctx);
881 	adapter = iflib_get_softc(ctx);
882 	adapter->hw.back = adapter;
883 	adapter->ctx = ctx;
884 	adapter->dev = dev;
885 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
886 	adapter->media = iflib_get_media(ctx);
887 	hw = &adapter->hw;
888 
889 	/* Determine hardware revision */
890 	hw->vendor_id = pci_get_vendor(dev);
891 	hw->device_id = pci_get_device(dev);
892 	hw->revision_id = pci_get_revid(dev);
893 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
894 	hw->subsystem_device_id = pci_get_subdevice(dev);
895 
896 	/* Do base PCI setup - map BAR0 */
897 	if (ixgbe_allocate_pci_resources(ctx)) {
898 		device_printf(dev, "Allocation of PCI resources failed\n");
899 		return (ENXIO);
900 	}
901 
902 	/* let hardware know driver is loaded */
903 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
904 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
905 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
906 
907 	/*
908 	 * Initialize the shared code
909 	 */
910 	if (ixgbe_init_shared_code(hw) != 0) {
911 		device_printf(dev, "Unable to initialize the shared code\n");
912 		error = ENXIO;
913 		goto err_pci;
914 	}
915 
916 	if (hw->mbx.ops.init_params)
917 		hw->mbx.ops.init_params(hw);
918 
919 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
920 
921 	if (hw->mac.type != ixgbe_mac_82598EB)
922 		hw->phy.smart_speed = ixgbe_smart_speed;
923 
924 	ixgbe_init_device_features(adapter);
925 
926 	/* Enable WoL (if supported) */
927 	ixgbe_check_wol_support(adapter);
928 
929 	/* Verify adapter fan is still functional (if applicable) */
930 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
931 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
932 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
933 	}
934 
935 	/* Ensure SW/FW semaphore is free */
936 	ixgbe_init_swfw_semaphore(hw);
937 
938 	/* Set an initial default flow control value */
939 	hw->fc.requested_mode = ixgbe_flow_control;
940 
941 	hw->phy.reset_if_overtemp = TRUE;
942 	error = ixgbe_reset_hw(hw);
943 	hw->phy.reset_if_overtemp = FALSE;
944 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
945 		/*
946 		 * No optics in this port, set up
947 		 * so the timer routine will probe
948 		 * for later insertion.
949 		 */
950 		adapter->sfp_probe = TRUE;
951 		error = 0;
952 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
953 		device_printf(dev, "Unsupported SFP+ module detected!\n");
954 		error = EIO;
955 		goto err_pci;
956 	} else if (error) {
957 		device_printf(dev, "Hardware initialization failed\n");
958 		error = EIO;
959 		goto err_pci;
960 	}
961 
962 	/* Make sure we have a good EEPROM before we read from it */
963 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
964 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
965 		error = EIO;
966 		goto err_pci;
967 	}
968 
969 	error = ixgbe_start_hw(hw);
970 	switch (error) {
971 	case IXGBE_ERR_EEPROM_VERSION:
972 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
973 		break;
974 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
975 		device_printf(dev, "Unsupported SFP+ Module\n");
976 		error = EIO;
977 		goto err_pci;
978 	case IXGBE_ERR_SFP_NOT_PRESENT:
979 		device_printf(dev, "No SFP+ Module found\n");
980 		/* falls thru */
981 	default:
982 		break;
983 	}
984 
985 	/* Most of the iflib initialization... */
986 
987 	iflib_set_mac(ctx, hw->mac.addr);
988 	switch (adapter->hw.mac.type) {
989 	case ixgbe_mac_X550:
990 	case ixgbe_mac_X550EM_x:
991 	case ixgbe_mac_X550EM_a:
992 		scctx->isc_rss_table_size = 512;
993 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
994 		break;
995 	default:
996 		scctx->isc_rss_table_size = 128;
997 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
998 	}
999 
1000 	/* Allow legacy interrupts */
1001 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1002 
1003 	scctx->isc_txqsizes[0] =
1004 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1005 	    sizeof(u32), DBA_ALIGN);
1006 	scctx->isc_rxqsizes[0] =
1007 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1008 	    DBA_ALIGN);
1009 
1010 	/* XXX */
1011 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1012 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1013 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1014 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1015 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
1016 	} else {
1017 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1018 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1019 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
1020 	}
1021 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1022 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1023 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1024 
1025 	scctx->isc_txrx = &ixgbe_txrx;
1026 
1027 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1028 
1029 	return (0);
1030 
1031 err_pci:
1032 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1033 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1034 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1035 	ixgbe_free_pci_resources(ctx);
1036 
1037 	return (error);
1038 } /* ixgbe_if_attach_pre */
1039 
1040  /*********************************************************************
1041  * ixgbe_if_attach_post - Device initialization routine, part 2
1042  *
1043  *   Called during driver load, but after interrupts and
1044  *   resources have been allocated and configured.
1045  *   Sets up some data structures not relevant to iflib.
1046  *
1047  *   return 0 on success, positive on failure
1048  *********************************************************************/
1049 static int
1050 ixgbe_if_attach_post(if_ctx_t ctx)
1051 {
1052 	device_t dev;
1053 	struct adapter  *adapter;
1054 	struct ixgbe_hw *hw;
1055 	int             error = 0;
1056 
1057 	dev = iflib_get_dev(ctx);
1058 	adapter = iflib_get_softc(ctx);
1059 	hw = &adapter->hw;
1060 
1061 
1062 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1063 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1064 		device_printf(dev, "Device does not support legacy interrupts");
1065 		error = ENXIO;
1066 		goto err;
1067 	}
1068 
1069 	/* Allocate multicast array memory. */
1070 	adapter->mta = malloc(sizeof(*adapter->mta) *
1071 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1072 	if (adapter->mta == NULL) {
1073 		device_printf(dev, "Can not allocate multicast setup array\n");
1074 		error = ENOMEM;
1075 		goto err;
1076 	}
1077 
1078 	/* hw.ix defaults init */
1079 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1080 
1081 	/* Enable the optics for 82599 SFP+ fiber */
1082 	ixgbe_enable_tx_laser(hw);
1083 
1084 	/* Enable power to the phy. */
1085 	ixgbe_set_phy_power(hw, TRUE);
1086 
1087 	ixgbe_initialize_iov(adapter);
1088 
1089 	error = ixgbe_setup_interface(ctx);
1090 	if (error) {
1091 		device_printf(dev, "Interface setup failed: %d\n", error);
1092 		goto err;
1093 	}
1094 
1095 	ixgbe_if_update_admin_status(ctx);
1096 
1097 	/* Initialize statistics */
1098 	ixgbe_update_stats_counters(adapter);
1099 	ixgbe_add_hw_stats(adapter);
1100 
1101 	/* Check PCIE slot type/speed/width */
1102 	ixgbe_get_slot_info(adapter);
1103 
1104 	/*
1105 	 * Do time init and sysctl init here, but
1106 	 * only on the first port of a bypass adapter.
1107 	 */
1108 	ixgbe_bypass_init(adapter);
1109 
1110 	/* Set an initial dmac value */
1111 	adapter->dmac = 0;
1112 	/* Set initial advertised speeds (if applicable) */
1113 	adapter->advertise = ixgbe_get_advertise(adapter);
1114 
1115 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1116 		ixgbe_define_iov_schemas(dev, &error);
1117 
1118 	/* Add sysctls */
1119 	ixgbe_add_device_sysctls(ctx);
1120 
1121 	return (0);
1122 err:
1123 	return (error);
1124 } /* ixgbe_if_attach_post */
1125 
1126 /************************************************************************
1127  * ixgbe_check_wol_support
1128  *
1129  *   Checks whether the adapter's ports are capable of
1130  *   Wake On LAN by reading the adapter's NVM.
1131  *
1132  *   Sets each port's hw->wol_enabled value depending
1133  *   on the value read here.
1134  ************************************************************************/
1135 static void
1136 ixgbe_check_wol_support(struct adapter *adapter)
1137 {
1138 	struct ixgbe_hw *hw = &adapter->hw;
1139 	u16             dev_caps = 0;
1140 
1141 	/* Find out WoL support for port */
1142 	adapter->wol_support = hw->wol_enabled = 0;
1143 	ixgbe_get_device_caps(hw, &dev_caps);
1144 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1145 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1146 	     hw->bus.func == 0))
1147 		adapter->wol_support = hw->wol_enabled = 1;
1148 
1149 	/* Save initial wake up filter configuration */
1150 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1151 
1152 	return;
1153 } /* ixgbe_check_wol_support */
1154 
1155 /************************************************************************
1156  * ixgbe_setup_interface
1157  *
1158  *   Setup networking device structure and register an interface.
1159  ************************************************************************/
1160 static int
1161 ixgbe_setup_interface(if_ctx_t ctx)
1162 {
1163 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1164 	struct adapter *adapter = iflib_get_softc(ctx);
1165 
1166 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1167 
1168 	if_setbaudrate(ifp, IF_Gbps(10));
1169 
1170 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1171 
1172 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1173 
1174 	ixgbe_add_media_types(ctx);
1175 
1176 	/* Autoselect media by default */
1177 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1178 
1179 	return (0);
1180 } /* ixgbe_setup_interface */
1181 
1182 /************************************************************************
1183  * ixgbe_if_get_counter
1184  ************************************************************************/
1185 static uint64_t
1186 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1187 {
1188 	struct adapter *adapter = iflib_get_softc(ctx);
1189 	if_t           ifp = iflib_get_ifp(ctx);
1190 
1191 	switch (cnt) {
1192 	case IFCOUNTER_IPACKETS:
1193 		return (adapter->ipackets);
1194 	case IFCOUNTER_OPACKETS:
1195 		return (adapter->opackets);
1196 	case IFCOUNTER_IBYTES:
1197 		return (adapter->ibytes);
1198 	case IFCOUNTER_OBYTES:
1199 		return (adapter->obytes);
1200 	case IFCOUNTER_IMCASTS:
1201 		return (adapter->imcasts);
1202 	case IFCOUNTER_OMCASTS:
1203 		return (adapter->omcasts);
1204 	case IFCOUNTER_COLLISIONS:
1205 		return (0);
1206 	case IFCOUNTER_IQDROPS:
1207 		return (adapter->iqdrops);
1208 	case IFCOUNTER_OQDROPS:
1209 		return (0);
1210 	case IFCOUNTER_IERRORS:
1211 		return (adapter->ierrors);
1212 	default:
1213 		return (if_get_counter_default(ifp, cnt));
1214 	}
1215 } /* ixgbe_if_get_counter */
1216 
1217 /************************************************************************
1218  * ixgbe_if_i2c_req
1219  ************************************************************************/
1220 static int
1221 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1222 {
1223 	struct adapter		*adapter = iflib_get_softc(ctx);
1224 	struct ixgbe_hw 	*hw = &adapter->hw;
1225 	int 			i;
1226 
1227 
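	/*
	 * Descriptive note (added): this services ifnet I2C requests by
	 * reading req->len bytes, one at a time, from the SFP module at I2C
	 * address req->dev_addr (typically 0xA0 for the ID page or 0xA2 for
	 * diagnostics), starting at req->offset.
	 */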
1228 	if (hw->phy.ops.read_i2c_byte == NULL)
1229 		return (ENXIO);
1230 	for (i = 0; i < req->len; i++)
1231 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1232 		    req->dev_addr, &req->data[i]);
1233 	return (0);
1234 } /* ixgbe_if_i2c_req */
1235 
1236 /************************************************************************
1237  * ixgbe_add_media_types
1238  ************************************************************************/
1239 static void
1240 ixgbe_add_media_types(if_ctx_t ctx)
1241 {
1242 	struct adapter  *adapter = iflib_get_softc(ctx);
1243 	struct ixgbe_hw *hw = &adapter->hw;
1244 	device_t        dev = iflib_get_dev(ctx);
1245 	u64             layer;
1246 
1247 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1248 
1249 	/* Media types with matching FreeBSD media defines */
1250 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1251 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1252 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1253 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1254 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1255 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1256 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1257 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1258 
1259 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1260 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1261 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1262 		    NULL);
1263 
1264 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1265 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1266 		if (hw->phy.multispeed_fiber)
1267 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1268 			    NULL);
1269 	}
1270 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1271 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1272 		if (hw->phy.multispeed_fiber)
1273 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1274 			    NULL);
1275 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1276 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1277 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1278 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1279 
1280 #ifdef IFM_ETH_XTYPE
1281 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1282 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1283 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1284 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1285 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1286 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1287 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1288 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1289 #else
1290 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1291 		device_printf(dev, "Media supported: 10GbaseKR\n");
1292 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1293 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1294 	}
1295 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1296 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1297 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1298 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1299 	}
1300 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1301 		device_printf(dev, "Media supported: 1000baseKX\n");
1302 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1303 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1304 	}
1305 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1306 		device_printf(dev, "Media supported: 2500baseKX\n");
1307 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1308 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1309 	}
1310 #endif
1311 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1312 		device_printf(dev, "Media supported: 1000baseBX\n");
1313 
1314 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1315 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1316 		    0, NULL);
1317 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1318 	}
1319 
1320 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1321 } /* ixgbe_add_media_types */
1322 
1323 /************************************************************************
1324  * ixgbe_is_sfp
1325  ************************************************************************/
1326 static inline bool
1327 ixgbe_is_sfp(struct ixgbe_hw *hw)
1328 {
1329 	switch (hw->mac.type) {
1330 	case ixgbe_mac_82598EB:
1331 		if (hw->phy.type == ixgbe_phy_nl)
1332 			return (TRUE);
1333 		return (FALSE);
1334 	case ixgbe_mac_82599EB:
1335 		switch (hw->mac.ops.get_media_type(hw)) {
1336 		case ixgbe_media_type_fiber:
1337 		case ixgbe_media_type_fiber_qsfp:
1338 			return (TRUE);
1339 		default:
1340 			return (FALSE);
1341 		}
1342 	case ixgbe_mac_X550EM_x:
1343 	case ixgbe_mac_X550EM_a:
1344 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1345 			return (TRUE);
1346 		return (FALSE);
1347 	default:
1348 		return (FALSE);
1349 	}
1350 } /* ixgbe_is_sfp */
1351 
1352 /************************************************************************
1353  * ixgbe_config_link
1354  ************************************************************************/
1355 static void
1356 ixgbe_config_link(if_ctx_t ctx)
1357 {
1358 	struct adapter  *adapter = iflib_get_softc(ctx);
1359 	struct ixgbe_hw *hw = &adapter->hw;
1360 	u32             autoneg, err = 0;
1361 	bool            sfp, negotiate;
1362 
1363 	sfp = ixgbe_is_sfp(hw);
1364 
1365 	if (sfp) {
1366 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1367 		iflib_admin_intr_deferred(ctx);
1368 	} else {
1369 		if (hw->mac.ops.check_link)
1370 			err = ixgbe_check_link(hw, &adapter->link_speed,
1371 			    &adapter->link_up, FALSE);
1372 		if (err)
1373 			return;
1374 		autoneg = hw->phy.autoneg_advertised;
1375 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1376 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1377 			    &negotiate);
1378 		if (err)
1379 			return;
1380 		if (hw->mac.ops.setup_link)
1381 			err = hw->mac.ops.setup_link(hw, autoneg,
1382 			    adapter->link_up);
1383 	}
1384 } /* ixgbe_config_link */
1385 
1386 /************************************************************************
1387  * ixgbe_update_stats_counters - Update board statistics counters.
1388  ************************************************************************/
1389 static void
1390 ixgbe_update_stats_counters(struct adapter *adapter)
1391 {
1392 	struct ixgbe_hw       *hw = &adapter->hw;
1393 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1394 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1395 	u64                   total_missed_rx = 0;
1396 
1397 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1398 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1399 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1400 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1401 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1402 
1403 	for (int i = 0; i < 16; i++) {
1404 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1405 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1406 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1407 	}
1408 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1409 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1410 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1411 
1412 	/* Hardware workaround, gprc counts missed packets */
1413 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1414 	stats->gprc -= missed_rx;
1415 
1416 	if (hw->mac.type != ixgbe_mac_82598EB) {
1417 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1418 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1419 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1420 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1421 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1422 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1423 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1424 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1425 	} else {
1426 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1427 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1428 		/* 82598 only has a counter in the high register */
1429 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1430 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1431 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1432 	}
1433 
1434 	/*
1435 	 * Workaround: mprc hardware is incorrectly counting
1436 	 * broadcasts, so for now we subtract those.
1437 	 */
1438 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1439 	stats->bprc += bprc;
1440 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1441 	if (hw->mac.type == ixgbe_mac_82598EB)
1442 		stats->mprc -= bprc;
1443 
1444 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1445 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1446 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1447 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1448 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1449 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1450 
1451 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1452 	stats->lxontxc += lxon;
1453 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1454 	stats->lxofftxc += lxoff;
1455 	total = lxon + lxoff;
1456 
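	/*
	 * Transmitted PAUSE frames are also counted by the hardware in the
	 * packet and octet counters below; back them out so the totals
	 * reflect data traffic only.
	 */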
1457 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1458 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1459 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1460 	stats->gptc -= total;
1461 	stats->mptc -= total;
1462 	stats->ptc64 -= total;
1463 	stats->gotc -= total * ETHER_MIN_LEN;
1464 
1465 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1466 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1467 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1468 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1469 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1470 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1471 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1472 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1473 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1474 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1475 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1476 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1477 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1478 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1479 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1480 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1481 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1482 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1483 	/* Only read FCoE counters on 82599 and newer */
1484 	if (hw->mac.type != ixgbe_mac_82598EB) {
1485 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1486 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1487 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1488 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1489 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1490 	}
1491 
1492 	/* Fill out the OS statistics structure */
1493 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1494 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1495 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1496 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1497 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1498 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1499 	IXGBE_SET_COLLISIONS(adapter, 0);
1500 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1501 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1502 } /* ixgbe_update_stats_counters */
1503 
1504 /************************************************************************
1505  * ixgbe_add_hw_stats
1506  *
1507  *   Add sysctl variables, one per statistic, to the system.
1508  ************************************************************************/
1509 static void
1510 ixgbe_add_hw_stats(struct adapter *adapter)
1511 {
1512 	device_t               dev = iflib_get_dev(adapter->ctx);
1513 	struct ix_rx_queue     *rx_que;
1514 	struct ix_tx_queue     *tx_que;
1515 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1516 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1517 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1518 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1519 	struct sysctl_oid      *stat_node, *queue_node;
1520 	struct sysctl_oid_list *stat_list, *queue_list;
1521 	int                    i;
1522 
1523 #define QUEUE_NAME_LEN 32
1524 	char                   namebuf[QUEUE_NAME_LEN];
1525 
1526 	/* Driver Statistics */
1527 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1528 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1529 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1530 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1531 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1532 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1533 
1534 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1535 		struct tx_ring *txr = &tx_que->txr;
1536 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1537 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1538 		    CTLFLAG_RD, NULL, "Queue Name");
1539 		queue_list = SYSCTL_CHILDREN(queue_node);
1540 
1541 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1542 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1543 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1544 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1545 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1546 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1547 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1548 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1549 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1550 		    CTLFLAG_RD, &txr->total_packets,
1551 		    "Queue Packets Transmitted");
1552 	}
1553 
1554 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1555 		struct rx_ring *rxr = &rx_que->rxr;
1556 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1557 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1558 		    CTLFLAG_RD, NULL, "Queue Name");
1559 		queue_list = SYSCTL_CHILDREN(queue_node);
1560 
1561 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1562 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1563 		    sizeof(&adapter->rx_queues[i]),
1564 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1565 		    "Interrupt Rate");
1566 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1567 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1568 		    "irqs on this queue");
1569 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1570 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1571 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1572 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1573 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1574 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1575 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1576 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1577 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1578 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1579 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1580 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1581 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1582 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1583 	}
1584 
1585 	/* MAC stats get their own sub node */
1586 
1587 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1588 	    CTLFLAG_RD, NULL, "MAC Statistics");
1589 	stat_list = SYSCTL_CHILDREN(stat_node);
1590 
1591 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1592 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1593 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1594 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1595 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1596 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1597 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1598 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1599 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1600 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1601 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1602 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1603 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1604 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1605 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1606 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1607 
1608 	/* Flow Control stats */
1609 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1610 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1611 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1612 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1613 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1614 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1615 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1616 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1617 
1618 	/* Packet Reception Stats */
1619 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1620 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1621 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1622 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1623 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1624 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1625 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1626 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1627 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1628 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1629 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1630 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1631 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1632 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1633 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1634 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1635 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1636 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1637 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1638 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1639 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1640 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1641 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1642 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1643 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1644 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1645 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1646 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1647 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1648 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1649 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1650 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1651 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1652 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1654 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1656 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1657 
1658 	/* Packet Transmission Stats */
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1660 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1662 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1664 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1665 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1666 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1667 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1668 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1670 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1671 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1672 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1674 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1676 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1678 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1680 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1681 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1682 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1683 } /* ixgbe_add_hw_stats */
1684 
1685 /************************************************************************
1686  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1687  *
1688  *   Retrieves the TDH value from the hardware
1689  ************************************************************************/
1690 static int
1691 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1692 {
1693 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1694 	int            error;
1695 	unsigned int   val;
1696 
1697 	if (!txr)
1698 		return (0);
1699 
1700 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1701 	error = sysctl_handle_int(oidp, &val, 0, req);
1702 	if (error || !req->newptr)
1703 		return error;
1704 
1705 	return (0);
1706 } /* ixgbe_sysctl_tdh_handler */
1707 
1708 /************************************************************************
1709  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1710  *
1711  *   Retrieves the TDT value from the hardware
1712  ************************************************************************/
1713 static int
1714 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1715 {
1716 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1717 	int            error;
1718 	unsigned int   val;
1719 
1720 	if (!txr)
1721 		return (0);
1722 
1723 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1724 	error = sysctl_handle_int(oidp, &val, 0, req);
1725 	if (error || !req->newptr)
1726 		return error;
1727 
1728 	return (0);
1729 } /* ixgbe_sysctl_tdt_handler */
1730 
1731 /************************************************************************
1732  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1733  *
1734  *   Retrieves the RDH value from the hardware
1735  ************************************************************************/
1736 static int
1737 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1738 {
1739 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1740 	int            error;
1741 	unsigned int   val;
1742 
1743 	if (!rxr)
1744 		return (0);
1745 
1746 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1747 	error = sysctl_handle_int(oidp, &val, 0, req);
1748 	if (error || !req->newptr)
1749 		return error;
1750 
1751 	return (0);
1752 } /* ixgbe_sysctl_rdh_handler */
1753 
1754 /************************************************************************
1755  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1756  *
1757  *   Retrieves the RDT value from the hardware
1758  ************************************************************************/
1759 static int
1760 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1761 {
1762 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1763 	int            error;
1764 	unsigned int   val;
1765 
1766 	if (!rxr)
1767 		return (0);
1768 
1769 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1770 	error = sysctl_handle_int(oidp, &val, 0, req);
1771 	if (error || !req->newptr)
1772 		return error;
1773 
1774 	return (0);
1775 } /* ixgbe_sysctl_rdt_handler */
1776 
1777 /************************************************************************
1778  * ixgbe_if_vlan_register
1779  *
1780  *   Run via the vlan config EVENT. It enables us to use the
1781  *   HW filter table since we can get the vlan id. This
1782  *   just creates the entry in the soft version of the
1783  *   VFTA; init will repopulate the real table.
1784  ************************************************************************/
1785 static void
1786 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1787 {
1788 	struct adapter *adapter = iflib_get_softc(ctx);
1789 	u16            index, bit;
1790 
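	/*
	 * shadow_vfta mirrors the hardware VLAN Filter Table Array: 128
	 * 32-bit words, indexed by VLAN ID bits [11:5], with bits [4:0]
	 * selecting the bit within the word.
	 */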
1791 	index = (vtag >> 5) & 0x7F;
1792 	bit = vtag & 0x1F;
1793 	adapter->shadow_vfta[index] |= (1 << bit);
1794 	++adapter->num_vlans;
1795 	ixgbe_setup_vlan_hw_support(ctx);
1796 } /* ixgbe_if_vlan_register */
1797 
1798 /************************************************************************
1799  * ixgbe_if_vlan_unregister
1800  *
1801  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1802  ************************************************************************/
1803 static void
1804 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1805 {
1806 	struct adapter *adapter = iflib_get_softc(ctx);
1807 	u16            index, bit;
1808 
1809 	index = (vtag >> 5) & 0x7F;
1810 	bit = vtag & 0x1F;
1811 	adapter->shadow_vfta[index] &= ~(1 << bit);
1812 	--adapter->num_vlans;
1813 	/* Re-init to load the changes */
1814 	ixgbe_setup_vlan_hw_support(ctx);
1815 } /* ixgbe_if_vlan_unregister */
1816 
1817 /************************************************************************
1818  * ixgbe_setup_vlan_hw_support
1819  ************************************************************************/
1820 static void
1821 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1822 {
1823 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1824 	struct adapter  *adapter = iflib_get_softc(ctx);
1825 	struct ixgbe_hw *hw = &adapter->hw;
1826 	struct rx_ring  *rxr;
1827 	int             i;
1828 	u32             ctrl;
1829 
1830 
1831 	/*
1832 	 * We get here through init_locked, meaning
1833 	 * a soft reset; this has already cleared
1834 	 * the VFTA and other state, so if no vlans
1835 	 * have been registered, do nothing.
1836 	 */
1837 	if (adapter->num_vlans == 0)
1838 		return;
1839 
1840 	/* Setup the queues for vlans */
1841 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1842 		for (i = 0; i < adapter->num_rx_queues; i++) {
1843 			rxr = &adapter->rx_queues[i].rxr;
1844 			/* On 82599 and newer the VLAN enable is per-queue in RXDCTL */
1845 			if (hw->mac.type != ixgbe_mac_82598EB) {
1846 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1847 				ctrl |= IXGBE_RXDCTL_VME;
1848 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1849 			}
1850 			rxr->vtag_strip = TRUE;
1851 		}
1852 	}
1853 
1854 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1855 		return;
1856 	/*
1857 	 * A soft reset zeroes out the VFTA, so
1858 	 * we need to repopulate it now.
1859 	 */
1860 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1861 		if (adapter->shadow_vfta[i] != 0)
1862 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1863 			    adapter->shadow_vfta[i]);
1864 
1865 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1866 	/* Enable the Filter Table if enabled */
1867 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1868 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1869 		ctrl |= IXGBE_VLNCTRL_VFE;
1870 	}
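	/* On 82598 VLAN stripping is global (VLNCTRL.VME); newer MACs use per-queue RXDCTL above */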
1871 	if (hw->mac.type == ixgbe_mac_82598EB)
1872 		ctrl |= IXGBE_VLNCTRL_VME;
1873 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1874 } /* ixgbe_setup_vlan_hw_support */
1875 
1876 /************************************************************************
1877  * ixgbe_get_slot_info
1878  *
1879  *   Get the width and transaction speed of
1880  *   the slot this adapter is plugged into.
1881  ************************************************************************/
1882 static void
1883 ixgbe_get_slot_info(struct adapter *adapter)
1884 {
1885 	device_t        dev = iflib_get_dev(adapter->ctx);
1886 	struct ixgbe_hw *hw = &adapter->hw;
1887 	int             bus_info_valid = TRUE;
1888 	u32             offset;
1889 	u16             link;
1890 
1891 	/* Some devices are behind an internal bridge */
1892 	switch (hw->device_id) {
1893 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1894 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1895 		goto get_parent_info;
1896 	default:
1897 		break;
1898 	}
1899 
1900 	ixgbe_get_bus_info(hw);
1901 
1902 	/*
1903 	 * Some devices don't use PCI-E, so there is no need
1904 	 * to display "Unknown" for bus speed and width.
1905 	 */
1906 	switch (hw->mac.type) {
1907 	case ixgbe_mac_X550EM_x:
1908 	case ixgbe_mac_X550EM_a:
1909 		return;
1910 	default:
1911 		goto display;
1912 	}
1913 
1914 get_parent_info:
1915 	/*
1916 	 * For the Quad port adapter we need to parse back
1917 	 * up the PCI tree to find the speed of the expansion
1918 	 * slot into which this adapter is plugged. A bit more work.
1919 	 */
1920 	dev = device_get_parent(device_get_parent(dev));
1921 #ifdef IXGBE_DEBUG
1922 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1923 	    pci_get_slot(dev), pci_get_function(dev));
1924 #endif
1925 	dev = device_get_parent(device_get_parent(dev));
1926 #ifdef IXGBE_DEBUG
1927 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1928 	    pci_get_slot(dev), pci_get_function(dev));
1929 #endif
1930 	/* Now get the PCI Express Capabilities offset */
1931 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1932 		/*
1933 		 * Hmm...can't get PCI-Express capabilities.
1934 		 * Falling back to default method.
1935 		 */
1936 		bus_info_valid = FALSE;
1937 		ixgbe_get_bus_info(hw);
1938 		goto display;
1939 	}
1940 	/* ...and read the Link Status Register */
1941 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1942 	ixgbe_set_pci_config_data_generic(hw, link);
1943 
1944 display:
1945 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1946 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1947 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1948 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1949 	     "Unknown"),
1950 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1951 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1952 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1953 	     "Unknown"));
1954 
1955 	if (bus_info_valid) {
1956 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1957 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1958 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1959 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1960 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1961 		}
1962 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1963 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1964 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1965 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1966 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1967 		}
1968 	} else
1969 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1970 
1971 	return;
1972 } /* ixgbe_get_slot_info */
1973 
1974 /************************************************************************
1975  * ixgbe_if_msix_intr_assign
1976  *
1977  *   Setup MSI-X Interrupt resources and handlers
1978  ************************************************************************/
1979 static int
1980 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1981 {
1982 	struct adapter     *adapter = iflib_get_softc(ctx);
1983 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1984 	struct ix_tx_queue *tx_que;
1985 	int                error, rid, vector = 0;
1986 	int                cpu_id = 0;
1987 	char               buf[16];
1988 
1989 	/* Admin queue is vector 0 */
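	/* MSI-X SYS_RES_IRQ resource IDs are 1-based, so each vector uses rid = vector + 1 */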
1990 	rid = vector + 1;
1991 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1992 		rid = vector + 1;
1993 
1994 		snprintf(buf, sizeof(buf), "rxq%d", i);
1995 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1996 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
1997 
1998 		if (error) {
1999 			device_printf(iflib_get_dev(ctx),
2000 			    "Failed to allocate que int %d err: %d", i, error);
2001 			adapter->num_rx_queues = i + 1;
2002 			goto fail;
2003 		}
2004 
2005 		rx_que->msix = vector;
2006 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2007 			/*
2008 			 * The queue ID is used as the RSS layer bucket ID.
2009 			 * We look up the queue ID -> RSS CPU ID and select
2010 			 * that.
2011 			 */
2012 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2013 		} else {
2014 			/*
2015 			 * Bind the MSI-X vector, and thus the
2016 			 * rings to the corresponding cpu.
2017 			 *
2018 			 * This just happens to match the default RSS
2019 			 * round-robin bucket -> queue -> CPU allocation.
2020 			 */
2021 			if (adapter->num_rx_queues > 1)
2022 				cpu_id = i;
2023 		}
2024 
2025 	}
2026 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2027 		snprintf(buf, sizeof(buf), "txq%d", i);
2028 		tx_que = &adapter->tx_queues[i];
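		/* TX completions are serviced on an RX queue's vector; map TX queues round-robin */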
2029 		tx_que->msix = i % adapter->num_rx_queues;
2030 		iflib_softirq_alloc_generic(ctx,
2031 		    &adapter->rx_queues[tx_que->msix].que_irq,
2032 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2033 	}
2034 	rid = vector + 1;
2035 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2036 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2037 	if (error) {
2038 		device_printf(iflib_get_dev(ctx),
2039 		    "Failed to register admin handler");
2040 		return (error);
2041 	}
2042 
2043 	adapter->vector = vector;
2044 
2045 	return (0);
2046 fail:
2047 	iflib_irq_free(ctx, &adapter->irq);
2048 	rx_que = adapter->rx_queues;
2049 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2050 		iflib_irq_free(ctx, &rx_que->que_irq);
2051 
2052 	return (error);
2053 } /* ixgbe_if_msix_intr_assign */
2054 
2055 /*********************************************************************
2056  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2057  **********************************************************************/
2058 static int
2059 ixgbe_msix_que(void *arg)
2060 {
2061 	struct ix_rx_queue *que = arg;
2062 	struct adapter     *adapter = que->adapter;
2063 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2064 
2065 	/* Protect against spurious interrupts */
2066 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2067 		return (FILTER_HANDLED);
2068 
2069 	ixgbe_disable_queue(adapter, que->msix);
2070 	++que->irqs;
2071 
2072 	return (FILTER_SCHEDULE_THREAD);
2073 } /* ixgbe_msix_que */
2074 
2075 /************************************************************************
2076  * ixgbe_media_status - Media Ioctl callback
2077  *
2078  *   Called whenever the user queries the status of
2079  *   the interface using ifconfig.
2080  ************************************************************************/
2081 static void
2082 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2083 {
2084 	struct adapter  *adapter = iflib_get_softc(ctx);
2085 	struct ixgbe_hw *hw = &adapter->hw;
2086 	int             layer;
2087 
2088 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2089 
2090 	ifmr->ifm_status = IFM_AVALID;
2091 	ifmr->ifm_active = IFM_ETHER;
2092 
2093 	if (!adapter->link_active)
2094 		return;
2095 
2096 	ifmr->ifm_status |= IFM_ACTIVE;
2097 	layer = adapter->phy_layer;
2098 
2099 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2100 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2101 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2102 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2103 		switch (adapter->link_speed) {
2104 		case IXGBE_LINK_SPEED_10GB_FULL:
2105 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2106 			break;
2107 		case IXGBE_LINK_SPEED_1GB_FULL:
2108 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2109 			break;
2110 		case IXGBE_LINK_SPEED_100_FULL:
2111 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2112 			break;
2113 		case IXGBE_LINK_SPEED_10_FULL:
2114 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2115 			break;
2116 		}
2117 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2118 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2119 		switch (adapter->link_speed) {
2120 		case IXGBE_LINK_SPEED_10GB_FULL:
2121 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2122 			break;
2123 		}
2124 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2125 		switch (adapter->link_speed) {
2126 		case IXGBE_LINK_SPEED_10GB_FULL:
2127 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2128 			break;
2129 		case IXGBE_LINK_SPEED_1GB_FULL:
2130 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2131 			break;
2132 		}
2133 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2134 		switch (adapter->link_speed) {
2135 		case IXGBE_LINK_SPEED_10GB_FULL:
2136 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2137 			break;
2138 		case IXGBE_LINK_SPEED_1GB_FULL:
2139 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2140 			break;
2141 		}
2142 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2143 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2144 		switch (adapter->link_speed) {
2145 		case IXGBE_LINK_SPEED_10GB_FULL:
2146 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2147 			break;
2148 		case IXGBE_LINK_SPEED_1GB_FULL:
2149 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2150 			break;
2151 		}
2152 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2153 		switch (adapter->link_speed) {
2154 		case IXGBE_LINK_SPEED_10GB_FULL:
2155 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2156 			break;
2157 		}
2158 	/*
2159 	 * XXX: These need to use the proper media types once
2160 	 * they're added.
2161 	 */
2162 #ifndef IFM_ETH_XTYPE
2163 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2164 		switch (adapter->link_speed) {
2165 		case IXGBE_LINK_SPEED_10GB_FULL:
2166 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2167 			break;
2168 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2169 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2170 			break;
2171 		case IXGBE_LINK_SPEED_1GB_FULL:
2172 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2173 			break;
2174 		}
2175 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2176 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2177 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2178 		switch (adapter->link_speed) {
2179 		case IXGBE_LINK_SPEED_10GB_FULL:
2180 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2181 			break;
2182 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2183 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2184 			break;
2185 		case IXGBE_LINK_SPEED_1GB_FULL:
2186 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2187 			break;
2188 		}
2189 #else
2190 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2191 		switch (adapter->link_speed) {
2192 		case IXGBE_LINK_SPEED_10GB_FULL:
2193 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2194 			break;
2195 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2196 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2197 			break;
2198 		case IXGBE_LINK_SPEED_1GB_FULL:
2199 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2200 			break;
2201 		}
2202 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2203 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2204 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2205 		switch (adapter->link_speed) {
2206 		case IXGBE_LINK_SPEED_10GB_FULL:
2207 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2208 			break;
2209 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2210 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2211 			break;
2212 		case IXGBE_LINK_SPEED_1GB_FULL:
2213 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2214 			break;
2215 		}
2216 #endif
2217 
2218 	/* If nothing is recognized... */
2219 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2220 		ifmr->ifm_active |= IFM_UNKNOWN;
2221 
2222 	/* Display current flow control setting used on link */
2223 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2224 	    hw->fc.current_mode == ixgbe_fc_full)
2225 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2226 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2227 	    hw->fc.current_mode == ixgbe_fc_full)
2228 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2229 } /* ixgbe_if_media_status */
2230 
2231 /************************************************************************
2232  * ixgbe_media_change - Media Ioctl callback
2233  *
2234  *   Called when the user changes speed/duplex using
2235  *   media/mediaopt options with ifconfig.
2236  ************************************************************************/
2237 static int
2238 ixgbe_if_media_change(if_ctx_t ctx)
2239 {
2240 	struct adapter   *adapter = iflib_get_softc(ctx);
2241 	struct ifmedia   *ifm = iflib_get_media(ctx);
2242 	struct ixgbe_hw  *hw = &adapter->hw;
2243 	ixgbe_link_speed speed = 0;
2244 
2245 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2246 
2247 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2248 		return (EINVAL);
2249 
2250 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2251 		return (EPERM);
2252 
2253 	/*
2254 	 * We don't actually need to check against the supported
2255 	 * media types of the adapter; ifmedia will take care of
2256 	 * that for us.
2257 	 */
2258 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2259 	case IFM_AUTO:
2260 	case IFM_10G_T:
2261 		speed |= IXGBE_LINK_SPEED_100_FULL;
2262 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2263 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2264 		break;
2265 	case IFM_10G_LRM:
2266 	case IFM_10G_LR:
2267 #ifndef IFM_ETH_XTYPE
2268 	case IFM_10G_SR: /* KR, too */
2269 	case IFM_10G_CX4: /* KX4 */
2270 #else
2271 	case IFM_10G_KR:
2272 	case IFM_10G_KX4:
2273 #endif
2274 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2275 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2276 		break;
2277 #ifndef IFM_ETH_XTYPE
2278 	case IFM_1000_CX: /* KX */
2279 #else
2280 	case IFM_1000_KX:
2281 #endif
2282 	case IFM_1000_LX:
2283 	case IFM_1000_SX:
2284 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2285 		break;
2286 	case IFM_1000_T:
2287 		speed |= IXGBE_LINK_SPEED_100_FULL;
2288 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2289 		break;
2290 	case IFM_10G_TWINAX:
2291 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2292 		break;
2293 	case IFM_100_TX:
2294 		speed |= IXGBE_LINK_SPEED_100_FULL;
2295 		break;
2296 	case IFM_10_T:
2297 		speed |= IXGBE_LINK_SPEED_10_FULL;
2298 		break;
2299 	default:
2300 		goto invalid;
2301 	}
2302 
2303 	hw->mac.autotry_restart = TRUE;
2304 	hw->mac.ops.setup_link(hw, speed, TRUE);
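	/*
	 * Record the advertised speeds using the same bit encoding as the
	 * driver's advertise_speed sysctl: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G,
	 * 0x8 = 10M.
	 */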
2305 	adapter->advertise =
2306 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2307 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2308 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2309 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2310 
2311 	return (0);
2312 
2313 invalid:
2314 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2315 
2316 	return (EINVAL);
2317 } /* ixgbe_if_media_change */
2318 
2319 /************************************************************************
2320  * ixgbe_set_promisc
2321  ************************************************************************/
2322 static int
2323 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2324 {
2325 	struct adapter *adapter = iflib_get_softc(ctx);
2326 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2327 	u32            rctl;
2328 	int            mcnt = 0;
2329 
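	/*
	 * Start from the current FCTRL with unicast promiscuous cleared;
	 * multicast promiscuous is retained only if the multicast list
	 * would overflow the hardware filter.
	 */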
2330 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2331 	rctl &= (~IXGBE_FCTRL_UPE);
2332 	if (ifp->if_flags & IFF_ALLMULTI)
2333 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2334 	else {
2335 		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2336 	}
2337 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2338 		rctl &= (~IXGBE_FCTRL_MPE);
2339 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2340 
2341 	if (ifp->if_flags & IFF_PROMISC) {
2342 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2343 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2344 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2345 		rctl |= IXGBE_FCTRL_MPE;
2346 		rctl &= ~IXGBE_FCTRL_UPE;
2347 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2348 	}
2349 	return (0);
2350 } /* ixgbe_if_promisc_set */
2351 
2352 /************************************************************************
2353  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2354  ************************************************************************/
2355 static int
2356 ixgbe_msix_link(void *arg)
2357 {
2358 	struct adapter  *adapter = arg;
2359 	struct ixgbe_hw *hw = &adapter->hw;
2360 	u32             eicr, eicr_mask;
2361 	s32             retval;
2362 
2363 	++adapter->link_irq;
2364 
2365 	/* Pause other interrupts */
2366 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2367 
2368 	/* First get the cause */
2369 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2370 	/* Be sure the queue bits are not cleared */
2371 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2372 	/* Clear interrupt with write */
2373 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2374 
2375 	/* Link status change */
2376 	if (eicr & IXGBE_EICR_LSC) {
2377 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2378 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2379 	}
2380 
2381 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2382 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2383 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2384 			/* This is probably overkill :) */
2385 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2386 				return (FILTER_HANDLED);
2387 			/* Disable the interrupt */
2388 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2389 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2390 		} else
2391 			if (eicr & IXGBE_EICR_ECC) {
2392 				device_printf(iflib_get_dev(adapter->ctx),
2393 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2394 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2395 			}
2396 
2397 		/* Check for over temp condition */
2398 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2399 			switch (adapter->hw.mac.type) {
2400 			case ixgbe_mac_X550EM_a:
2401 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2402 					break;
2403 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2404 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2405 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2406 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2407 				retval = hw->phy.ops.check_overtemp(hw);
2408 				if (retval != IXGBE_ERR_OVERTEMP)
2409 					break;
2410 				device_printf(iflib_get_dev(adapter->ctx),
2411 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2412 				device_printf(iflib_get_dev(adapter->ctx),
2413 				    "System shutdown required!\n");
2414 				break;
2415 			default:
2416 				if (!(eicr & IXGBE_EICR_TS))
2417 					break;
2418 				retval = hw->phy.ops.check_overtemp(hw);
2419 				if (retval != IXGBE_ERR_OVERTEMP)
2420 					break;
2421 				device_printf(iflib_get_dev(adapter->ctx),
2422 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2423 				device_printf(iflib_get_dev(adapter->ctx),
2424 				    "System shutdown required!\n");
2425 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2426 				break;
2427 			}
2428 		}
2429 
2430 		/* Check for VF message */
2431 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2432 		    (eicr & IXGBE_EICR_MAILBOX))
2433 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2434 	}
2435 
2436 	if (ixgbe_is_sfp(hw)) {
2437 		/* Pluggable optics-related interrupt */
2438 		if (hw->mac.type >= ixgbe_mac_X540)
2439 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2440 		else
2441 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2442 
2443 		if (eicr & eicr_mask) {
2444 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2445 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2446 		}
2447 
2448 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2449 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2450 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2451 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2452 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2453 		}
2454 	}
2455 
2456 	/* Check for fan failure */
2457 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2458 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2459 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2460 	}
2461 
2462 	/* External PHY interrupt */
2463 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2464 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2465 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2466 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2467 	}
2468 
2469 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2470 } /* ixgbe_msix_link */
2471 
2472 /************************************************************************
2473  * ixgbe_sysctl_interrupt_rate_handler
2474  ************************************************************************/
2475 static int
2476 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2477 {
2478 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2479 	int                error;
2480 	unsigned int       reg, usec, rate;
2481 
2482 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
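	/*
	 * The EITR interval lives in register bits 3:11; the driver treats
	 * each unit as 2 usec, so 500000 / interval gives interrupts/sec.
	 */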
2483 	usec = ((reg & 0x0FF8) >> 3);
2484 	if (usec > 0)
2485 		rate = 500000 / usec;
2486 	else
2487 		rate = 0;
2488 	error = sysctl_handle_int(oidp, &rate, 0, req);
2489 	if (error || !req->newptr)
2490 		return error;
2491 	reg &= ~0xfff; /* default, no limitation */
2492 	ixgbe_max_interrupt_rate = 0;
2493 	if (rate > 0 && rate < 500000) {
2494 		if (rate < 1000)
2495 			rate = 1000;
2496 		ixgbe_max_interrupt_rate = rate;
2497 		reg |= ((4000000/rate) & 0xff8);
2498 	}
2499 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2500 
2501 	return (0);
2502 } /* ixgbe_sysctl_interrupt_rate_handler */
2503 
2504 /************************************************************************
2505  * ixgbe_add_device_sysctls
2506  ************************************************************************/
2507 static void
2508 ixgbe_add_device_sysctls(if_ctx_t ctx)
2509 {
2510 	struct adapter         *adapter = iflib_get_softc(ctx);
2511 	device_t               dev = iflib_get_dev(ctx);
2512 	struct ixgbe_hw        *hw = &adapter->hw;
2513 	struct sysctl_oid_list *child;
2514 	struct sysctl_ctx_list *ctx_list;
2515 
2516 	ctx_list = device_get_sysctl_ctx(dev);
2517 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2518 
2519 	/* Sysctls for all devices */
2520 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2521 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2522 	    IXGBE_SYSCTL_DESC_SET_FC);
2523 
2524 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2525 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2526 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2527 
2528 #ifdef IXGBE_DEBUG
2529 	/* testing sysctls (for all devices) */
2530 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2531 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2532 	    "I", "PCI Power State");
2533 
2534 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2535 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2536 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2537 #endif
2538 	/* for X550 series devices */
2539 	if (hw->mac.type >= ixgbe_mac_X550)
2540 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2541 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2542 		    "I", "DMA Coalesce");
2543 
2544 	/* for WoL-capable devices */
2545 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2546 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2547 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2548 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2549 
2550 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2551 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2552 		    "I", "Enable/Disable Wake Up Filters");
2553 	}
2554 
2555 	/* for X552/X557-AT devices */
2556 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2557 		struct sysctl_oid *phy_node;
2558 		struct sysctl_oid_list *phy_list;
2559 
2560 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2561 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2562 		phy_list = SYSCTL_CHILDREN(phy_node);
2563 
2564 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2565 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2566 		    "I", "Current External PHY Temperature (Celsius)");
2567 
2568 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2569 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2570 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2571 		    "External PHY High Temperature Event Occurred");
2572 	}
2573 
2574 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2575 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2576 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2577 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2578 	}
2579 } /* ixgbe_add_device_sysctls */
2580 
2581 /************************************************************************
2582  * ixgbe_allocate_pci_resources
2583  ************************************************************************/
2584 static int
2585 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2586 {
2587 	struct adapter *adapter = iflib_get_softc(ctx);
2588 	device_t        dev = iflib_get_dev(ctx);
2589 	int             rid;
2590 
2591 	rid = PCIR_BAR(0);
2592 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2593 	    RF_ACTIVE);
2594 
2595 	if (!(adapter->pci_mem)) {
2596 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2597 		return (ENXIO);
2598 	}
2599 
2600 	/* Save bus_space values for READ/WRITE_REG macros */
2601 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2602 	adapter->osdep.mem_bus_space_handle =
2603 	    rman_get_bushandle(adapter->pci_mem);
2604 	/* Set hw values for shared code */
2605 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2606 
2607 	return (0);
2608 } /* ixgbe_allocate_pci_resources */
2609 
2610 /************************************************************************
2611  * ixgbe_detach - Device removal routine
2612  *
2613  *   Called when the driver is being removed.
2614  *   Stops the adapter and deallocates all the resources
2615  *   that were allocated for driver operation.
2616  *
2617  *   return 0 on success, positive on failure
2618  ************************************************************************/
2619 static int
2620 ixgbe_if_detach(if_ctx_t ctx)
2621 {
2622 	struct adapter *adapter = iflib_get_softc(ctx);
2623 	device_t       dev = iflib_get_dev(ctx);
2624 	u32            ctrl_ext;
2625 
2626 	INIT_DEBUGOUT("ixgbe_detach: begin");
2627 
2628 	if (ixgbe_pci_iov_detach(dev) != 0) {
2629 		device_printf(dev, "SR-IOV in use; detach first.\n");
2630 		return (EBUSY);
2631 	}
2632 
2633 	ixgbe_setup_low_power_mode(ctx);
2634 
2635 	/* let hardware know driver is unloading */
2636 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2637 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2638 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2639 
2640 	ixgbe_free_pci_resources(ctx);
2641 	free(adapter->mta, M_IXGBE);
2642 
2643 	return (0);
2644 } /* ixgbe_if_detach */
2645 
2646 /************************************************************************
2647  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2648  *
2649  *   Prepare the adapter/port for LPLU and/or WoL
2650  ************************************************************************/
2651 static int
2652 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2653 {
2654 	struct adapter  *adapter = iflib_get_softc(ctx);
2655 	struct ixgbe_hw *hw = &adapter->hw;
2656 	device_t        dev = iflib_get_dev(ctx);
2657 	s32             error = 0;
2658 
2659 	if (!hw->wol_enabled)
2660 		ixgbe_set_phy_power(hw, FALSE);
2661 
2662 	/* Limit power management flow to X550EM baseT */
2663 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2664 	    hw->phy.ops.enter_lplu) {
2665 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2666 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2667 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2668 
2669 		/*
2670 		 * Clear Wake Up Status register to prevent any previous wakeup
2671 		 * events from waking us up immediately after we suspend.
2672 		 */
2673 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2674 
2675 		/*
2676 		 * Program the Wakeup Filter Control register with user filter
2677 		 * settings
2678 		 */
2679 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2680 
2681 		/* Enable wakeups and power management in Wakeup Control */
2682 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2683 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2684 
2685 		/* X550EM baseT adapters need a special LPLU flow */
2686 		hw->phy.reset_disable = TRUE;
2687 		ixgbe_if_stop(ctx);
2688 		error = hw->phy.ops.enter_lplu(hw);
2689 		if (error)
2690 			device_printf(dev, "Error entering LPLU: %d\n", error);
2691 		hw->phy.reset_disable = FALSE;
2692 	} else {
2693 		/* Just stop for other adapters */
2694 		ixgbe_if_stop(ctx);
2695 	}
2696 
2697 	return error;
2698 } /* ixgbe_setup_low_power_mode */
2699 
2700 /************************************************************************
2701  * ixgbe_shutdown - Shutdown entry point
2702  ************************************************************************/
2703 static int
2704 ixgbe_if_shutdown(if_ctx_t ctx)
2705 {
2706 	int error = 0;
2707 
2708 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2709 
2710 	error = ixgbe_setup_low_power_mode(ctx);
2711 
2712 	return (error);
2713 } /* ixgbe_if_shutdown */
2714 
2715 /************************************************************************
2716  * ixgbe_suspend
2717  *
2718  *   From D0 to D3
2719  ************************************************************************/
2720 static int
2721 ixgbe_if_suspend(if_ctx_t ctx)
2722 {
2723 	int error = 0;
2724 
2725 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2726 
2727 	error = ixgbe_setup_low_power_mode(ctx);
2728 
2729 	return (error);
2730 } /* ixgbe_if_suspend */
2731 
2732 /************************************************************************
2733  * ixgbe_resume
2734  *
2735  *   From D3 to D0
2736  ************************************************************************/
2737 static int
2738 ixgbe_if_resume(if_ctx_t ctx)
2739 {
2740 	struct adapter  *adapter = iflib_get_softc(ctx);
2741 	device_t        dev = iflib_get_dev(ctx);
2742 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2743 	struct ixgbe_hw *hw = &adapter->hw;
2744 	u32             wus;
2745 
2746 	INIT_DEBUGOUT("ixgbe_resume: begin");
2747 
2748 	/* Read & clear WUS register */
2749 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2750 	if (wus)
2751 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2752 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2753 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2754 	/* And clear WUFC until next low-power transition */
2755 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2756 
2757 	/*
2758 	 * Required after D3->D0 transition;
2759 	 * will re-advertise all previous advertised speeds
2760 	 */
2761 	if (ifp->if_flags & IFF_UP)
2762 		ixgbe_if_init(ctx);
2763 
2764 	return (0);
2765 } /* ixgbe_if_resume */
2766 
2767 /************************************************************************
2768  * ixgbe_if_mtu_set - Ioctl mtu entry point
2769  *
2770  *   Return 0 on success, EINVAL on failure
2771  ************************************************************************/
2772 static int
2773 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2774 {
2775 	struct adapter *adapter = iflib_get_softc(ctx);
2776 	int error = 0;
2777 
2778 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2779 
2780 	if (mtu > IXGBE_MAX_MTU) {
2781 		error = EINVAL;
2782 	} else {
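		/* IXGBE_MTU_HDR accounts for the link-layer framing overhead added to the MTU */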
2783 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2784 	}
2785 
2786 	return error;
2787 } /* ixgbe_if_mtu_set */
2788 
2789 /************************************************************************
2790  * ixgbe_if_crcstrip_set
2791  ************************************************************************/
2792 static void
2793 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2794 {
2795 	struct adapter *sc = iflib_get_softc(ctx);
2796 	struct ixgbe_hw *hw = &sc->hw;
2797 	/* crc stripping is set in two places:
2798 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2799 	 * IXGBE_RDRXCTL (set by the original driver in
2800 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2801 	 *	We disable the setting when netmap is compiled in).
2802 	 * We update the values here, but also in ixgbe.c because
2803 	 * init_locked sometimes is called outside our control.
2804 	 */
2805 	uint32_t hl, rxc;
2806 
2807 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2808 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2809 #ifdef NETMAP
2810 	if (netmap_verbose)
2811 		D("%s read  HLREG 0x%x rxc 0x%x",
2812 			onoff ? "enter" : "exit", hl, rxc);
2813 #endif
2814 	/* hw requirements ... */
2815 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2816 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2817 	if (onoff && !crcstrip) {
2818 		/* keep the crc. Fast rx */
2819 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2820 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2821 	} else {
2822 		/* reset default mode */
2823 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2824 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2825 	}
2826 #ifdef NETMAP
2827 	if (netmap_verbose)
2828 		D("%s write HLREG 0x%x rxc 0x%x",
2829 			onoff ? "enter" : "exit", hl, rxc);
2830 #endif
2831 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2832 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2833 } /* ixgbe_if_crcstrip_set */
2834 
2835 /*********************************************************************
2836  * ixgbe_if_init - Init entry point
2837  *
2838  *   Used in two ways: It is used by the stack as an init
2839  *   entry point in the network interface structure. It is also
2840  *   used by the driver as a hw/sw initialization routine to
2841  *   get to a consistent state.
2842  *
2843  *   Return 0 on success, positive on failure
2844  **********************************************************************/
2845 void
2846 ixgbe_if_init(if_ctx_t ctx)
2847 {
2848 	struct adapter     *adapter = iflib_get_softc(ctx);
2849 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2850 	device_t           dev = iflib_get_dev(ctx);
2851 	struct ixgbe_hw *hw = &adapter->hw;
2852 	struct ix_rx_queue *rx_que;
2853 	struct ix_tx_queue *tx_que;
2854 	u32             txdctl, mhadd;
2855 	u32             rxdctl, rxctrl;
2856 	u32             ctrl_ext;
2857 
2858 	int             i, j, err;
2859 
2860 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2861 
2862 	/* Queue indices may change with IOV mode */
2863 	ixgbe_align_all_queue_indices(adapter);
2864 
2865 	/* reprogram the RAR[0] in case user changed it. */
2866 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2867 
2868 	/* Get the latest mac address, User can use a LAA */
2869 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2870 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2871 	hw->addr_ctrl.rar_used_count = 1;
2872 
2873 	ixgbe_init_hw(hw);
2874 
2875 	ixgbe_initialize_iov(adapter);
2876 
2877 	ixgbe_initialize_transmit_units(ctx);
2878 
2879 	/* Setup Multicast table */
2880 	ixgbe_if_multi_set(ctx);
2881 
2882 	/* Determine the correct mbuf pool, based on frame size */
2883 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2884 
2885 	/* Configure RX settings */
2886 	ixgbe_initialize_receive_units(ctx);
2887 
2888 	/*
2889 	 * Initialize variable holding task enqueue requests
2890 	 * from MSI-X interrupts
2891 	 */
2892 	adapter->task_requests = 0;
2893 
2894 	/* Enable SDP & MSI-X interrupts based on adapter */
2895 	ixgbe_config_gpie(adapter);
2896 
2897 	/* Set MTU size */
2898 	if (ifp->if_mtu > ETHERMTU) {
2899 		/* aka IXGBE_MAXFRS on 82599 and newer */
2900 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2901 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2902 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2903 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2904 	}
2905 
2906 	/* Now enable all the queues */
2907 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2908 		struct tx_ring *txr = &tx_que->txr;
2909 
2910 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2911 		txdctl |= IXGBE_TXDCTL_ENABLE;
2912 		/* Set WTHRESH to 8, burst writeback */
2913 		txdctl |= (8 << 16);
2914 		/*
2915 		 * When the internal queue falls below PTHRESH (32),
2916 		 * start prefetching as long as there are at least
2917 		 * HTHRESH (1) buffers ready. The values are taken
2918 		 * from the Intel linux driver 3.8.21.
2919 		 * Prefetching enables tx line rate even with 1 queue.
2920 		 */
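		/*
		 * Field placement follows the shifts used here: PTHRESH in
		 * the low byte, HTHRESH starting at bit 8, WTHRESH at bit 16.
		 */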
2921 		txdctl |= (32 << 0) | (1 << 8);
2922 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2923 	}
2924 
2925 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2926 		struct rx_ring *rxr = &rx_que->rxr;
2927 
2928 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2929 		if (hw->mac.type == ixgbe_mac_82598EB) {
2930 			/*
2931 			 * PTHRESH = 21
2932 			 * HTHRESH = 4
2933 			 * WTHRESH = 8
2934 			 */
2935 			rxdctl &= ~0x3FFFFF;
2936 			rxdctl |= 0x080420;
2937 		}
2938 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2939 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2940 		for (j = 0; j < 10; j++) {
2941 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2942 			    IXGBE_RXDCTL_ENABLE)
2943 				break;
2944 			else
2945 				msec_delay(1);
2946 		}
2947 		wmb();
2948 	}
2949 
2950 	/* Enable Receive engine */
2951 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2952 	if (hw->mac.type == ixgbe_mac_82598EB)
2953 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2954 	rxctrl |= IXGBE_RXCTRL_RXEN;
2955 	ixgbe_enable_rx_dma(hw, rxctrl);
2956 
2957 	/* Set up MSI/MSI-X routing */
2958 	if (ixgbe_enable_msix)  {
2959 		ixgbe_configure_ivars(adapter);
2960 		/* Set up auto-mask */
2961 		if (hw->mac.type == ixgbe_mac_82598EB)
2962 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2963 		else {
2964 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2965 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2966 		}
2967 	} else {  /* Simple settings for Legacy/MSI */
2968 		ixgbe_set_ivar(adapter, 0, 0, 0);
2969 		ixgbe_set_ivar(adapter, 0, 0, 1);
2970 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2971 	}
2972 
2973 	ixgbe_init_fdir(adapter);
2974 
2975 	/*
2976 	 * Check on any SFP devices that
2977 	 * need to be kick-started
2978 	 */
2979 	if (hw->phy.type == ixgbe_phy_none) {
2980 		err = hw->phy.ops.identify(hw);
2981 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2982 			device_printf(dev,
2983 			    "Unsupported SFP+ module type was detected.\n");
2984 			return;
2985 		}
2986 	}
2987 
2988 	/* Set moderation on the Link interrupt */
2989 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
2990 
2991 	/* Enable power to the phy. */
2992 	ixgbe_set_phy_power(hw, TRUE);
2993 
2994 	/* Config/Enable Link */
2995 	ixgbe_config_link(ctx);
2996 
2997 	/* Hardware Packet Buffer & Flow Control setup */
2998 	ixgbe_config_delay_values(adapter);
2999 
3000 	/* Initialize the FC settings */
3001 	ixgbe_start_hw(hw);
3002 
3003 	/* Set up VLAN support and filter */
3004 	ixgbe_setup_vlan_hw_support(ctx);
3005 
3006 	/* Setup DMA Coalescing */
3007 	ixgbe_config_dmac(adapter);
3008 
3009 	/* And now turn on interrupts */
3010 	ixgbe_if_enable_intr(ctx);
3011 
3012 	/* Enable the use of the MBX by the VF's */
3013 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3014 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3015 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3016 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3017 	}
3018 
3019 } /* ixgbe_if_init */
3020 
3021 /************************************************************************
3022  * ixgbe_set_ivar
3023  *
3024  *   Setup the correct IVAR register for a particular MSI-X interrupt
3025  *     (yes this is all very magic and confusing :)
3026  *    - entry is the register array entry
3027  *    - vector is the MSI-X vector for this queue
3028  *    - type is RX/TX/MISC
3029  ************************************************************************/
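/*
 * Mapping sketch, derived from the shifts below: on 82599 and later MACs
 * each 32-bit IVAR register covers two queue indices, one byte for the RX
 * cause and one for the TX cause of each.  For example, entry 3 with
 * type 0 (RX) lands in IVAR(1) bits 23:16, and entry 3 with type 1 (TX)
 * lands in IVAR(1) bits 31:24.  On the 82598 the RX and TX causes are
 * instead two linear arrays of 64 entries (hence "entry += type * 64"),
 * packed four entries per IVAR register.
 */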
3030 static void
3031 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3032 {
3033 	struct ixgbe_hw *hw = &adapter->hw;
3034 	u32 ivar, index;
3035 
3036 	vector |= IXGBE_IVAR_ALLOC_VAL;
3037 
3038 	switch (hw->mac.type) {
3039 	case ixgbe_mac_82598EB:
3040 		if (type == -1)
3041 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3042 		else
3043 			entry += (type * 64);
3044 		index = (entry >> 2) & 0x1F;
3045 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3046 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3047 		ivar |= (vector << (8 * (entry & 0x3)));
3048 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3049 		break;
3050 	case ixgbe_mac_82599EB:
3051 	case ixgbe_mac_X540:
3052 	case ixgbe_mac_X550:
3053 	case ixgbe_mac_X550EM_x:
3054 	case ixgbe_mac_X550EM_a:
3055 		if (type == -1) { /* MISC IVAR */
3056 			index = (entry & 1) * 8;
3057 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3058 			ivar &= ~(0xFF << index);
3059 			ivar |= (vector << index);
3060 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3061 		} else {          /* RX/TX IVARS */
3062 			index = (16 * (entry & 1)) + (8 * type);
3063 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3064 			ivar &= ~(0xFF << index);
3065 			ivar |= (vector << index);
3066 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3067 		}
		break;
3068 	default:
3069 		break;
3070 	}
3071 } /* ixgbe_set_ivar */
3072 
3073 /************************************************************************
3074  * ixgbe_configure_ivars
3075  ************************************************************************/
3076 static void
3077 ixgbe_configure_ivars(struct adapter *adapter)
3078 {
3079 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3080 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3081 	u32                newitr;
3082 
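	/*
	 * Convert the requested maximum interrupt rate into an EITR
	 * register value; the 0x0FF8 mask keeps only the interval field
	 * (bits 11:3).  For example, a limit of 31250 irqs/sec yields
	 * 4000000 / 31250 = 128 (0x80).
	 */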
3083 	if (ixgbe_max_interrupt_rate > 0)
3084 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3085 	else {
3086 		/*
3087 		 * Disable DMA coalescing if interrupt moderation is
3088 		 * disabled.
3089 		 */
3090 		adapter->dmac = 0;
3091 		newitr = 0;
3092 	}
3093 
3094 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3095 		struct rx_ring *rxr = &rx_que->rxr;
3096 
3097 		/* First the RX queue entry */
3098 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3099 
3100 		/* Set an Initial EITR value */
3101 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3102 	}
3103 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3104 		struct tx_ring *txr = &tx_que->txr;
3105 
3106 		/* ... and the TX */
3107 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3108 	}
3109 	/* For the Link interrupt */
3110 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3111 } /* ixgbe_configure_ivars */
3112 
3113 /************************************************************************
3114  * ixgbe_config_gpie
3115  ************************************************************************/
3116 static void
3117 ixgbe_config_gpie(struct adapter *adapter)
3118 {
3119 	struct ixgbe_hw *hw = &adapter->hw;
3120 	u32             gpie;
3121 
3122 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3123 
3124 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3125 		/* Enable Enhanced MSI-X mode */
3126 		gpie |= IXGBE_GPIE_MSIX_MODE
3127 		     |  IXGBE_GPIE_EIAME
3128 		     |  IXGBE_GPIE_PBA_SUPPORT
3129 		     |  IXGBE_GPIE_OCD;
3130 	}
3131 
3132 	/* Fan Failure Interrupt */
3133 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3134 		gpie |= IXGBE_SDP1_GPIEN;
3135 
3136 	/* Thermal Sensor Interrupt */
3137 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3138 		gpie |= IXGBE_SDP0_GPIEN_X540;
3139 
3140 	/* Link detection */
3141 	switch (hw->mac.type) {
3142 	case ixgbe_mac_82599EB:
3143 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3144 		break;
3145 	case ixgbe_mac_X550EM_x:
3146 	case ixgbe_mac_X550EM_a:
3147 		gpie |= IXGBE_SDP0_GPIEN_X540;
3148 		break;
3149 	default:
3150 		break;
3151 	}
3152 
3153 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3154 
3155 } /* ixgbe_config_gpie */
3156 
3157 /************************************************************************
3158  * ixgbe_config_delay_values
3159  *
3160  *   Requires adapter->max_frame_size to be set.
3161  ************************************************************************/
3162 static void
3163 ixgbe_config_delay_values(struct adapter *adapter)
3164 {
3165 	struct ixgbe_hw *hw = &adapter->hw;
3166 	u32             rxpb, frame, size, tmp;
3167 
3168 	frame = adapter->max_frame_size;
3169 
3170 	/* Calculate High Water */
3171 	switch (hw->mac.type) {
3172 	case ixgbe_mac_X540:
3173 	case ixgbe_mac_X550:
3174 	case ixgbe_mac_X550EM_x:
3175 	case ixgbe_mac_X550EM_a:
3176 		tmp = IXGBE_DV_X540(frame, frame);
3177 		break;
3178 	default:
3179 		tmp = IXGBE_DV(frame, frame);
3180 		break;
3181 	}
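	/*
	 * High water mark: the RX packet buffer size (scaled to KB by the
	 * >> 10) minus the delay value for a full-sized frame, itself
	 * converted to KB by IXGBE_BT2KB().
	 */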
3182 	size = IXGBE_BT2KB(tmp);
3183 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3184 	hw->fc.high_water[0] = rxpb - size;
3185 
3186 	/* Now calculate Low Water */
3187 	switch (hw->mac.type) {
3188 	case ixgbe_mac_X540:
3189 	case ixgbe_mac_X550:
3190 	case ixgbe_mac_X550EM_x:
3191 	case ixgbe_mac_X550EM_a:
3192 		tmp = IXGBE_LOW_DV_X540(frame);
3193 		break;
3194 	default:
3195 		tmp = IXGBE_LOW_DV(frame);
3196 		break;
3197 	}
3198 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3199 
3200 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3201 	hw->fc.send_xon = TRUE;
3202 } /* ixgbe_config_delay_values */
3203 
3204 /************************************************************************
3205  * ixgbe_if_multi_set - Multicast Update
3206  *
3207  *   Called whenever the multicast address list is updated;
3208  *   ixgbe_mc_filter_apply() below is the per-address helper.
3208  ************************************************************************/
3209 static int
3210 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3211 {
3212 	struct adapter *adapter = arg;
3213 	struct ixgbe_mc_addr *mta = adapter->mta;
3214 
3215 	if (ifma->ifma_addr->sa_family != AF_LINK)
3216 		return (0);
3217 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3218 		return (0);
3219 	bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3220 	    mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3221 	mta[count].vmdq = adapter->pool;
3222 
3223 	return (1);
3224 } /* ixgbe_mc_filter_apply */
3225 
3226 static void
3227 ixgbe_if_multi_set(if_ctx_t ctx)
3228 {
3229 	struct adapter       *adapter = iflib_get_softc(ctx);
3230 	struct ixgbe_mc_addr *mta;
3231 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3232 	u8                   *update_ptr;
3233 	int                  mcnt = 0;
3234 	u32                  fctrl;
3235 
3236 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3237 
3238 	mta = adapter->mta;
3239 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3240 
3241 	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3242 
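	/*
	 * FCTRL policy below: promiscuous mode sets both the unicast (UPE)
	 * and multicast (MPE) promiscuous bits; IFF_ALLMULTI or an
	 * overflowing multicast list sets MPE only; otherwise both bits
	 * are cleared and the MTA filter programmed below is used.
	 */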
3243 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3244 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3245 	if (ifp->if_flags & IFF_PROMISC)
3246 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3247 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3248 	    ifp->if_flags & IFF_ALLMULTI) {
3249 		fctrl |= IXGBE_FCTRL_MPE;
3250 		fctrl &= ~IXGBE_FCTRL_UPE;
3251 	} else
3252 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3253 
3254 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3255 
3256 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3257 		update_ptr = (u8 *)mta;
3258 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3259 		    ixgbe_mc_array_itr, TRUE);
3260 	}
3261 
3262 } /* ixgbe_if_multi_set */
3263 
3264 /************************************************************************
3265  * ixgbe_mc_array_itr
3266  *
3267  *   An iterator function needed by the multicast shared code.
3268  *   It feeds the shared code routine the addresses in the
3269  *   array built by ixgbe_if_multi_set() one by one.
3270  ************************************************************************/
3271 static u8 *
3272 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3273 {
3274 	struct ixgbe_mc_addr *mta;
3275 
3276 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3277 	*vmdq = mta->vmdq;
3278 
3279 	*update_ptr = (u8*)(mta + 1);
3280 
3281 	return (mta->addr);
3282 } /* ixgbe_mc_array_itr */
3283 
3284 /************************************************************************
3285  * ixgbe_if_timer - Timer routine
3286  *
3287  *   Periodic callout: probes for pluggable optics, checks the
3288  *   link state, and defers further processing to the admin task.
3289  ************************************************************************/
3290 static void
3291 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3292 {
3293 	struct adapter *adapter = iflib_get_softc(ctx);
3294 
3295 	if (qid != 0)
3296 		return;
3297 
3298 	/* Check for pluggable optics */
3299 	if (adapter->sfp_probe)
3300 		if (!ixgbe_sfp_probe(ctx))
3301 			return; /* Nothing to do */
3302 
3303 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3304 	    &adapter->link_up, 0);
3305 
3306 	/* Fire off the adminq task */
3307 	iflib_admin_intr_deferred(ctx);
3308 
3309 } /* ixgbe_if_timer */
3310 
3311 /************************************************************************
3312  * ixgbe_sfp_probe
3313  *
3314  *   Determine if a port had optics inserted.
3315  ************************************************************************/
3316 static bool
3317 ixgbe_sfp_probe(if_ctx_t ctx)
3318 {
3319 	struct adapter  *adapter = iflib_get_softc(ctx);
3320 	struct ixgbe_hw *hw = &adapter->hw;
3321 	device_t        dev = iflib_get_dev(ctx);
3322 	bool            result = FALSE;
3323 
3324 	if ((hw->phy.type == ixgbe_phy_nl) &&
3325 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3326 		s32 ret = hw->phy.ops.identify_sfp(hw);
3327 		if (ret)
3328 			goto out;
3329 		ret = hw->phy.ops.reset(hw);
3330 		adapter->sfp_probe = FALSE;
3331 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3332 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3333 			device_printf(dev,
3334 			    "Reload driver with supported module.\n");
3335 			goto out;
3336 		} else
3337 			device_printf(dev, "SFP+ module detected!\n");
3338 		/* We now have supported optics */
3339 		result = TRUE;
3340 	}
3341 out:
3342 
3343 	return (result);
3344 } /* ixgbe_sfp_probe */
3345 
3346 /************************************************************************
3347  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3348  ************************************************************************/
3349 static void
3350 ixgbe_handle_mod(void *context)
3351 {
3352 	if_ctx_t        ctx = context;
3353 	struct adapter  *adapter = iflib_get_softc(ctx);
3354 	struct ixgbe_hw *hw = &adapter->hw;
3355 	device_t        dev = iflib_get_dev(ctx);
3356 	u32             err, cage_full = 0;
3357 
3358 	if (adapter->hw.need_crosstalk_fix) {
3359 		switch (hw->mac.type) {
3360 		case ixgbe_mac_82599EB:
3361 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3362 			    IXGBE_ESDP_SDP2;
3363 			break;
3364 		case ixgbe_mac_X550EM_x:
3365 		case ixgbe_mac_X550EM_a:
3366 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3367 			    IXGBE_ESDP_SDP0;
3368 			break;
3369 		default:
3370 			break;
3371 		}
3372 
3373 		if (!cage_full)
3374 			goto handle_mod_out;
3375 	}
3376 
3377 	err = hw->phy.ops.identify_sfp(hw);
3378 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3379 		device_printf(dev,
3380 		    "Unsupported SFP+ module type was detected.\n");
3381 		goto handle_mod_out;
3382 	}
3383 
3384 	if (hw->mac.type == ixgbe_mac_82598EB)
3385 		err = hw->phy.ops.reset(hw);
3386 	else
3387 		err = hw->mac.ops.setup_sfp(hw);
3388 
3389 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3390 		device_printf(dev,
3391 		    "Setup failure - unsupported SFP+ module type.\n");
3392 		goto handle_mod_out;
3393 	}
3394 	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3395 	return;
3396 
3397 handle_mod_out:
3398 	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3399 } /* ixgbe_handle_mod */
3400 
3401 
3402 /************************************************************************
3403  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3404  ************************************************************************/
3405 static void
3406 ixgbe_handle_msf(void *context)
3407 {
3408 	if_ctx_t        ctx = context;
3409 	struct adapter  *adapter = iflib_get_softc(ctx);
3410 	struct ixgbe_hw *hw = &adapter->hw;
3411 	u32             autoneg;
3412 	bool            negotiate;
3413 
3414 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3415 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3416 
3417 	autoneg = hw->phy.autoneg_advertised;
3418 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3419 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3420 	if (hw->mac.ops.setup_link)
3421 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3422 
3423 	/* Adjust media types shown in ifconfig */
3424 	ifmedia_removeall(adapter->media);
3425 	ixgbe_add_media_types(adapter->ctx);
3426 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3427 } /* ixgbe_handle_msf */
3428 
3429 /************************************************************************
3430  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3431  ************************************************************************/
3432 static void
3433 ixgbe_handle_phy(void *context)
3434 {
3435 	if_ctx_t        ctx = context;
3436 	struct adapter  *adapter = iflib_get_softc(ctx);
3437 	struct ixgbe_hw *hw = &adapter->hw;
3438 	int             error;
3439 
3440 	error = hw->phy.ops.handle_lasi(hw);
3441 	if (error == IXGBE_ERR_OVERTEMP)
3442 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3443 	else if (error)
3444 		device_printf(adapter->dev,
3445 		    "Error handling LASI interrupt: %d\n", error);
3446 } /* ixgbe_handle_phy */
3447 
3448 /************************************************************************
3449  * ixgbe_if_stop - Stop the hardware
3450  *
3451  *   Disables all traffic on the adapter by issuing a
3452  *   global reset on the MAC and deallocates TX/RX buffers.
3453  ************************************************************************/
3454 static void
3455 ixgbe_if_stop(if_ctx_t ctx)
3456 {
3457 	struct adapter  *adapter = iflib_get_softc(ctx);
3458 	struct ixgbe_hw *hw = &adapter->hw;
3459 
3460 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3461 
3462 	ixgbe_reset_hw(hw);
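	/*
	 * Clear adapter_stopped before calling ixgbe_stop_adapter(); the
	 * shared code sets it back to TRUE as part of its stop sequence.
	 */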
3463 	hw->adapter_stopped = FALSE;
3464 	ixgbe_stop_adapter(hw);
3465 	if (hw->mac.type == ixgbe_mac_82599EB)
3466 		ixgbe_stop_mac_link_on_d3_82599(hw);
3467 	/* Turn off the laser - noop with no optics */
3468 	ixgbe_disable_tx_laser(hw);
3469 
3470 	/* Update the stack */
3471 	adapter->link_up = FALSE;
3472 	ixgbe_if_update_admin_status(ctx);
3473 
3474 	/* reprogram the RAR[0] in case user changed it. */
3475 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3476 
3477 	return;
3478 } /* ixgbe_if_stop */
3479 
3480 /************************************************************************
3481  * ixgbe_if_update_admin_status - Update OS on link state
3482  *
3483  * Note: Only updates the OS on the cached link state.
3484  *       The real check of the hardware only happens with
3485  *       a link interrupt.
3486  ************************************************************************/
3487 static void
3488 ixgbe_if_update_admin_status(if_ctx_t ctx)
3489 {
3490 	struct adapter *adapter = iflib_get_softc(ctx);
3491 	device_t       dev = iflib_get_dev(ctx);
3492 
3493 	if (adapter->link_up) {
3494 		if (adapter->link_active == FALSE) {
3495 			if (bootverbose)
3496 				device_printf(dev, "Link is up %d Gbps %s \n",
3497 				    ((adapter->link_speed == 128) ? 10 : 1),
3498 				    "Full Duplex");
3499 			adapter->link_active = TRUE;
3500 			/* Update any Flow Control changes */
3501 			ixgbe_fc_enable(&adapter->hw);
3502 			/* Update DMA coalescing config */
3503 			ixgbe_config_dmac(adapter);
3504 			/* should actually be negotiated value */
3505 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3506 
3507 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3508 				ixgbe_ping_all_vfs(adapter);
3509 		}
3510 	} else { /* Link down */
3511 		if (adapter->link_active == TRUE) {
3512 			if (bootverbose)
3513 				device_printf(dev, "Link is Down\n");
3514 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3515 			adapter->link_active = FALSE;
3516 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3517 				ixgbe_ping_all_vfs(adapter);
3518 		}
3519 	}
3520 
3521 	/* Handle task requests from msix_link() */
3522 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3523 		ixgbe_handle_mod(ctx);
3524 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3525 		ixgbe_handle_msf(ctx);
3526 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3527 		ixgbe_handle_mbx(ctx);
3528 	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3529 		ixgbe_reinit_fdir(ctx);
3530 	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3531 		ixgbe_handle_phy(ctx);
3532 	adapter->task_requests = 0;
3533 
3534 	ixgbe_update_stats_counters(adapter);
3535 } /* ixgbe_if_update_admin_status */
3536 
3537 /************************************************************************
3538  * ixgbe_config_dmac - Configure DMA Coalescing
3539  ************************************************************************/
3540 static void
3541 ixgbe_config_dmac(struct adapter *adapter)
3542 {
3543 	struct ixgbe_hw          *hw = &adapter->hw;
3544 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3545 
3546 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3547 		return;
3548 
3549 	if (dcfg->watchdog_timer != adapter->dmac ||
3550 	    dcfg->link_speed != adapter->link_speed) {
3551 		dcfg->watchdog_timer = adapter->dmac;
3552 		dcfg->fcoe_en = FALSE;
3553 		dcfg->link_speed = adapter->link_speed;
3554 		dcfg->num_tcs = 1;
3555 
3556 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3557 		    dcfg->watchdog_timer, dcfg->link_speed);
3558 
3559 		hw->mac.ops.dmac_config(hw);
3560 	}
3561 } /* ixgbe_config_dmac */
3562 
3563 /************************************************************************
3564  * ixgbe_if_enable_intr
3565  ************************************************************************/
3566 void
3567 ixgbe_if_enable_intr(if_ctx_t ctx)
3568 {
3569 	struct adapter     *adapter = iflib_get_softc(ctx);
3570 	struct ixgbe_hw    *hw = &adapter->hw;
3571 	struct ix_rx_queue *que = adapter->rx_queues;
3572 	u32                mask, fwsm;
3573 
3574 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3575 
3576 	switch (adapter->hw.mac.type) {
3577 	case ixgbe_mac_82599EB:
3578 		mask |= IXGBE_EIMS_ECC;
3579 		/* Temperature sensor on some adapters */
3580 		mask |= IXGBE_EIMS_GPI_SDP0;
3581 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3582 		mask |= IXGBE_EIMS_GPI_SDP1;
3583 		mask |= IXGBE_EIMS_GPI_SDP2;
3584 		break;
3585 	case ixgbe_mac_X540:
3586 		/* Detect if Thermal Sensor is enabled */
3587 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3588 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3589 			mask |= IXGBE_EIMS_TS;
3590 		mask |= IXGBE_EIMS_ECC;
3591 		break;
3592 	case ixgbe_mac_X550:
3593 		/* MAC thermal sensor is automatically enabled */
3594 		mask |= IXGBE_EIMS_TS;
3595 		mask |= IXGBE_EIMS_ECC;
3596 		break;
3597 	case ixgbe_mac_X550EM_x:
3598 	case ixgbe_mac_X550EM_a:
3599 		/* Some devices use SDP0 for important information */
3600 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3601 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3602 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3603 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3604 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3605 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3606 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3607 		mask |= IXGBE_EIMS_ECC;
3608 		break;
3609 	default:
3610 		break;
3611 	}
3612 
3613 	/* Enable Fan Failure detection */
3614 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3615 		mask |= IXGBE_EIMS_GPI_SDP1;
3616 	/* Enable SR-IOV */
3617 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3618 		mask |= IXGBE_EIMS_MAILBOX;
3619 	/* Enable Flow Director */
3620 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3621 		mask |= IXGBE_EIMS_FLOW_DIR;
3622 
3623 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3624 
3625 	/* With MSI-X we use auto clear */
3626 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3627 		mask = IXGBE_EIMS_ENABLE_MASK;
3628 		/* Don't autoclear Link */
3629 		mask &= ~IXGBE_EIMS_OTHER;
3630 		mask &= ~IXGBE_EIMS_LSC;
3631 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3632 			mask &= ~IXGBE_EIMS_MAILBOX;
3633 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3634 	}
3635 
3636 	/*
3637 	 * Now enable all queues, this is done separately to
3638 	 * allow for handling the extended (beyond 32) MSI-X
3639 	 * vectors that can be used by 82599
3640 	 */
3641 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3642 		ixgbe_enable_queue(adapter, que->msix);
3643 
3644 	IXGBE_WRITE_FLUSH(hw);
3645 
3646 } /* ixgbe_if_enable_intr */
3647 
3648 /************************************************************************
3649  * ixgbe_disable_intr
3650  ************************************************************************/
3651 static void
3652 ixgbe_if_disable_intr(if_ctx_t ctx)
3653 {
3654 	struct adapter *adapter = iflib_get_softc(ctx);
3655 
3656 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3657 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3658 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3659 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3660 	} else {
3661 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3662 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3663 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3664 	}
3665 	IXGBE_WRITE_FLUSH(&adapter->hw);
3666 
3667 } /* ixgbe_if_disable_intr */
3668 
3669 /************************************************************************
3670  * ixgbe_link_intr_enable
3671  ************************************************************************/
3672 static void
3673 ixgbe_link_intr_enable(if_ctx_t ctx)
3674 {
3675 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3676 
3677 	/* Re-enable other interrupts */
3678 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3679 } /* ixgbe_link_intr_enable */
3680 
3681 /************************************************************************
3682  * ixgbe_if_rx_queue_intr_enable
3683  ************************************************************************/
3684 static int
3685 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3686 {
3687 	struct adapter     *adapter = iflib_get_softc(ctx);
3688 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3689 
3690 	ixgbe_enable_queue(adapter, que->msix);
3691 
3692 	return (0);
3693 } /* ixgbe_if_rx_queue_intr_enable */
3694 
3695 /************************************************************************
3696  * ixgbe_enable_queue
3697  ************************************************************************/
3698 static void
3699 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3700 {
3701 	struct ixgbe_hw *hw = &adapter->hw;
3702 	u64             queue = 1ULL << vector;
3703 	u32             mask;
3704 
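	/*
	 * MACs newer than the 82598 expose more than 32 MSI-X vectors, so
	 * the enable mask is split across EIMS_EX(0) (vectors 0-31) and
	 * EIMS_EX(1) (vectors 32-63).
	 */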
3705 	if (hw->mac.type == ixgbe_mac_82598EB) {
3706 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3707 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3708 	} else {
3709 		mask = (queue & 0xFFFFFFFF);
3710 		if (mask)
3711 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3712 		mask = (queue >> 32);
3713 		if (mask)
3714 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3715 	}
3716 } /* ixgbe_enable_queue */
3717 
3718 /************************************************************************
3719  * ixgbe_disable_queue
3720  ************************************************************************/
3721 static void
3722 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3723 {
3724 	struct ixgbe_hw *hw = &adapter->hw;
3725 	u64             queue = 1ULL << vector;
3726 	u32             mask;
3727 
3728 	if (hw->mac.type == ixgbe_mac_82598EB) {
3729 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3730 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3731 	} else {
3732 		mask = (queue & 0xFFFFFFFF);
3733 		if (mask)
3734 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3735 		mask = (queue >> 32);
3736 		if (mask)
3737 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3738 	}
3739 } /* ixgbe_disable_queue */
3740 
3741 /************************************************************************
3742  * ixgbe_intr - Legacy Interrupt Service Routine
3743  ************************************************************************/
3744 int
3745 ixgbe_intr(void *arg)
3746 {
3747 	struct adapter     *adapter = arg;
3748 	struct ix_rx_queue *que = adapter->rx_queues;
3749 	struct ixgbe_hw    *hw = &adapter->hw;
3750 	if_ctx_t           ctx = adapter->ctx;
3751 	u32                eicr, eicr_mask;
3752 
3753 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3754 
3755 	++que->irqs;
3756 	if (eicr == 0) {
3757 		ixgbe_if_enable_intr(ctx);
3758 		return (FILTER_HANDLED);
3759 	}
3760 
3761 	/* Check for fan failure */
3762 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3763 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3764 		device_printf(adapter->dev,
3765 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3766 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3767 	}
3768 
3769 	/* Link status change */
3770 	if (eicr & IXGBE_EICR_LSC) {
3771 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3772 		iflib_admin_intr_deferred(ctx);
3773 	}
3774 
3775 	if (ixgbe_is_sfp(hw)) {
3776 		/* Pluggable optics-related interrupt */
3777 		if (hw->mac.type >= ixgbe_mac_X540)
3778 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3779 		else
3780 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3781 
3782 		if (eicr & eicr_mask) {
3783 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3784 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3785 		}
3786 
3787 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3788 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3789 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3790 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3791 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3792 		}
3793 	}
3794 
3795 	/* External PHY interrupt */
3796 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3797 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3798 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3799 
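	/*
	 * FILTER_SCHEDULE_THREAD tells iflib to run the deferred queue
	 * handler for this single legacy/MSI vector.
	 */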
3800 	return (FILTER_SCHEDULE_THREAD);
3801 } /* ixgbe_intr */
3802 
3803 /************************************************************************
3804  * ixgbe_free_pci_resources
3805  ************************************************************************/
3806 static void
3807 ixgbe_free_pci_resources(if_ctx_t ctx)
3808 {
3809 	struct adapter *adapter = iflib_get_softc(ctx);
3810 	struct         ix_rx_queue *que = adapter->rx_queues;
3811 	device_t       dev = iflib_get_dev(ctx);
3812 
3813 	/* Release all MSI-X queue resources */
3814 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3815 		iflib_irq_free(ctx, &adapter->irq);
3816 
3817 	if (que != NULL) {
3818 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3819 			iflib_irq_free(ctx, &que->que_irq);
3820 		}
3821 	}
3822 
3823 	if (adapter->pci_mem != NULL)
3824 		bus_release_resource(dev, SYS_RES_MEMORY,
3825 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3826 } /* ixgbe_free_pci_resources */
3827 
3828 /************************************************************************
3829  * ixgbe_sysctl_flowcntl
3830  *
3831  *   SYSCTL wrapper around setting Flow Control
3832  ************************************************************************/
3833 static int
3834 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3835 {
3836 	struct adapter *adapter;
3837 	int            error, fc;
3838 
3839 	adapter = (struct adapter *)arg1;
3840 	fc = adapter->hw.fc.current_mode;
3841 
3842 	error = sysctl_handle_int(oidp, &fc, 0, req);
3843 	if ((error) || (req->newptr == NULL))
3844 		return (error);
3845 
3846 	/* Don't bother if it's not changed */
3847 	if (fc == adapter->hw.fc.current_mode)
3848 		return (0);
3849 
3850 	return ixgbe_set_flowcntl(adapter, fc);
3851 } /* ixgbe_sysctl_flowcntl */
3852 
3853 /************************************************************************
3854  * ixgbe_set_flowcntl - Set flow control
3855  *
3856  *   Flow control values:
3857  *     0 - off
3858  *     1 - rx pause
3859  *     2 - tx pause
3860  *     3 - full
3861  ************************************************************************/
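/*
 * Example, assuming the usual "fc" sysctl node registered elsewhere in
 * this driver:
 *     sysctl dev.ix.0.fc=3    requests full (rx + tx pause) flow control
 */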
3862 static int
3863 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3864 {
3865 	switch (fc) {
3866 	case ixgbe_fc_rx_pause:
3867 	case ixgbe_fc_tx_pause:
3868 	case ixgbe_fc_full:
3869 		adapter->hw.fc.requested_mode = fc;
3870 		if (adapter->num_rx_queues > 1)
3871 			ixgbe_disable_rx_drop(adapter);
3872 		break;
3873 	case ixgbe_fc_none:
3874 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3875 		if (adapter->num_rx_queues > 1)
3876 			ixgbe_enable_rx_drop(adapter);
3877 		break;
3878 	default:
3879 		return (EINVAL);
3880 	}
3881 
3882 	/* Don't autoneg if forcing a value */
3883 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3884 	ixgbe_fc_enable(&adapter->hw);
3885 
3886 	return (0);
3887 } /* ixgbe_set_flowcntl */
3888 
3889 /************************************************************************
3890  * ixgbe_enable_rx_drop
3891  *
3892  *   Enable the hardware to drop packets when the buffer is
3893  *   full. This is useful with multiqueue, so that no single
3894  *   queue being full stalls the entire RX engine. We only
3895  *   enable this when Multiqueue is enabled AND Flow Control
3896  *   is disabled.
3897  ************************************************************************/
3898 static void
3899 ixgbe_enable_rx_drop(struct adapter *adapter)
3900 {
3901 	struct ixgbe_hw *hw = &adapter->hw;
3902 	struct rx_ring  *rxr;
3903 	u32             srrctl;
3904 
3905 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3906 		rxr = &adapter->rx_queues[i].rxr;
3907 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3908 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3909 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3910 	}
3911 
3912 	/* enable drop for each vf */
3913 	for (int i = 0; i < adapter->num_vfs; i++) {
3914 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3915 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3916 		                IXGBE_QDE_ENABLE));
3917 	}
3918 } /* ixgbe_enable_rx_drop */
3919 
3920 /************************************************************************
3921  * ixgbe_disable_rx_drop
3922  ************************************************************************/
3923 static void
3924 ixgbe_disable_rx_drop(struct adapter *adapter)
3925 {
3926 	struct ixgbe_hw *hw = &adapter->hw;
3927 	struct rx_ring  *rxr;
3928 	u32             srrctl;
3929 
3930 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3931 		rxr = &adapter->rx_queues[i].rxr;
3932 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3933 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3934 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3935 	}
3936 
3937 	/* disable drop for each vf */
3938 	for (int i = 0; i < adapter->num_vfs; i++) {
3939 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3940 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3941 	}
3942 } /* ixgbe_disable_rx_drop */
3943 
3944 /************************************************************************
3945  * ixgbe_sysctl_advertise
3946  *
3947  *   SYSCTL wrapper around setting advertised speed
3948  ************************************************************************/
3949 static int
3950 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3951 {
3952 	struct adapter *adapter;
3953 	int            error, advertise;
3954 
3955 	adapter = (struct adapter *)arg1;
3956 	advertise = adapter->advertise;
3957 
3958 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3959 	if ((error) || (req->newptr == NULL))
3960 		return (error);
3961 
3962 	return ixgbe_set_advertise(adapter, advertise);
3963 } /* ixgbe_sysctl_advertise */
3964 
3965 /************************************************************************
3966  * ixgbe_set_advertise - Control advertised link speed
3967  *
3968  *   Flags:
3969  *     0x1 - advertise 100 Mb
3970  *     0x2 - advertise 1G
3971  *     0x4 - advertise 10G
3972  *     0x8 - advertise 10 Mb (yes, Mb)
3973  ************************************************************************/
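/*
 * The flags combine as a bitmask, e.g. advertise = 0x6 requests 1G and
 * 10G, while 0x7 requests 100Mb, 1G and 10G.
 */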
3974 static int
3975 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3976 {
3977 	device_t         dev = iflib_get_dev(adapter->ctx);
3978 	struct ixgbe_hw  *hw;
3979 	ixgbe_link_speed speed = 0;
3980 	ixgbe_link_speed link_caps = 0;
3981 	s32              err = IXGBE_NOT_IMPLEMENTED;
3982 	bool             negotiate = FALSE;
3983 
3984 	/* Checks to validate new value */
3985 	if (adapter->advertise == advertise) /* no change */
3986 		return (0);
3987 
3988 	hw = &adapter->hw;
3989 
3990 	/* No speed changes for backplane media */
3991 	if (hw->phy.media_type == ixgbe_media_type_backplane)
3992 		return (ENODEV);
3993 
3994 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
3995 	      (hw->phy.multispeed_fiber))) {
3996 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
3997 		return (EINVAL);
3998 	}
3999 
4000 	if (advertise < 0x1 || advertise > 0xF) {
4001 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4002 		return (EINVAL);
4003 	}
4004 
4005 	if (hw->mac.ops.get_link_capabilities) {
4006 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4007 		    &negotiate);
4008 		if (err != IXGBE_SUCCESS) {
4009 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4010 			return (ENODEV);
4011 		}
4012 	}
4013 
4014 	/* Set new value and report new advertised mode */
4015 	if (advertise & 0x1) {
4016 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4017 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4018 			return (EINVAL);
4019 		}
4020 		speed |= IXGBE_LINK_SPEED_100_FULL;
4021 	}
4022 	if (advertise & 0x2) {
4023 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4024 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4025 			return (EINVAL);
4026 		}
4027 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4028 	}
4029 	if (advertise & 0x4) {
4030 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4031 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4032 			return (EINVAL);
4033 		}
4034 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4035 	}
4036 	if (advertise & 0x8) {
4037 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4038 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4039 			return (EINVAL);
4040 		}
4041 		speed |= IXGBE_LINK_SPEED_10_FULL;
4042 	}
4043 
4044 	hw->mac.autotry_restart = TRUE;
4045 	hw->mac.ops.setup_link(hw, speed, TRUE);
4046 	adapter->advertise = advertise;
4047 
4048 	return (0);
4049 } /* ixgbe_set_advertise */
4050 
4051 /************************************************************************
4052  * ixgbe_get_advertise - Get current advertised speed settings
4053  *
4054  *   Formatted for sysctl usage.
4055  *   Flags:
4056  *     0x1 - advertise 100 Mb
4057  *     0x2 - advertise 1G
4058  *     0x4 - advertise 10G
4059  *     0x8 - advertise 10 Mb (yes, Mb)
4060  ************************************************************************/
4061 static int
4062 ixgbe_get_advertise(struct adapter *adapter)
4063 {
4064 	struct ixgbe_hw  *hw = &adapter->hw;
4065 	int              speed;
4066 	ixgbe_link_speed link_caps = 0;
4067 	s32              err;
4068 	bool             negotiate = FALSE;
4069 
4070 	/*
4071 	 * Advertised speed means nothing unless it's copper or
4072 	 * multi-speed fiber
4073 	 */
4074 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4075 	    !(hw->phy.multispeed_fiber))
4076 		return (0);
4077 
4078 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4079 	if (err != IXGBE_SUCCESS)
4080 		return (0);
4081 
4082 	speed =
4083 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4084 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4085 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4086 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4087 
4088 	return speed;
4089 } /* ixgbe_get_advertise */
4090 
4091 /************************************************************************
4092  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4093  *
4094  *   Control values:
4095  *     0/1 - off / on (use default value of 1000)
4096  *
4097  *     Legal timer values are:
4098  *     50,100,250,500,1000,2000,5000,10000
4099  *
4100  *     Turning off interrupt moderation will also turn this off.
4101  ************************************************************************/
4102 static int
4103 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4104 {
4105 	struct adapter *adapter = (struct adapter *)arg1;
4106 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4107 	int            error;
4108 	u16            newval;
4109 
4110 	newval = adapter->dmac;
4111 	error = sysctl_handle_16(oidp, &newval, 0, req);
4112 	if ((error) || (req->newptr == NULL))
4113 		return (error);
4114 
4115 	switch (newval) {
4116 	case 0:
4117 		/* Disabled */
4118 		adapter->dmac = 0;
4119 		break;
4120 	case 1:
4121 		/* Enable and use default */
4122 		adapter->dmac = 1000;
4123 		break;
4124 	case 50:
4125 	case 100:
4126 	case 250:
4127 	case 500:
4128 	case 1000:
4129 	case 2000:
4130 	case 5000:
4131 	case 10000:
4132 		/* Legal values - allow */
4133 		adapter->dmac = newval;
4134 		break;
4135 	default:
4136 		/* Do nothing, illegal value */
4137 		return (EINVAL);
4138 	}
4139 
4140 	/* Re-initialize hardware if it's already running */
4141 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4142 		ifp->if_init(ifp);
4143 
4144 	return (0);
4145 } /* ixgbe_sysctl_dmac */
4146 
4147 #ifdef IXGBE_DEBUG
4148 /************************************************************************
4149  * ixgbe_sysctl_power_state
4150  *
4151  *   Sysctl to test power states
4152  *   Values:
4153  *     0      - set device to D0
4154  *     3      - set device to D3
4155  *     (none) - get current device power state
4156  ************************************************************************/
4157 static int
4158 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4159 {
4160 	struct adapter *adapter = (struct adapter *)arg1;
4161 	device_t       dev = adapter->dev;
4162 	int            curr_ps, new_ps, error = 0;
4163 
4164 	curr_ps = new_ps = pci_get_powerstate(dev);
4165 
4166 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4167 	if ((error) || (req->newptr == NULL))
4168 		return (error);
4169 
4170 	if (new_ps == curr_ps)
4171 		return (0);
4172 
4173 	if (new_ps == 3 && curr_ps == 0)
4174 		error = DEVICE_SUSPEND(dev);
4175 	else if (new_ps == 0 && curr_ps == 3)
4176 		error = DEVICE_RESUME(dev);
4177 	else
4178 		return (EINVAL);
4179 
4180 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4181 
4182 	return (error);
4183 } /* ixgbe_sysctl_power_state */
4184 #endif
4185 
4186 /************************************************************************
4187  * ixgbe_sysctl_wol_enable
4188  *
4189  *   Sysctl to enable/disable the WoL capability,
4190  *   if supported by the adapter.
4191  *
4192  *   Values:
4193  *     0 - disabled
4194  *     1 - enabled
4195  ************************************************************************/
4196 static int
4197 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4198 {
4199 	struct adapter  *adapter = (struct adapter *)arg1;
4200 	struct ixgbe_hw *hw = &adapter->hw;
4201 	int             new_wol_enabled;
4202 	int             error = 0;
4203 
4204 	new_wol_enabled = hw->wol_enabled;
4205 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4206 	if ((error) || (req->newptr == NULL))
4207 		return (error);
4208 	new_wol_enabled = !!(new_wol_enabled);
4209 	if (new_wol_enabled == hw->wol_enabled)
4210 		return (0);
4211 
4212 	if (new_wol_enabled > 0 && !adapter->wol_support)
4213 		return (ENODEV);
4214 	else
4215 		hw->wol_enabled = new_wol_enabled;
4216 
4217 	return (0);
4218 } /* ixgbe_sysctl_wol_enable */
4219 
4220 /************************************************************************
4221  * ixgbe_sysctl_wufc - Wake Up Filter Control
4222  *
4223  *   Sysctl to enable/disable the types of packets that the
4224  *   adapter will wake up on upon receipt.
4225  *   Flags:
4226  *     0x1  - Link Status Change
4227  *     0x2  - Magic Packet
4228  *     0x4  - Direct Exact
4229  *     0x8  - Directed Multicast
4230  *     0x10 - Broadcast
4231  *     0x20 - ARP/IPv4 Request Packet
4232  *     0x40 - Direct IPv4 Packet
4233  *     0x80 - Direct IPv6 Packet
4234  *
4235  *   Settings not listed above will cause the sysctl to return an error.
4236  ************************************************************************/
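/*
 * The flags combine as a bitmask, e.g. new_wufc = 0x12 wakes on Magic
 * Packet (0x2) and Broadcast (0x10).
 */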
4237 static int
4238 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4239 {
4240 	struct adapter *adapter = (struct adapter *)arg1;
4241 	int            error = 0;
4242 	u32            new_wufc;
4243 
4244 	new_wufc = adapter->wufc;
4245 
4246 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4247 	if ((error) || (req->newptr == NULL))
4248 		return (error);
4249 	if (new_wufc == adapter->wufc)
4250 		return (0);
4251 
4252 	if (new_wufc & 0xffffff00)
4253 		return (EINVAL);
4254 
4255 	new_wufc &= 0xff;
4256 	new_wufc |= (0xffffff & adapter->wufc);
4257 	adapter->wufc = new_wufc;
4258 
4259 	return (0);
4260 } /* ixgbe_sysctl_wufc */
4261 
4262 #ifdef IXGBE_DEBUG
4263 /************************************************************************
4264  * ixgbe_sysctl_print_rss_config
4265  ************************************************************************/
4266 static int
4267 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4268 {
4269 	struct adapter  *adapter = (struct adapter *)arg1;
4270 	struct ixgbe_hw *hw = &adapter->hw;
4271 	device_t        dev = adapter->dev;
4272 	struct sbuf     *buf;
4273 	int             error = 0, reta_size;
4274 	u32             reg;
4275 
4276 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4277 	if (!buf) {
4278 		device_printf(dev, "Could not allocate sbuf for output.\n");
4279 		return (ENOMEM);
4280 	}
4281 
4282 	// TODO: use sbufs to make a string to print out
4283 	/* Set multiplier for RETA setup and table size based on MAC */
4284 	switch (adapter->hw.mac.type) {
4285 	case ixgbe_mac_X550:
4286 	case ixgbe_mac_X550EM_x:
4287 	case ixgbe_mac_X550EM_a:
4288 		reta_size = 128;
4289 		break;
4290 	default:
4291 		reta_size = 32;
4292 		break;
4293 	}
4294 
4295 	/* Print out the redirection table */
4296 	sbuf_cat(buf, "\n");
4297 	for (int i = 0; i < reta_size; i++) {
4298 		if (i < 32) {
4299 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4300 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4301 		} else {
4302 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4303 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4304 		}
4305 	}
4306 
4307 	// TODO: print more config
4308 
4309 	error = sbuf_finish(buf);
4310 	if (error)
4311 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4312 
4313 	sbuf_delete(buf);
4314 
4315 	return (0);
4316 } /* ixgbe_sysctl_print_rss_config */
4317 #endif /* IXGBE_DEBUG */
4318 
4319 /************************************************************************
4320  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4321  *
4322  *   For X552/X557-AT devices using an external PHY
4323  ************************************************************************/
4324 static int
4325 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4326 {
4327 	struct adapter  *adapter = (struct adapter *)arg1;
4328 	struct ixgbe_hw *hw = &adapter->hw;
4329 	u16             reg;
4330 
4331 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4332 		device_printf(iflib_get_dev(adapter->ctx),
4333 		    "Device has no supported external thermal sensor.\n");
4334 		return (ENODEV);
4335 	}
4336 
4337 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4338 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4339 		device_printf(iflib_get_dev(adapter->ctx),
4340 		    "Error reading from PHY's current temperature register\n");
4341 		return (EAGAIN);
4342 	}
4343 
4344 	/* Shift temp for output */
4345 	reg = reg >> 8;
4346 
4347 	return (sysctl_handle_16(oidp, NULL, reg, req));
4348 } /* ixgbe_sysctl_phy_temp */
4349 
4350 /************************************************************************
4351  * ixgbe_sysctl_phy_overtemp_occurred
4352  *
4353  *   Reports (directly from the PHY) whether the current PHY
4354  *   temperature is over the overtemp threshold.
4355  ************************************************************************/
4356 static int
4357 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4358 {
4359 	struct adapter  *adapter = (struct adapter *)arg1;
4360 	struct ixgbe_hw *hw = &adapter->hw;
4361 	u16             reg;
4362 
4363 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4364 		device_printf(iflib_get_dev(adapter->ctx),
4365 		    "Device has no supported external thermal sensor.\n");
4366 		return (ENODEV);
4367 	}
4368 
4369 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4370 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4371 		device_printf(iflib_get_dev(adapter->ctx),
4372 		    "Error reading from PHY's temperature status register\n");
4373 		return (EAGAIN);
4374 	}
4375 
4376 	/* Get occurrence bit */
4377 	reg = !!(reg & 0x4000);
4378 
4379 	return (sysctl_handle_16(oidp, 0, reg, req));
4380 } /* ixgbe_sysctl_phy_overtemp_occurred */
4381 
4382 /************************************************************************
4383  * ixgbe_sysctl_eee_state
4384  *
4385  *   Sysctl to set EEE power saving feature
4386  *   Values:
4387  *     0      - disable EEE
4388  *     1      - enable EEE
4389  *     (none) - get current device EEE state
4390  ************************************************************************/
4391 static int
4392 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4393 {
4394 	struct adapter *adapter = (struct adapter *)arg1;
4395 	device_t       dev = adapter->dev;
4396 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4397 	int            curr_eee, new_eee, error = 0;
4398 	s32            retval;
4399 
4400 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4401 
4402 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4403 	if ((error) || (req->newptr == NULL))
4404 		return (error);
4405 
4406 	/* Nothing to do */
4407 	if (new_eee == curr_eee)
4408 		return (0);
4409 
4410 	/* Not supported */
4411 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4412 		return (EINVAL);
4413 
4414 	/* Bounds checking */
4415 	if ((new_eee < 0) || (new_eee > 1))
4416 		return (EINVAL);
4417 
4418 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4419 	if (retval) {
4420 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4421 		return (EINVAL);
4422 	}
4423 
4424 	/* Restart auto-neg */
4425 	ifp->if_init(ifp);
4426 
4427 	device_printf(dev, "New EEE state: %d\n", new_eee);
4428 
4429 	/* Cache new value */
4430 	if (new_eee)
4431 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4432 	else
4433 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4434 
4435 	return (error);
4436 } /* ixgbe_sysctl_eee_state */
4437 
4438 /************************************************************************
4439  * ixgbe_init_device_features
4440  ************************************************************************/
4441 static void
4442 ixgbe_init_device_features(struct adapter *adapter)
4443 {
4444 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4445 	                  | IXGBE_FEATURE_RSS
4446 	                  | IXGBE_FEATURE_MSI
4447 	                  | IXGBE_FEATURE_MSIX
4448 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4449 
4450 	/* Set capabilities first... */
4451 	switch (adapter->hw.mac.type) {
4452 	case ixgbe_mac_82598EB:
4453 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4454 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4455 		break;
4456 	case ixgbe_mac_X540:
4457 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4458 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4459 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4460 		    (adapter->hw.bus.func == 0))
4461 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4462 		break;
4463 	case ixgbe_mac_X550:
4464 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4465 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4466 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4467 		break;
4468 	case ixgbe_mac_X550EM_x:
4469 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4470 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4471 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4472 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4473 		break;
4474 	case ixgbe_mac_X550EM_a:
4475 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4476 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4477 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4478 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4479 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4480 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4481 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4482 		}
4483 		break;
4484 	case ixgbe_mac_82599EB:
4485 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4486 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4487 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4488 		    (adapter->hw.bus.func == 0))
4489 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4490 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4491 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4492 		break;
4493 	default:
4494 		break;
4495 	}
4496 
4497 	/* Enabled by default... */
4498 	/* Fan failure detection */
4499 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4500 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4501 	/* Netmap */
4502 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4503 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4504 	/* EEE */
4505 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4506 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4507 	/* Thermal Sensor */
4508 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4509 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4510 
4511 	/* Enabled via global sysctl... */
4512 	/* Flow Director */
4513 	if (ixgbe_enable_fdir) {
4514 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4515 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4516 		else
4517 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4518 	}
4519 	/*
4520 	 * Message Signal Interrupts - Extended (MSI-X)
4521 	 * Normal MSI is only enabled if MSI-X calls fail.
4522 	 */
4523 	if (!ixgbe_enable_msix)
4524 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4525 	/* Receive-Side Scaling (RSS) */
4526 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4527 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4528 
4529 	/* Disable features with unmet dependencies... */
4530 	/* No MSI-X */
4531 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4532 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4533 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4534 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4535 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4536 	}
4537 } /* ixgbe_init_device_features */
4538 
4539 /************************************************************************
4540  * ixgbe_check_fan_failure
4541  ************************************************************************/
4542 static void
4543 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4544 {
4545 	u32 mask;
4546 
4547 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4548 	    IXGBE_ESDP_SDP1;
4549 
4550 	if (reg & mask)
4551 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4552 } /* ixgbe_check_fan_failure */
4553