xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision f18976136625a7d016e97bfd9eabddf640b3e06d)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select which devices to load
57  *   Last field is a human-readable description string
58  *   Last entry must be PVID_END
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 int ixgbe_intr(void *arg);
143 
144 /************************************************************************
145  * Function prototypes
146  ************************************************************************/
147 #if __FreeBSD_version >= 1100036
148 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
149 #endif
150 
151 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
154 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
155 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
156 
157 static void ixgbe_config_dmac(struct adapter *adapter);
158 static void ixgbe_configure_ivars(struct adapter *adapter);
159 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
160                            s8 type);
161 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static bool ixgbe_sfp_probe(if_ctx_t ctx);
163 
164 static void ixgbe_free_pci_resources(if_ctx_t ctx);
165 
166 static int  ixgbe_msix_link(void *arg);
167 static int  ixgbe_msix_que(void *arg);
168 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
169 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
170 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
171 
172 static int  ixgbe_setup_interface(if_ctx_t ctx);
173 static void ixgbe_init_device_features(struct adapter *adapter);
174 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
175 static void ixgbe_add_media_types(if_ctx_t ctx);
176 static void ixgbe_update_stats_counters(struct adapter *adapter);
177 static void ixgbe_config_link(if_ctx_t ctx);
178 static void ixgbe_get_slot_info(struct adapter *);
179 static void ixgbe_check_wol_support(struct adapter *adapter);
180 static void ixgbe_enable_rx_drop(struct adapter *);
181 static void ixgbe_disable_rx_drop(struct adapter *);
182 
183 static void ixgbe_add_hw_stats(struct adapter *adapter);
184 static int  ixgbe_set_flowcntl(struct adapter *, int);
185 static int  ixgbe_set_advertise(struct adapter *, int);
186 static int  ixgbe_get_advertise(struct adapter *);
187 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
188 static void ixgbe_config_gpie(struct adapter *adapter);
189 static void ixgbe_config_delay_values(struct adapter *adapter);
190 
191 /* Sysctl handlers */
192 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
198 #ifdef IXGBE_DEBUG
199 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
200 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
201 #endif
202 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
209 
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_msf(void *);
212 static void ixgbe_handle_mod(void *);
213 static void ixgbe_handle_phy(void *);
214 
215 /************************************************************************
216  *  FreeBSD Device Interface Entry Points
217  ************************************************************************/
218 static device_method_t ix_methods[] = {
219 	/* Device interface */
220 	DEVMETHOD(device_register, ixgbe_register),
221 	DEVMETHOD(device_probe, iflib_device_probe),
222 	DEVMETHOD(device_attach, iflib_device_attach),
223 	DEVMETHOD(device_detach, iflib_device_detach),
224 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
225 	DEVMETHOD(device_suspend, iflib_device_suspend),
226 	DEVMETHOD(device_resume, iflib_device_resume),
227 #ifdef PCI_IOV
228 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
229 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
230 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
231 #endif /* PCI_IOV */
232 	DEVMETHOD_END
233 };
234 
235 static driver_t ix_driver = {
236 	"ix", ix_methods, sizeof(struct adapter),
237 };
238 
239 devclass_t ix_devclass;
240 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
241 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
242 MODULE_DEPEND(ix, pci, 1, 1, 1);
243 MODULE_DEPEND(ix, ether, 1, 1, 1);
244 MODULE_DEPEND(ix, iflib, 1, 1, 1);
245 
246 static device_method_t ixgbe_if_methods[] = {
247 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
248 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
249 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
250 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
251 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
252 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
253 	DEVMETHOD(ifdi_init, ixgbe_if_init),
254 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
255 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
256 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
257 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
258 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
259 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
260 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
261 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
262 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
263 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
264 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
265 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
266 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
267 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
268 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
269 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
270 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
271 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
272 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
273 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
274 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
275 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
276 #ifdef PCI_IOV
277 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
278 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
279 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
280 #endif /* PCI_IOV */
281 	DEVMETHOD_END
282 };
283 
284 /*
285  * TUNEABLE PARAMETERS:
286  */
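/*
 * Example (illustrative only): because the sysctls below are CTLFLAG_RDTUN,
 * they are read-only at run time but may be preset as loader tunables,
 * e.g. in /boot/loader.conf:
 *
 *   hw.ix.enable_msix=0       # force MSI/legacy interrupts for testing
 *   hw.ix.flow_control=3      # request full flow control (ixgbe_fc_full)
 *   hw.ix.unsupported_sfp=1   # allow unsupported SFP+ modules
 *
 * The exact accepted values are interpreted by the handlers and shared
 * code referenced below.
 */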
287 
288 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
289 static driver_t ixgbe_if_driver = {
290   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
291 };
292 
293 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
294 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
295     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
296 
297 /* Flow control setting, default to full */
298 static int ixgbe_flow_control = ixgbe_fc_full;
299 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
300     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
301 
302 /* Advertise Speed, default to 0 (auto) */
303 static int ixgbe_advertise_speed = 0;
304 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
305     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
306 
307 /*
308  * Smart speed setting, default to on.
309  * This only works as a compile-time option
310  * right now because it is applied during attach;
311  * set this to 'ixgbe_smart_speed_off' to
312  * disable.
313  */
314 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
315 
316 /*
317  * MSI-X should be the default for best performance,
318  * but this allows it to be forced off for testing.
319  */
320 static int ixgbe_enable_msix = 1;
321 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
322     "Enable MSI-X interrupts");
323 
324 /*
325  * Defining this on allows the use of
326  * unsupported SFP+ modules; note that by
327  * doing so you are on your own :)
328  */
329 static int allow_unsupported_sfp = FALSE;
330 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
331     &allow_unsupported_sfp, 0,
332     "Allow unsupported SFP modules...use at your own risk");
333 
334 /*
335  * Not sure if Flow Director is fully baked,
336  * so we'll default to turning it off.
337  */
338 static int ixgbe_enable_fdir = 0;
339 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
340     "Enable Flow Director");
341 
342 /* Receive-Side Scaling */
343 static int ixgbe_enable_rss = 1;
344 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
345     "Enable Receive-Side Scaling (RSS)");
346 
347 #if 0
348 /* Keep running tab on them for sanity check */
349 static int ixgbe_total_ports;
350 #endif
351 
352 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
353 
354 /*
355  * For Flow Director: this is the number of TX packets we sample
356  * for the filter pool; this means every 20th packet will be probed.
357  *
358  * This feature can be disabled by setting this to 0.
359  */
360 static int atr_sample_rate = 20;
361 
362 extern struct if_txrx ixgbe_txrx;
363 
364 static struct if_shared_ctx ixgbe_sctx_init = {
365 	.isc_magic = IFLIB_MAGIC,
366 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
367 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
368 	.isc_tx_maxsegsize = PAGE_SIZE,
369 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
370 	.isc_tso_maxsegsize = PAGE_SIZE,
371 	.isc_rx_maxsize = PAGE_SIZE*4,
372 	.isc_rx_nsegments = 1,
373 	.isc_rx_maxsegsize = PAGE_SIZE*4,
374 	.isc_nfl = 1,
375 	.isc_ntxqs = 1,
376 	.isc_nrxqs = 1,
377 
378 	.isc_admin_intrcnt = 1,
379 	.isc_vendor_info = ixgbe_vendor_info_array,
380 	.isc_driver_version = ixgbe_driver_version,
381 	.isc_driver = &ixgbe_if_driver,
382 	.isc_flags = IFLIB_TSO_INIT_IP,
383 
384 	.isc_nrxd_min = {MIN_RXD},
385 	.isc_ntxd_min = {MIN_TXD},
386 	.isc_nrxd_max = {MAX_RXD},
387 	.isc_ntxd_max = {MAX_TXD},
388 	.isc_nrxd_default = {DEFAULT_RXD},
389 	.isc_ntxd_default = {DEFAULT_TXD},
390 };
391 
392 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
393 
394 /************************************************************************
395  * ixgbe_if_tx_queues_alloc
396  ************************************************************************/
397 static int
398 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
399                          int ntxqs, int ntxqsets)
400 {
401 	struct adapter     *adapter = iflib_get_softc(ctx);
402 	if_softc_ctx_t     scctx = adapter->shared;
403 	struct ix_tx_queue *que;
404 	int                i, j, error;
405 
406 	MPASS(adapter->num_tx_queues > 0);
407 	MPASS(adapter->num_tx_queues == ntxqsets);
408 	MPASS(ntxqs == 1);
409 
410 	/* Allocate queue structure memory */
411 	adapter->tx_queues =
412 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
413 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
414 	if (!adapter->tx_queues) {
415 		device_printf(iflib_get_dev(ctx),
416 		    "Unable to allocate TX ring memory\n");
417 		return (ENOMEM);
418 	}
419 
420 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
421 		struct tx_ring *txr = &que->txr;
422 
423 		/* In case SR-IOV is enabled, align the index properly */
424 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
425 		    i);
426 
427 		txr->adapter = que->adapter = adapter;
428 
429 		/* Allocate report status array */
430 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
431 		if (txr->tx_rsq == NULL) {
432 			error = ENOMEM;
433 			goto fail;
434 		}
435 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
436 			txr->tx_rsq[j] = QIDX_INVALID;
437 		/* get the virtual and physical address of the hardware queues */
438 		txr->tail = IXGBE_TDT(txr->me);
439 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
440 		txr->tx_paddr = paddrs[i];
441 
442 		txr->bytes = 0;
443 		txr->total_packets = 0;
444 
445 		/* Set the rate at which we sample packets */
446 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
447 			txr->atr_sample = atr_sample_rate;
448 
449 	}
450 
451 	device_printf(iflib_get_dev(ctx), "allocated for %d TX queues\n",
452 	    adapter->num_tx_queues);
453 
454 	return (0);
455 
456 fail:
457 	ixgbe_if_queues_free(ctx);
458 
459 	return (error);
460 } /* ixgbe_if_tx_queues_alloc */
461 
462 /************************************************************************
463  * ixgbe_if_rx_queues_alloc
464  ************************************************************************/
465 static int
466 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
467                          int nrxqs, int nrxqsets)
468 {
469 	struct adapter     *adapter = iflib_get_softc(ctx);
470 	struct ix_rx_queue *que;
471 	int                i;
472 
473 	MPASS(adapter->num_rx_queues > 0);
474 	MPASS(adapter->num_rx_queues == nrxqsets);
475 	MPASS(nrxqs == 1);
476 
477 	/* Allocate queue structure memory */
478 	adapter->rx_queues =
479 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
480 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
481 	if (!adapter->rx_queues) {
482 		device_printf(iflib_get_dev(ctx),
483 		    "Unable to allocate RX ring memory\n");
484 		return (ENOMEM);
485 	}
486 
487 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
488 		struct rx_ring *rxr = &que->rxr;
489 
490 		/* In case SR-IOV is enabled, align the index properly */
491 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
492 		    i);
493 
494 		rxr->adapter = que->adapter = adapter;
495 
496 		/* get the virtual and physical address of the hw queues */
497 		rxr->tail = IXGBE_RDT(rxr->me);
498 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
499 		rxr->rx_paddr = paddrs[i];
500 		rxr->bytes = 0;
501 		rxr->que = que;
502 	}
503 
504 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
505 	    adapter->num_rx_queues);
506 
507 	return (0);
508 } /* ixgbe_if_rx_queues_alloc */
509 
510 /************************************************************************
511  * ixgbe_if_queues_free
512  ************************************************************************/
513 static void
514 ixgbe_if_queues_free(if_ctx_t ctx)
515 {
516 	struct adapter     *adapter = iflib_get_softc(ctx);
517 	struct ix_tx_queue *tx_que = adapter->tx_queues;
518 	struct ix_rx_queue *rx_que = adapter->rx_queues;
519 	int                i;
520 
521 	if (tx_que != NULL) {
522 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
523 			struct tx_ring *txr = &tx_que->txr;
524 			if (txr->tx_rsq == NULL)
525 				break;
526 
527 			free(txr->tx_rsq, M_IXGBE);
528 			txr->tx_rsq = NULL;
529 		}
530 
531 		free(adapter->tx_queues, M_IXGBE);
532 		adapter->tx_queues = NULL;
533 	}
534 	if (rx_que != NULL) {
535 		free(adapter->rx_queues, M_IXGBE);
536 		adapter->rx_queues = NULL;
537 	}
538 } /* ixgbe_if_queues_free */
539 
540 /************************************************************************
541  * ixgbe_initialize_rss_mapping
542  ************************************************************************/
543 static void
544 ixgbe_initialize_rss_mapping(struct adapter *adapter)
545 {
546 	struct ixgbe_hw *hw = &adapter->hw;
547 	u32             reta = 0, mrqc, rss_key[10];
548 	int             queue_id, table_size, index_mult;
549 	int             i, j;
550 	u32             rss_hash_config;
551 
552 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
553 		/* Fetch the configured RSS key */
554 		rss_getkey((uint8_t *)&rss_key);
555 	} else {
556 		/* set up random bits */
557 		arc4rand(&rss_key, sizeof(rss_key), 0);
558 	}
559 
560 	/* Set multiplier for RETA setup and table size based on MAC */
561 	index_mult = 0x1;
562 	table_size = 128;
563 	switch (adapter->hw.mac.type) {
564 	case ixgbe_mac_82598EB:
565 		index_mult = 0x11;
566 		break;
567 	case ixgbe_mac_X550:
568 	case ixgbe_mac_X550EM_x:
569 	case ixgbe_mac_X550EM_a:
570 		table_size = 512;
571 		break;
572 	default:
573 		break;
574 	}
575 
576 	/* Set up the redirection table */
577 	for (i = 0, j = 0; i < table_size; i++, j++) {
578 		if (j == adapter->num_rx_queues)
579 			j = 0;
580 
581 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
582 			/*
583 			 * Fetch the RSS bucket id for the given indirection
584 			 * entry. Cap it at the number of configured buckets
585 			 * (which is num_rx_queues.)
586 			 */
587 			queue_id = rss_get_indirection_to_bucket(i);
588 			queue_id = queue_id % adapter->num_rx_queues;
589 		} else
590 			queue_id = (j * index_mult);
591 
592 		/*
593 		 * The low 8 bits are for hash value (n+0);
594 		 * The next 8 bits are for hash value (n+1), etc.
595 		 */
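		/*
		 * Example (illustrative, assuming a non-82598 MAC so that
		 * index_mult == 1): with two RX queues, the packed word
		 * written every fourth iteration is
		 *   (q3 << 24) | (q2 << 16) | (q1 << 8) | q0,
		 * i.e. 0x01000100 for the repeating queue pattern 0,1,0,1.
		 */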
596 		reta = reta >> 8;
597 		reta = reta | (((uint32_t)queue_id) << 24);
598 		if ((i & 3) == 3) {
599 			if (i < 128)
600 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
601 			else
602 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
603 				    reta);
604 			reta = 0;
605 		}
606 	}
607 
608 	/* Now fill our hash function seeds */
609 	for (i = 0; i < 10; i++)
610 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
611 
612 	/* Perform hash on these packet types */
613 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
614 		rss_hash_config = rss_gethashconfig();
615 	else {
616 		/*
617 		 * Disable UDP - IP fragments aren't currently being handled
618 		 * and so we end up with a mix of 2-tuple and 4-tuple
619 		 * traffic.
620 		 */
621 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
622 		                | RSS_HASHTYPE_RSS_TCP_IPV4
623 		                | RSS_HASHTYPE_RSS_IPV6
624 		                | RSS_HASHTYPE_RSS_TCP_IPV6
625 		                | RSS_HASHTYPE_RSS_IPV6_EX
626 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
627 	}
628 
629 	mrqc = IXGBE_MRQC_RSSEN;
630 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
631 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
632 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
633 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
634 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
635 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
636 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
637 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
638 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
639 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
640 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
641 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
642 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
643 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
644 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
645 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
646 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
647 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
648 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
649 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
650 } /* ixgbe_initialize_rss_mapping */
651 
652 /************************************************************************
653  * ixgbe_initialize_receive_units - Setup receive registers and features.
654  ************************************************************************/
655 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
656 
657 static void
658 ixgbe_initialize_receive_units(if_ctx_t ctx)
659 {
660 	struct adapter     *adapter = iflib_get_softc(ctx);
661 	if_softc_ctx_t     scctx = adapter->shared;
662 	struct ixgbe_hw    *hw = &adapter->hw;
663 	struct ifnet       *ifp = iflib_get_ifp(ctx);
664 	struct ix_rx_queue *que;
665 	int                i, j;
666 	u32                bufsz, fctrl, srrctl, rxcsum;
667 	u32                hlreg;
668 
669 	/*
670 	 * Make sure receives are disabled while
671 	 * setting up the descriptor ring
672 	 */
673 	ixgbe_disable_rx(hw);
674 
675 	/* Enable broadcasts */
676 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
677 	fctrl |= IXGBE_FCTRL_BAM;
678 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
679 		fctrl |= IXGBE_FCTRL_DPF;
680 		fctrl |= IXGBE_FCTRL_PMCF;
681 	}
682 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
683 
684 	/* Set for Jumbo Frames? */
685 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
686 	if (ifp->if_mtu > ETHERMTU)
687 		hlreg |= IXGBE_HLREG0_JUMBOEN;
688 	else
689 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
690 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
691 
692 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
693 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
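	/*
	 * Example (illustrative): SRRCTL expresses the packet buffer size
	 * in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT), so a 2048-byte
	 * rx_mbuf_sz rounds up to a bufsz field value of 2, i.e. 2 KB per
	 * receive buffer.
	 */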
694 
695 	/* Setup the Base and Length of the Rx Descriptor Ring */
696 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
697 		struct rx_ring *rxr = &que->rxr;
698 		u64            rdba = rxr->rx_paddr;
699 
700 		j = rxr->me;
701 
702 		/* Setup the Base and Length of the Rx Descriptor Ring */
703 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
704 		    (rdba & 0x00000000ffffffffULL));
705 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
706 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
707 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
708 
709 		/* Set up the SRRCTL register */
710 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
711 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
712 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
713 		srrctl |= bufsz;
714 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
715 
716 		/*
717 		 * Set DROP_EN iff we have no flow control and >1 queue.
718 		 * Note that srrctl was cleared shortly before during reset,
719 		 * so we do not need to clear the bit, but do it just in case
720 		 * this code is moved elsewhere.
721 		 */
722 		if (adapter->num_rx_queues > 1 &&
723 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
724 			srrctl |= IXGBE_SRRCTL_DROP_EN;
725 		} else {
726 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
727 		}
728 
729 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
730 
731 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
732 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
733 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
734 
735 		/* Set the driver rx tail address */
736 		rxr->tail = IXGBE_RDT(rxr->me);
737 	}
738 
739 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
740 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
741 		            | IXGBE_PSRTYPE_UDPHDR
742 		            | IXGBE_PSRTYPE_IPV4HDR
743 		            | IXGBE_PSRTYPE_IPV6HDR;
744 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
745 	}
746 
747 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
748 
749 	ixgbe_initialize_rss_mapping(adapter);
750 
751 	if (adapter->num_rx_queues > 1) {
752 		/* RSS and RX IPP Checksum are mutually exclusive */
753 		rxcsum |= IXGBE_RXCSUM_PCSD;
754 	}
755 
756 	if (ifp->if_capenable & IFCAP_RXCSUM)
757 		rxcsum |= IXGBE_RXCSUM_PCSD;
758 
759 	/* This is useful for calculating UDP/IP fragment checksums */
760 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
761 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
762 
763 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
764 
765 } /* ixgbe_initialize_receive_units */
766 
767 /************************************************************************
768  * ixgbe_initialize_transmit_units - Enable transmit units.
769  ************************************************************************/
770 static void
771 ixgbe_initialize_transmit_units(if_ctx_t ctx)
772 {
773 	struct adapter     *adapter = iflib_get_softc(ctx);
774 	struct ixgbe_hw    *hw = &adapter->hw;
775 	if_softc_ctx_t     scctx = adapter->shared;
776 	struct ix_tx_queue *que;
777 	int i;
778 
779 	/* Setup the Base and Length of the Tx Descriptor Ring */
780 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
781 	    i++, que++) {
782 		struct tx_ring	   *txr = &que->txr;
783 		u64 tdba = txr->tx_paddr;
784 		u32 txctrl = 0;
785 		int j = txr->me;
786 
787 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
788 		    (tdba & 0x00000000ffffffffULL));
789 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
790 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
791 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
792 
793 		/* Setup the HW Tx Head and Tail descriptor pointers */
794 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
795 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
796 
797 		/* Cache the tail address */
798 		txr->tail = IXGBE_TDT(txr->me);
799 
800 		txr->tx_rs_cidx = txr->tx_rs_pidx;
801 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
802 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
803 			txr->tx_rsq[k] = QIDX_INVALID;
804 
805 		/* Disable Head Writeback */
806 		/*
807 		 * Note: for X550 series devices, these registers are actually
808 		 * prefixed with TPH_ instead of DCA_, but the addresses and
809 		 * fields remain the same.
810 		 */
811 		switch (hw->mac.type) {
812 		case ixgbe_mac_82598EB:
813 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
814 			break;
815 		default:
816 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
817 			break;
818 		}
819 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
820 		switch (hw->mac.type) {
821 		case ixgbe_mac_82598EB:
822 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
823 			break;
824 		default:
825 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
826 			break;
827 		}
828 
829 	}
830 
831 	if (hw->mac.type != ixgbe_mac_82598EB) {
832 		u32 dmatxctl, rttdcs;
833 
834 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
835 		dmatxctl |= IXGBE_DMATXCTL_TE;
836 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
837 		/* Disable arbiter to set MTQC */
838 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
839 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
840 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
841 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
842 		    ixgbe_get_mtqc(adapter->iov_mode));
843 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
844 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
845 	}
846 
847 } /* ixgbe_initialize_transmit_units */
848 
849 /************************************************************************
850  * ixgbe_register
851  ************************************************************************/
852 static void *
853 ixgbe_register(device_t dev)
854 {
855 	return (ixgbe_sctx);
856 } /* ixgbe_register */
857 
858 /************************************************************************
859  * ixgbe_if_attach_pre - Device initialization routine, part 1
860  *
861  *   Called when the driver is being loaded.
862  *   Identifies the type of hardware, initializes the hardware,
863  *   and initializes iflib structures.
864  *
865  *   return 0 on success, positive on failure
866  ************************************************************************/
867 static int
868 ixgbe_if_attach_pre(if_ctx_t ctx)
869 {
870 	struct adapter  *adapter;
871 	device_t        dev;
872 	if_softc_ctx_t  scctx;
873 	struct ixgbe_hw *hw;
874 	int             error = 0;
875 	u32             ctrl_ext;
876 
877 	INIT_DEBUGOUT("ixgbe_attach: begin");
878 
879 	/* Allocate, clear, and link in our adapter structure */
880 	dev = iflib_get_dev(ctx);
881 	adapter = iflib_get_softc(ctx);
882 	adapter->hw.back = adapter;
883 	adapter->ctx = ctx;
884 	adapter->dev = dev;
885 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
886 	adapter->media = iflib_get_media(ctx);
887 	hw = &adapter->hw;
888 
889 	/* Determine hardware revision */
890 	hw->vendor_id = pci_get_vendor(dev);
891 	hw->device_id = pci_get_device(dev);
892 	hw->revision_id = pci_get_revid(dev);
893 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
894 	hw->subsystem_device_id = pci_get_subdevice(dev);
895 
896 	/* Do base PCI setup - map BAR0 */
897 	if (ixgbe_allocate_pci_resources(ctx)) {
898 		device_printf(dev, "Allocation of PCI resources failed\n");
899 		return (ENXIO);
900 	}
901 
902 	/* let hardware know driver is loaded */
903 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
904 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
905 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
906 
907 	/*
908 	 * Initialize the shared code
909 	 */
910 	if (ixgbe_init_shared_code(hw) != 0) {
911 		device_printf(dev, "Unable to initialize the shared code\n");
912 		error = ENXIO;
913 		goto err_pci;
914 	}
915 
916 	if (hw->mbx.ops.init_params)
917 		hw->mbx.ops.init_params(hw);
918 
919 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
920 
921 	if (hw->mac.type != ixgbe_mac_82598EB)
922 		hw->phy.smart_speed = ixgbe_smart_speed;
923 
924 	ixgbe_init_device_features(adapter);
925 
926 	/* Enable WoL (if supported) */
927 	ixgbe_check_wol_support(adapter);
928 
929 	/* Verify adapter fan is still functional (if applicable) */
930 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
931 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
932 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
933 	}
934 
935 	/* Ensure SW/FW semaphore is free */
936 	ixgbe_init_swfw_semaphore(hw);
937 
938 	/* Set an initial default flow control value */
939 	hw->fc.requested_mode = ixgbe_flow_control;
940 
941 	hw->phy.reset_if_overtemp = TRUE;
942 	error = ixgbe_reset_hw(hw);
943 	hw->phy.reset_if_overtemp = FALSE;
944 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
945 		/*
946 		 * No optics in this port, set up
947 		 * so the timer routine will probe
948 		 * for later insertion.
949 		 */
950 		adapter->sfp_probe = TRUE;
951 		error = 0;
952 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
953 		device_printf(dev, "Unsupported SFP+ module detected!\n");
954 		error = EIO;
955 		goto err_pci;
956 	} else if (error) {
957 		device_printf(dev, "Hardware initialization failed\n");
958 		error = EIO;
959 		goto err_pci;
960 	}
961 
962 	/* Make sure we have a good EEPROM before we read from it */
963 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
964 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
965 		error = EIO;
966 		goto err_pci;
967 	}
968 
969 	error = ixgbe_start_hw(hw);
970 	switch (error) {
971 	case IXGBE_ERR_EEPROM_VERSION:
972 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
973 		break;
974 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
975 		device_printf(dev, "Unsupported SFP+ Module\n");
976 		error = EIO;
977 		goto err_pci;
978 	case IXGBE_ERR_SFP_NOT_PRESENT:
979 		device_printf(dev, "No SFP+ Module found\n");
980 		/* falls thru */
981 	default:
982 		break;
983 	}
984 
985 	/* Most of the iflib initialization... */
986 
987 	iflib_set_mac(ctx, hw->mac.addr);
988 	switch (adapter->hw.mac.type) {
989 	case ixgbe_mac_X550:
990 	case ixgbe_mac_X550EM_x:
991 	case ixgbe_mac_X550EM_a:
992 		scctx->isc_rss_table_size = 512;
993 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
994 		break;
995 	default:
996 		scctx->isc_rss_table_size = 128;
997 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
998 	}
999 
1000 	/* Allow legacy interrupts */
1001 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1002 
1003 	scctx->isc_txqsizes[0] =
1004 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1005 	    sizeof(u32), DBA_ALIGN);
1006 	scctx->isc_rxqsizes[0] =
1007 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1008 	    DBA_ALIGN);
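	/*
	 * Example (illustrative values): each advanced descriptor is 16
	 * bytes, so a ring of 2048 TX descriptors needs 32 KB plus an
	 * extra u32, rounded up to the DBA_ALIGN boundary; the RX ring is
	 * sized the same way without the extra word.
	 */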
1009 
1010 	/* XXX */
1011 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1012 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1013 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1014 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1015 	} else {
1016 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1017 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1018 	}
1019 
1020 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1021 
1022 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1023 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1024 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1025 
1026 	scctx->isc_txrx = &ixgbe_txrx;
1027 
1028 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1029 
1030 	return (0);
1031 
1032 err_pci:
1033 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1034 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1035 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1036 	ixgbe_free_pci_resources(ctx);
1037 
1038 	return (error);
1039 } /* ixgbe_if_attach_pre */
1040 
1041  /*********************************************************************
1042  * ixgbe_if_attach_post - Device initialization routine, part 2
1043  *
1044  *   Called during driver load, but after interrupts and
1045  *   resources have been allocated and configured.
1046  *   Sets up some data structures not relevant to iflib.
1047  *
1048  *   return 0 on success, positive on failure
1049  *********************************************************************/
1050 static int
1051 ixgbe_if_attach_post(if_ctx_t ctx)
1052 {
1053 	device_t dev;
1054 	struct adapter  *adapter;
1055 	struct ixgbe_hw *hw;
1056 	int             error = 0;
1057 
1058 	dev = iflib_get_dev(ctx);
1059 	adapter = iflib_get_softc(ctx);
1060 	hw = &adapter->hw;
1061 
1062 
1063 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1064 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1065 		device_printf(dev, "Device does not support legacy interrupts\n");
1066 		error = ENXIO;
1067 		goto err;
1068 	}
1069 
1070 	/* Allocate multicast array memory. */
1071 	adapter->mta = malloc(sizeof(*adapter->mta) *
1072 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1073 	if (adapter->mta == NULL) {
1074 		device_printf(dev, "Can not allocate multicast setup array\n");
1075 		error = ENOMEM;
1076 		goto err;
1077 	}
1078 
1079 	/* hw.ix defaults init */
1080 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1081 
1082 	/* Enable the optics for 82599 SFP+ fiber */
1083 	ixgbe_enable_tx_laser(hw);
1084 
1085 	/* Enable power to the phy. */
1086 	ixgbe_set_phy_power(hw, TRUE);
1087 
1088 	ixgbe_initialize_iov(adapter);
1089 
1090 	error = ixgbe_setup_interface(ctx);
1091 	if (error) {
1092 		device_printf(dev, "Interface setup failed: %d\n", error);
1093 		goto err;
1094 	}
1095 
1096 	ixgbe_if_update_admin_status(ctx);
1097 
1098 	/* Initialize statistics */
1099 	ixgbe_update_stats_counters(adapter);
1100 	ixgbe_add_hw_stats(adapter);
1101 
1102 	/* Check PCIE slot type/speed/width */
1103 	ixgbe_get_slot_info(adapter);
1104 
1105 	/*
1106 	 * Do time init and sysctl init here, but
1107 	 * only on the first port of a bypass adapter.
1108 	 */
1109 	ixgbe_bypass_init(adapter);
1110 
1111 	/* Set an initial dmac value */
1112 	adapter->dmac = 0;
1113 	/* Set initial advertised speeds (if applicable) */
1114 	adapter->advertise = ixgbe_get_advertise(adapter);
1115 
1116 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1117 		ixgbe_define_iov_schemas(dev, &error);
1118 
1119 	/* Add sysctls */
1120 	ixgbe_add_device_sysctls(ctx);
1121 
1122 	return (0);
1123 err:
1124 	return (error);
1125 } /* ixgbe_if_attach_post */
1126 
1127 /************************************************************************
1128  * ixgbe_check_wol_support
1129  *
1130  *   Checks whether the adapter's ports are capable of
1131  *   Wake On LAN by reading the adapter's NVM.
1132  *
1133  *   Sets each port's hw->wol_enabled value depending
1134  *   on the value read here.
1135  ************************************************************************/
1136 static void
1137 ixgbe_check_wol_support(struct adapter *adapter)
1138 {
1139 	struct ixgbe_hw *hw = &adapter->hw;
1140 	u16             dev_caps = 0;
1141 
1142 	/* Find out WoL support for port */
1143 	adapter->wol_support = hw->wol_enabled = 0;
1144 	ixgbe_get_device_caps(hw, &dev_caps);
1145 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1146 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1147 	     hw->bus.func == 0))
1148 		adapter->wol_support = hw->wol_enabled = 1;
1149 
1150 	/* Save initial wake up filter configuration */
1151 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1152 
1153 	return;
1154 } /* ixgbe_check_wol_support */
1155 
1156 /************************************************************************
1157  * ixgbe_setup_interface
1158  *
1159  *   Setup networking device structure and register an interface.
1160  ************************************************************************/
1161 static int
1162 ixgbe_setup_interface(if_ctx_t ctx)
1163 {
1164 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1165 	struct adapter *adapter = iflib_get_softc(ctx);
1166 
1167 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1168 
1169 	if_setbaudrate(ifp, IF_Gbps(10));
1170 
1171 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1172 
1173 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1174 
1175 	ixgbe_add_media_types(ctx);
1176 
1177 	/* Autoselect media by default */
1178 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1179 
1180 	return (0);
1181 } /* ixgbe_setup_interface */
1182 
1183 /************************************************************************
1184  * ixgbe_if_get_counter
1185  ************************************************************************/
1186 static uint64_t
1187 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1188 {
1189 	struct adapter *adapter = iflib_get_softc(ctx);
1190 	if_t           ifp = iflib_get_ifp(ctx);
1191 
1192 	switch (cnt) {
1193 	case IFCOUNTER_IPACKETS:
1194 		return (adapter->ipackets);
1195 	case IFCOUNTER_OPACKETS:
1196 		return (adapter->opackets);
1197 	case IFCOUNTER_IBYTES:
1198 		return (adapter->ibytes);
1199 	case IFCOUNTER_OBYTES:
1200 		return (adapter->obytes);
1201 	case IFCOUNTER_IMCASTS:
1202 		return (adapter->imcasts);
1203 	case IFCOUNTER_OMCASTS:
1204 		return (adapter->omcasts);
1205 	case IFCOUNTER_COLLISIONS:
1206 		return (0);
1207 	case IFCOUNTER_IQDROPS:
1208 		return (adapter->iqdrops);
1209 	case IFCOUNTER_OQDROPS:
1210 		return (0);
1211 	case IFCOUNTER_IERRORS:
1212 		return (adapter->ierrors);
1213 	default:
1214 		return (if_get_counter_default(ifp, cnt));
1215 	}
1216 } /* ixgbe_if_get_counter */
1217 
1218 /************************************************************************
1219  * ixgbe_if_i2c_req
1220  ************************************************************************/
1221 static int
1222 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1223 {
1224 	struct adapter		*adapter = iflib_get_softc(ctx);
1225 	struct ixgbe_hw 	*hw = &adapter->hw;
1226 	int 			i;
1227 
1228 
1229 	if (hw->phy.ops.read_i2c_byte == NULL)
1230 		return (ENXIO);
1231 	for (i = 0; i < req->len; i++)
1232 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1233 		    req->dev_addr, &req->data[i]);
1234 	return (0);
1235 } /* ixgbe_if_i2c_req */
1236 
1237 /************************************************************************
1238  * ixgbe_add_media_types
1239  ************************************************************************/
1240 static void
1241 ixgbe_add_media_types(if_ctx_t ctx)
1242 {
1243 	struct adapter  *adapter = iflib_get_softc(ctx);
1244 	struct ixgbe_hw *hw = &adapter->hw;
1245 	device_t        dev = iflib_get_dev(ctx);
1246 	u64             layer;
1247 
1248 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1249 
1250 	/* Media types with matching FreeBSD media defines */
1251 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1252 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1253 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1254 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1255 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1256 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1257 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1258 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1259 
1260 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1261 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1262 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1263 		    NULL);
1264 
1265 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1266 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1267 		if (hw->phy.multispeed_fiber)
1268 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1269 			    NULL);
1270 	}
1271 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1272 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1273 		if (hw->phy.multispeed_fiber)
1274 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1275 			    NULL);
1276 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1277 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1278 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1279 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1280 
1281 #ifdef IFM_ETH_XTYPE
1282 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1283 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1284 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1285 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1286 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1287 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1288 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1289 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1290 #else
1291 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1292 		device_printf(dev, "Media supported: 10GbaseKR\n");
1293 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1294 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1295 	}
1296 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1297 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1298 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1299 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1300 	}
1301 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1302 		device_printf(dev, "Media supported: 1000baseKX\n");
1303 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1304 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1305 	}
1306 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1307 		device_printf(dev, "Media supported: 2500baseKX\n");
1308 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1309 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1310 	}
1311 #endif
1312 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1313 		device_printf(dev, "Media supported: 1000baseBX\n");
1314 
1315 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1316 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1317 		    0, NULL);
1318 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1319 	}
1320 
1321 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1322 } /* ixgbe_add_media_types */
1323 
1324 /************************************************************************
1325  * ixgbe_is_sfp
1326  ************************************************************************/
1327 static inline bool
1328 ixgbe_is_sfp(struct ixgbe_hw *hw)
1329 {
1330 	switch (hw->mac.type) {
1331 	case ixgbe_mac_82598EB:
1332 		if (hw->phy.type == ixgbe_phy_nl)
1333 			return (TRUE);
1334 		return (FALSE);
1335 	case ixgbe_mac_82599EB:
1336 		switch (hw->mac.ops.get_media_type(hw)) {
1337 		case ixgbe_media_type_fiber:
1338 		case ixgbe_media_type_fiber_qsfp:
1339 			return (TRUE);
1340 		default:
1341 			return (FALSE);
1342 		}
1343 	case ixgbe_mac_X550EM_x:
1344 	case ixgbe_mac_X550EM_a:
1345 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1346 			return (TRUE);
1347 		return (FALSE);
1348 	default:
1349 		return (FALSE);
1350 	}
1351 } /* ixgbe_is_sfp */
1352 
1353 /************************************************************************
1354  * ixgbe_config_link
1355  ************************************************************************/
1356 static void
1357 ixgbe_config_link(if_ctx_t ctx)
1358 {
1359 	struct adapter  *adapter = iflib_get_softc(ctx);
1360 	struct ixgbe_hw *hw = &adapter->hw;
1361 	u32             autoneg, err = 0;
1362 	bool            sfp, negotiate;
1363 
1364 	sfp = ixgbe_is_sfp(hw);
1365 
1366 	if (sfp) {
1367 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1368 		iflib_admin_intr_deferred(ctx);
1369 	} else {
1370 		if (hw->mac.ops.check_link)
1371 			err = ixgbe_check_link(hw, &adapter->link_speed,
1372 			    &adapter->link_up, FALSE);
1373 		if (err)
1374 			return;
1375 		autoneg = hw->phy.autoneg_advertised;
1376 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1377 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1378 			    &negotiate);
1379 		if (err)
1380 			return;
1381 		if (hw->mac.ops.setup_link)
1382 			err = hw->mac.ops.setup_link(hw, autoneg,
1383 			    adapter->link_up);
1384 	}
1385 } /* ixgbe_config_link */
1386 
1387 /************************************************************************
1388  * ixgbe_update_stats_counters - Update board statistics counters.
1389  ************************************************************************/
1390 static void
1391 ixgbe_update_stats_counters(struct adapter *adapter)
1392 {
1393 	struct ixgbe_hw       *hw = &adapter->hw;
1394 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1395 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1396 	u64                   total_missed_rx = 0;
1397 
1398 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1399 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1400 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1401 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1402 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1403 
1404 	for (int i = 0; i < 16; i++) {
1405 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1406 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1407 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1408 	}
1409 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1410 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1411 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1412 
1413 	/* Hardware workaround, gprc counts missed packets */
1414 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1415 	stats->gprc -= missed_rx;
1416 
1417 	if (hw->mac.type != ixgbe_mac_82598EB) {
1418 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1419 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1420 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1421 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1422 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1423 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1424 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1425 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1426 	} else {
1427 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1428 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1429 		/* 82598 only has a counter in the high register */
1430 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1431 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1432 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1433 	}
1434 
1435 	/*
1436 	 * Workaround: mprc hardware is incorrectly counting
1437 	 * broadcasts, so for now we subtract those.
1438 	 */
1439 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1440 	stats->bprc += bprc;
1441 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1442 	if (hw->mac.type == ixgbe_mac_82598EB)
1443 		stats->mprc -= bprc;
1444 
1445 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1446 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1447 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1448 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1449 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1450 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1451 
1452 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1453 	stats->lxontxc += lxon;
1454 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1455 	stats->lxofftxc += lxoff;
1456 	total = lxon + lxoff;
1457 
1458 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1459 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1460 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1461 	stats->gptc -= total;
1462 	stats->mptc -= total;
1463 	stats->ptc64 -= total;
1464 	stats->gotc -= total * ETHER_MIN_LEN;
1465 
1466 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1467 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1468 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1469 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1470 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1471 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1472 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1473 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1474 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1475 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1476 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1477 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1478 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1479 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1480 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1481 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1482 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1483 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1484 	/* Only read FCOE on 82599 */
1485 	if (hw->mac.type != ixgbe_mac_82598EB) {
1486 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1487 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1488 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1489 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1490 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1491 	}
1492 
1493 	/* Fill out the OS statistics structure */
1494 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1495 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1496 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1497 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1498 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1499 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1500 	IXGBE_SET_COLLISIONS(adapter, 0);
1501 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1502 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1503 } /* ixgbe_update_stats_counters */
1504 
1505 /************************************************************************
1506  * ixgbe_add_hw_stats
1507  *
1508  *   Add sysctl variables, one per statistic, to the system.
1509  ************************************************************************/
1510 static void
1511 ixgbe_add_hw_stats(struct adapter *adapter)
1512 {
1513 	device_t               dev = iflib_get_dev(adapter->ctx);
1514 	struct ix_rx_queue     *rx_que;
1515 	struct ix_tx_queue     *tx_que;
1516 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1517 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1518 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1519 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1520 	struct sysctl_oid      *stat_node, *queue_node;
1521 	struct sysctl_oid_list *stat_list, *queue_list;
1522 	int                    i;
1523 
1524 #define QUEUE_NAME_LEN 32
1525 	char                   namebuf[QUEUE_NAME_LEN];
1526 
1527 	/* Driver Statistics */
1528 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1529 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1530 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1531 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1532 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1533 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1534 
1535 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1536 		struct tx_ring *txr = &tx_que->txr;
1537 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1538 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1539 		    CTLFLAG_RD, NULL, "Queue Name");
1540 		queue_list = SYSCTL_CHILDREN(queue_node);
1541 
1542 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1543 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1544 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1545 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1546 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1547 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1548 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1549 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1550 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1551 		    CTLFLAG_RD, &txr->total_packets,
1552 		    "Queue Packets Transmitted");
1553 	}
1554 
1555 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1556 		struct rx_ring *rxr = &rx_que->rxr;
1557 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1558 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1559 		    CTLFLAG_RD, NULL, "Queue Name");
1560 		queue_list = SYSCTL_CHILDREN(queue_node);
1561 
1562 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1563 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1564 		    sizeof(&adapter->rx_queues[i]),
1565 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1566 		    "Interrupt Rate");
1567 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1568 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1569 		    "irqs on this queue");
1570 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1571 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1572 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1573 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1574 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1575 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1576 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1577 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1578 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1579 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1580 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1581 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1582 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1583 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1584 	}
1585 
1586 	/* MAC stats get their own sub node */
1587 
1588 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1589 	    CTLFLAG_RD, NULL, "MAC Statistics");
1590 	stat_list = SYSCTL_CHILDREN(stat_node);
1591 
1592 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1593 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1594 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1595 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1596 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1597 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1598 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1599 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1600 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1601 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1602 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1603 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1604 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1605 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1606 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1607 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1608 
1609 	/* Flow Control stats */
1610 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1611 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1612 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1613 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1614 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1615 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1616 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1617 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1618 
1619 	/* Packet Reception Stats */
1620 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1621 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1622 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1623 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1624 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1625 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1626 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1627 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1628 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1629 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1630 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1631 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1632 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1633 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1634 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1635 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1636 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1637 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1638 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1639 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1640 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1641 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1642 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1643 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1644 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1645 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1646 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1647 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1648 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1649 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1650 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1651 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1652 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1653 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1654 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1655 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1656 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1657 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1658 
1659 	/* Packet Transmission Stats */
1660 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1661 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1662 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1663 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1664 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1665 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1666 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1667 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1668 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1669 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1670 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1671 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1672 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1673 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1674 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1675 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1676 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1677 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1678 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1679 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1680 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1681 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1682 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1683 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1684 } /* ixgbe_add_hw_stats */
1685 
1686 /************************************************************************
1687  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1688  *
1689  *   Retrieves the TDH value from the hardware
1690  ************************************************************************/
1691 static int
1692 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1693 {
1694 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1695 	int            error;
1696 	unsigned int   val;
1697 
1698 	if (!txr)
1699 		return (0);
1700 
1701 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1702 	error = sysctl_handle_int(oidp, &val, 0, req);
1703 	if (error || !req->newptr)
1704 		return error;
1705 
1706 	return (0);
1707 } /* ixgbe_sysctl_tdh_handler */
1708 
1709 /************************************************************************
1710  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1711  *
1712  *   Retrieves the TDT value from the hardware
1713  ************************************************************************/
1714 static int
1715 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1716 {
1717 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1718 	int            error;
1719 	unsigned int   val;
1720 
1721 	if (!txr)
1722 		return (0);
1723 
1724 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1725 	error = sysctl_handle_int(oidp, &val, 0, req);
1726 	if (error || !req->newptr)
1727 		return error;
1728 
1729 	return (0);
1730 } /* ixgbe_sysctl_tdt_handler */
1731 
1732 /************************************************************************
1733  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1734  *
1735  *   Retrieves the RDH value from the hardware
1736  ************************************************************************/
1737 static int
1738 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1739 {
1740 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1741 	int            error;
1742 	unsigned int   val;
1743 
1744 	if (!rxr)
1745 		return (0);
1746 
1747 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1748 	error = sysctl_handle_int(oidp, &val, 0, req);
1749 	if (error || !req->newptr)
1750 		return error;
1751 
1752 	return (0);
1753 } /* ixgbe_sysctl_rdh_handler */
1754 
1755 /************************************************************************
1756  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1757  *
1758  *   Retrieves the RDT value from the hardware
1759  ************************************************************************/
1760 static int
1761 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1762 {
1763 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1764 	int            error;
1765 	unsigned int   val;
1766 
1767 	if (!rxr)
1768 		return (0);
1769 
1770 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1771 	error = sysctl_handle_int(oidp, &val, 0, req);
1772 	if (error || !req->newptr)
1773 		return error;
1774 
1775 	return (0);
1776 } /* ixgbe_sysctl_rdt_handler */
1777 
1778 /************************************************************************
1779  * ixgbe_if_vlan_register
1780  *
1781  *   Run via the vlan config EVENT. It lets us use the HW
1782  *   filter table since we can get the vlan id. This creates
1783  *   the entry in the soft version of the VFTA;
1784  *   ixgbe_setup_vlan_hw_support() then updates the real table.
1785  ************************************************************************/
1786 static void
1787 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1788 {
1789 	struct adapter *adapter = iflib_get_softc(ctx);
1790 	u16            index, bit;
1791 
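	/*
	 * The shadow VFTA is 128 32-bit entries: bits [11:5] of the
	 * tag select the entry and bits [4:0] select the bit within it.
	 */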
1792 	index = (vtag >> 5) & 0x7F;
1793 	bit = vtag & 0x1F;
1794 	adapter->shadow_vfta[index] |= (1 << bit);
1795 	++adapter->num_vlans;
1796 	ixgbe_setup_vlan_hw_support(ctx);
1797 } /* ixgbe_if_vlan_register */
1798 
1799 /************************************************************************
1800  * ixgbe_if_vlan_unregister
1801  *
1802  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1803  ************************************************************************/
1804 static void
1805 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1806 {
1807 	struct adapter *adapter = iflib_get_softc(ctx);
1808 	u16            index, bit;
1809 
1810 	index = (vtag >> 5) & 0x7F;
1811 	bit = vtag & 0x1F;
1812 	adapter->shadow_vfta[index] &= ~(1 << bit);
1813 	--adapter->num_vlans;
1814 	/* Re-init to load the changes */
1815 	ixgbe_setup_vlan_hw_support(ctx);
1816 } /* ixgbe_if_vlan_unregister */
1817 
1818 /************************************************************************
1819  * ixgbe_setup_vlan_hw_support
1820  ************************************************************************/
1821 static void
1822 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1823 {
1824 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1825 	struct adapter  *adapter = iflib_get_softc(ctx);
1826 	struct ixgbe_hw *hw = &adapter->hw;
1827 	struct rx_ring  *rxr;
1828 	int             i;
1829 	u32             ctrl;
1830 
1832 	/*
1833 	 * We get here through init_locked, meaning a soft
1834 	 * reset has already cleared the VFTA and other
1835 	 * state, so if no VLANs have been registered
1836 	 * there is nothing to do.
1837 	 */
1838 	if (adapter->num_vlans == 0)
1839 		return;
1840 
1841 	/* Setup the queues for vlans */
1842 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1843 		for (i = 0; i < adapter->num_rx_queues; i++) {
1844 			rxr = &adapter->rx_queues[i].rxr;
1845 			/* On 82599 the VLAN enable is per-queue in RXDCTL */
1846 			if (hw->mac.type != ixgbe_mac_82598EB) {
1847 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1848 				ctrl |= IXGBE_RXDCTL_VME;
1849 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1850 			}
1851 			rxr->vtag_strip = TRUE;
1852 		}
1853 	}
1854 
1855 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1856 		return;
1857 	/*
1858 	 * A soft reset zeroes out the VFTA, so
1859 	 * we need to repopulate it now.
1860 	 */
1861 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1862 		if (adapter->shadow_vfta[i] != 0)
1863 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1864 			    adapter->shadow_vfta[i]);
1865 
1866 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1867 	/* Enable the Filter Table if enabled */
1868 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1869 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1870 		ctrl |= IXGBE_VLNCTRL_VFE;
1871 	}
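	/* On 82598 VLAN tag stripping is enabled globally via VLNCTRL */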
1872 	if (hw->mac.type == ixgbe_mac_82598EB)
1873 		ctrl |= IXGBE_VLNCTRL_VME;
1874 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1875 } /* ixgbe_setup_vlan_hw_support */
1876 
1877 /************************************************************************
1878  * ixgbe_get_slot_info
1879  *
1880  *   Get the width and transaction speed of
1881  *   the slot this adapter is plugged into.
1882  ************************************************************************/
1883 static void
1884 ixgbe_get_slot_info(struct adapter *adapter)
1885 {
1886 	device_t        dev = iflib_get_dev(adapter->ctx);
1887 	struct ixgbe_hw *hw = &adapter->hw;
1888 	int             bus_info_valid = TRUE;
1889 	u32             offset;
1890 	u16             link;
1891 
1892 	/* Some devices are behind an internal bridge */
1893 	switch (hw->device_id) {
1894 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1895 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1896 		goto get_parent_info;
1897 	default:
1898 		break;
1899 	}
1900 
1901 	ixgbe_get_bus_info(hw);
1902 
1903 	/*
1904 	 * Some devices don't use PCI-E; for those there is no
1905 	 * point in displaying "Unknown" bus speed and width.
1906 	 */
1907 	switch (hw->mac.type) {
1908 	case ixgbe_mac_X550EM_x:
1909 	case ixgbe_mac_X550EM_a:
1910 		return;
1911 	default:
1912 		goto display;
1913 	}
1914 
1915 get_parent_info:
1916 	/*
1917 	 * For the Quad port adapter we need to parse back
1918 	 * up the PCI tree to find the speed of the expansion
1919 	 * slot into which this adapter is plugged. A bit more work.
1920 	 */
1921 	dev = device_get_parent(device_get_parent(dev));
1922 #ifdef IXGBE_DEBUG
1923 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1924 	    pci_get_slot(dev), pci_get_function(dev));
1925 #endif
1926 	dev = device_get_parent(device_get_parent(dev));
1927 #ifdef IXGBE_DEBUG
1928 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1929 	    pci_get_slot(dev), pci_get_function(dev));
1930 #endif
1931 	/* Now get the PCI Express Capabilities offset */
1932 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1933 		/*
1934 		 * Hmm...can't get PCI-Express capabilities.
1935 		 * Falling back to default method.
1936 		 */
1937 		bus_info_valid = FALSE;
1938 		ixgbe_get_bus_info(hw);
1939 		goto display;
1940 	}
1941 	/* ...and read the Link Status Register */
1942 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1943 	ixgbe_set_pci_config_data_generic(hw, link);
1944 
1945 display:
1946 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1947 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1948 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1949 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1950 	     "Unknown"),
1951 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1952 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1953 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1954 	     "Unknown"));
1955 
1956 	if (bus_info_valid) {
1957 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1958 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1959 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1960 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1961 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1962 		}
1963 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1964 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1965 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1966 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1967 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1968 		}
1969 	} else
1970 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1971 
1972 	return;
1973 } /* ixgbe_get_slot_info */
1974 
1975 /************************************************************************
1976  * ixgbe_if_msix_intr_assign
1977  *
1978  *   Setup MSI-X Interrupt resources and handlers
1979  ************************************************************************/
1980 static int
1981 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1982 {
1983 	struct adapter     *adapter = iflib_get_softc(ctx);
1984 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1985 	struct ix_tx_queue *tx_que;
1986 	int                error, rid, vector = 0;
1987 	int                cpu_id = 0;
1988 	char               buf[16];
1989 
1990 	/* MSI-X resource IDs are 1-based: vector N maps to rid N + 1 */
1991 	rid = vector + 1;
1992 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1993 		rid = vector + 1;
1994 
1995 		snprintf(buf, sizeof(buf), "rxq%d", i);
1996 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1997 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
1998 
1999 		if (error) {
2000 			device_printf(iflib_get_dev(ctx),
2001 			    "Failed to allocate que int %d err: %d", i, error);
2002 			adapter->num_rx_queues = i + 1;
2003 			goto fail;
2004 		}
2005 
2006 		rx_que->msix = vector;
2007 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2008 			/*
2009 			 * The queue ID is used as the RSS layer bucket ID.
2010 			 * We look up the queue ID -> RSS CPU ID and select
2011 			 * that.
2012 			 */
2013 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2014 		} else {
2015 			/*
2016 			 * Bind the MSI-X vector, and thus the
2017 			 * rings to the corresponding cpu.
2018 			 *
2019 			 * This just happens to match the default RSS
2020 			 * round-robin bucket -> queue -> CPU allocation.
2021 			 */
2022 			if (adapter->num_rx_queues > 1)
2023 				cpu_id = i;
2024 		}
2025 
2026 	}
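	/*
	 * TX queues do not get their own MSI-X vectors; each one is
	 * serviced as a softirq on the vector of the RX queue it maps to.
	 */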
2027 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2028 		snprintf(buf, sizeof(buf), "txq%d", i);
2029 		tx_que = &adapter->tx_queues[i];
2030 		tx_que->msix = i % adapter->num_rx_queues;
2031 		iflib_softirq_alloc_generic(ctx,
2032 		    &adapter->rx_queues[tx_que->msix].que_irq,
2033 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2034 	}
2035 	rid = vector + 1;
2036 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2037 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2038 	if (error) {
2039 		device_printf(iflib_get_dev(ctx),
2040 		    "Failed to register admin handler");
2041 		return (error);
2042 	}
2043 
2044 	adapter->vector = vector;
2045 
2046 	return (0);
2047 fail:
2048 	iflib_irq_free(ctx, &adapter->irq);
2049 	rx_que = adapter->rx_queues;
2050 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2051 		iflib_irq_free(ctx, &rx_que->que_irq);
2052 
2053 	return (error);
2054 } /* ixgbe_if_msix_intr_assign */
2055 
2056 /*********************************************************************
2057  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2058  **********************************************************************/
2059 static int
2060 ixgbe_msix_que(void *arg)
2061 {
2062 	struct ix_rx_queue *que = arg;
2063 	struct adapter     *adapter = que->adapter;
2064 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2065 
2066 	/* Protect against spurious interrupts */
2067 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2068 		return (FILTER_HANDLED);
2069 
2070 	ixgbe_disable_queue(adapter, que->msix);
2071 	++que->irqs;
2072 
2073 	return (FILTER_SCHEDULE_THREAD);
2074 } /* ixgbe_msix_que */
2075 
2076 /************************************************************************
2077  * ixgbe_if_media_status - Media Ioctl callback
2078  *
2079  *   Called whenever the user queries the status of
2080  *   the interface using ifconfig.
2081  ************************************************************************/
2082 static void
2083 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2084 {
2085 	struct adapter  *adapter = iflib_get_softc(ctx);
2086 	struct ixgbe_hw *hw = &adapter->hw;
2087 	int             layer;
2088 
2089 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2090 
2091 	ifmr->ifm_status = IFM_AVALID;
2092 	ifmr->ifm_active = IFM_ETHER;
2093 
2094 	if (!adapter->link_active)
2095 		return;
2096 
2097 	ifmr->ifm_status |= IFM_ACTIVE;
2098 	layer = adapter->phy_layer;
2099 
2100 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2101 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2102 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2103 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2104 		switch (adapter->link_speed) {
2105 		case IXGBE_LINK_SPEED_10GB_FULL:
2106 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2107 			break;
2108 		case IXGBE_LINK_SPEED_1GB_FULL:
2109 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2110 			break;
2111 		case IXGBE_LINK_SPEED_100_FULL:
2112 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2113 			break;
2114 		case IXGBE_LINK_SPEED_10_FULL:
2115 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2116 			break;
2117 		}
2118 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2119 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2120 		switch (adapter->link_speed) {
2121 		case IXGBE_LINK_SPEED_10GB_FULL:
2122 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2123 			break;
2124 		}
2125 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2126 		switch (adapter->link_speed) {
2127 		case IXGBE_LINK_SPEED_10GB_FULL:
2128 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2129 			break;
2130 		case IXGBE_LINK_SPEED_1GB_FULL:
2131 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2132 			break;
2133 		}
2134 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2135 		switch (adapter->link_speed) {
2136 		case IXGBE_LINK_SPEED_10GB_FULL:
2137 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2138 			break;
2139 		case IXGBE_LINK_SPEED_1GB_FULL:
2140 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2141 			break;
2142 		}
2143 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2144 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2145 		switch (adapter->link_speed) {
2146 		case IXGBE_LINK_SPEED_10GB_FULL:
2147 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2148 			break;
2149 		case IXGBE_LINK_SPEED_1GB_FULL:
2150 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2151 			break;
2152 		}
2153 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2154 		switch (adapter->link_speed) {
2155 		case IXGBE_LINK_SPEED_10GB_FULL:
2156 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2157 			break;
2158 		}
2159 	/*
2160 	 * XXX: These need to use the proper media types once
2161 	 * they're added.
2162 	 */
2163 #ifndef IFM_ETH_XTYPE
2164 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2165 		switch (adapter->link_speed) {
2166 		case IXGBE_LINK_SPEED_10GB_FULL:
2167 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2168 			break;
2169 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2170 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2171 			break;
2172 		case IXGBE_LINK_SPEED_1GB_FULL:
2173 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2174 			break;
2175 		}
2176 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2177 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2178 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2179 		switch (adapter->link_speed) {
2180 		case IXGBE_LINK_SPEED_10GB_FULL:
2181 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2182 			break;
2183 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2184 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2185 			break;
2186 		case IXGBE_LINK_SPEED_1GB_FULL:
2187 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2188 			break;
2189 		}
2190 #else
2191 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2192 		switch (adapter->link_speed) {
2193 		case IXGBE_LINK_SPEED_10GB_FULL:
2194 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2195 			break;
2196 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2197 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2198 			break;
2199 		case IXGBE_LINK_SPEED_1GB_FULL:
2200 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2201 			break;
2202 		}
2203 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2204 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2205 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2206 		switch (adapter->link_speed) {
2207 		case IXGBE_LINK_SPEED_10GB_FULL:
2208 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2209 			break;
2210 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2211 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2212 			break;
2213 		case IXGBE_LINK_SPEED_1GB_FULL:
2214 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2215 			break;
2216 		}
2217 #endif
2218 
2219 	/* If nothing is recognized... */
2220 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2221 		ifmr->ifm_active |= IFM_UNKNOWN;
2222 
2223 	/* Display current flow control setting used on link */
2224 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2225 	    hw->fc.current_mode == ixgbe_fc_full)
2226 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2227 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2228 	    hw->fc.current_mode == ixgbe_fc_full)
2229 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2230 } /* ixgbe_if_media_status */
2231 
2232 /************************************************************************
2233  * ixgbe_if_media_change - Media Ioctl callback
2234  *
2235  *   Called when the user changes speed/duplex using
2236  *   the media/mediaopt options with ifconfig.
2237  ************************************************************************/
2238 static int
2239 ixgbe_if_media_change(if_ctx_t ctx)
2240 {
2241 	struct adapter   *adapter = iflib_get_softc(ctx);
2242 	struct ifmedia   *ifm = iflib_get_media(ctx);
2243 	struct ixgbe_hw  *hw = &adapter->hw;
2244 	ixgbe_link_speed speed = 0;
2245 
2246 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2247 
2248 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2249 		return (EINVAL);
2250 
2251 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2252 		return (EPERM);
2253 
2254 	/*
2255 	 * We don't actually need to check against the supported
2256 	 * media types of the adapter; ifmedia will take care of
2257 	 * that for us.
2258 	 */
2259 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2260 	case IFM_AUTO:
2261 	case IFM_10G_T:
2262 		speed |= IXGBE_LINK_SPEED_100_FULL;
2263 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2264 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2265 		break;
2266 	case IFM_10G_LRM:
2267 	case IFM_10G_LR:
2268 #ifndef IFM_ETH_XTYPE
2269 	case IFM_10G_SR: /* KR, too */
2270 	case IFM_10G_CX4: /* KX4 */
2271 #else
2272 	case IFM_10G_KR:
2273 	case IFM_10G_KX4:
2274 #endif
2275 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2276 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2277 		break;
2278 #ifndef IFM_ETH_XTYPE
2279 	case IFM_1000_CX: /* KX */
2280 #else
2281 	case IFM_1000_KX:
2282 #endif
2283 	case IFM_1000_LX:
2284 	case IFM_1000_SX:
2285 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2286 		break;
2287 	case IFM_1000_T:
2288 		speed |= IXGBE_LINK_SPEED_100_FULL;
2289 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2290 		break;
2291 	case IFM_10G_TWINAX:
2292 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2293 		break;
2294 	case IFM_100_TX:
2295 		speed |= IXGBE_LINK_SPEED_100_FULL;
2296 		break;
2297 	case IFM_10_T:
2298 		speed |= IXGBE_LINK_SPEED_10_FULL;
2299 		break;
2300 	default:
2301 		goto invalid;
2302 	}
2303 
2304 	hw->mac.autotry_restart = TRUE;
2305 	hw->mac.ops.setup_link(hw, speed, TRUE);
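	/*
	 * Record the selection as a speed bitmask
	 * (0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M).
	 */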
2306 	adapter->advertise =
2307 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2308 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2309 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2310 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2311 
2312 	return (0);
2313 
2314 invalid:
2315 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2316 
2317 	return (EINVAL);
2318 } /* ixgbe_if_media_change */
2319 
2320 /************************************************************************
2321  * ixgbe_if_promisc_set
2322  ************************************************************************/
2323 static int
2324 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2325 {
2326 	struct adapter *adapter = iflib_get_softc(ctx);
2327 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2328 	u32            rctl;
2329 	int            mcnt = 0;
2330 
2331 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
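	/* FCTRL.UPE/MPE are the unicast/multicast promiscuous enable bits */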
2332 	rctl &= (~IXGBE_FCTRL_UPE);
2333 	if (ifp->if_flags & IFF_ALLMULTI)
2334 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2335 	else {
2336 		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2337 	}
2338 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2339 		rctl &= (~IXGBE_FCTRL_MPE);
2340 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2341 
2342 	if (ifp->if_flags & IFF_PROMISC) {
2343 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2344 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2345 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2346 		rctl |= IXGBE_FCTRL_MPE;
2347 		rctl &= ~IXGBE_FCTRL_UPE;
2348 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2349 	}
2350 	return (0);
2351 } /* ixgbe_if_promisc_set */
2352 
2353 /************************************************************************
2354  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2355  ************************************************************************/
2356 static int
2357 ixgbe_msix_link(void *arg)
2358 {
2359 	struct adapter  *adapter = arg;
2360 	struct ixgbe_hw *hw = &adapter->hw;
2361 	u32             eicr, eicr_mask;
2362 	s32             retval;
2363 
2364 	++adapter->link_irq;
2365 
2366 	/* Pause other interrupts */
2367 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2368 
2369 	/* First get the cause */
2370 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2371 	/* Be sure the queue bits are not cleared */
2372 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2373 	/* Clear interrupt with write */
2374 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2375 
2376 	/* Link status change */
2377 	if (eicr & IXGBE_EICR_LSC) {
2378 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2379 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2380 	}
2381 
2382 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2383 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2384 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2385 			/* This is probably overkill :) */
2386 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2387 				return (FILTER_HANDLED);
2388 			/* Disable the interrupt */
2389 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2390 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2391 		} else
2392 			if (eicr & IXGBE_EICR_ECC) {
2393 				device_printf(iflib_get_dev(adapter->ctx),
2394 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2395 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2396 			}
2397 
2398 		/* Check for over temp condition */
2399 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2400 			switch (adapter->hw.mac.type) {
2401 			case ixgbe_mac_X550EM_a:
2402 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2403 					break;
2404 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2405 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2406 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2407 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2408 				retval = hw->phy.ops.check_overtemp(hw);
2409 				if (retval != IXGBE_ERR_OVERTEMP)
2410 					break;
2411 				device_printf(iflib_get_dev(adapter->ctx),
2412 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2413 				device_printf(iflib_get_dev(adapter->ctx),
2414 				    "System shutdown required!\n");
2415 				break;
2416 			default:
2417 				if (!(eicr & IXGBE_EICR_TS))
2418 					break;
2419 				retval = hw->phy.ops.check_overtemp(hw);
2420 				if (retval != IXGBE_ERR_OVERTEMP)
2421 					break;
2422 				device_printf(iflib_get_dev(adapter->ctx),
2423 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2424 				device_printf(iflib_get_dev(adapter->ctx),
2425 				    "System shutdown required!\n");
2426 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2427 				break;
2428 			}
2429 		}
2430 
2431 		/* Check for VF message */
2432 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2433 		    (eicr & IXGBE_EICR_MAILBOX))
2434 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2435 	}
2436 
2437 	if (ixgbe_is_sfp(hw)) {
2438 		/* Pluggable optics-related interrupt */
2439 		if (hw->mac.type >= ixgbe_mac_X540)
2440 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2441 		else
2442 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2443 
2444 		if (eicr & eicr_mask) {
2445 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2446 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2447 		}
2448 
2449 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2450 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2451 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2452 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2453 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2454 		}
2455 	}
2456 
2457 	/* Check for fan failure */
2458 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2459 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2460 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2461 	}
2462 
2463 	/* External PHY interrupt */
2464 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2465 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2466 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2467 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2468 	}
2469 
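	/* Only schedule the admin task if some work was actually requested */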
2470 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2471 } /* ixgbe_msix_link */
2472 
2473 /************************************************************************
2474  * ixgbe_sysctl_interrupt_rate_handler
2475  ************************************************************************/
2476 static int
2477 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2478 {
2479 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2480 	int                error;
2481 	unsigned int       reg, usec, rate;
2482 
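	/*
	 * EITR bits [11:3] hold the interrupt interval; it is treated
	 * here as being in 2 usec units, so the equivalent rate is
	 * 500000 / interval interrupts per second.
	 */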
2483 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2484 	usec = ((reg & 0x0FF8) >> 3);
2485 	if (usec > 0)
2486 		rate = 500000 / usec;
2487 	else
2488 		rate = 0;
2489 	error = sysctl_handle_int(oidp, &rate, 0, req);
2490 	if (error || !req->newptr)
2491 		return error;
2492 	reg &= ~0xfff; /* default, no limitation */
2493 	ixgbe_max_interrupt_rate = 0;
2494 	if (rate > 0 && rate < 500000) {
2495 		if (rate < 1000)
2496 			rate = 1000;
2497 		ixgbe_max_interrupt_rate = rate;
2498 		reg |= ((4000000/rate) & 0xff8);
2499 	}
2500 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2501 
2502 	return (0);
2503 } /* ixgbe_sysctl_interrupt_rate_handler */
2504 
2505 /************************************************************************
2506  * ixgbe_add_device_sysctls
2507  ************************************************************************/
2508 static void
2509 ixgbe_add_device_sysctls(if_ctx_t ctx)
2510 {
2511 	struct adapter         *adapter = iflib_get_softc(ctx);
2512 	device_t               dev = iflib_get_dev(ctx);
2513 	struct ixgbe_hw        *hw = &adapter->hw;
2514 	struct sysctl_oid_list *child;
2515 	struct sysctl_ctx_list *ctx_list;
2516 
2517 	ctx_list = device_get_sysctl_ctx(dev);
2518 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2519 
2520 	/* Sysctls for all devices */
2521 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2522 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2523 	    IXGBE_SYSCTL_DESC_SET_FC);
2524 
2525 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2526 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2527 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2528 
2529 #ifdef IXGBE_DEBUG
2530 	/* testing sysctls (for all devices) */
2531 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2532 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2533 	    "I", "PCI Power State");
2534 
2535 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2536 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2537 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2538 #endif
2539 	/* for X550 series devices */
2540 	if (hw->mac.type >= ixgbe_mac_X550)
2541 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2542 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2543 		    "I", "DMA Coalesce");
2544 
2545 	/* for WoL-capable devices */
2546 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2547 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2548 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2549 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2550 
2551 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2552 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2553 		    "I", "Enable/Disable Wake Up Filters");
2554 	}
2555 
2556 	/* for X552/X557-AT devices */
2557 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2558 		struct sysctl_oid *phy_node;
2559 		struct sysctl_oid_list *phy_list;
2560 
2561 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2562 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2563 		phy_list = SYSCTL_CHILDREN(phy_node);
2564 
2565 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2566 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2567 		    "I", "Current External PHY Temperature (Celsius)");
2568 
2569 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2570 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2571 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2572 		    "External PHY High Temperature Event Occurred");
2573 	}
2574 
2575 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2576 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2577 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2578 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2579 	}
2580 } /* ixgbe_add_device_sysctls */
2581 
2582 /************************************************************************
2583  * ixgbe_allocate_pci_resources
2584  ************************************************************************/
2585 static int
2586 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2587 {
2588 	struct adapter *adapter = iflib_get_softc(ctx);
2589 	device_t        dev = iflib_get_dev(ctx);
2590 	int             rid;
2591 
2592 	rid = PCIR_BAR(0);
2593 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2594 	    RF_ACTIVE);
2595 
2596 	if (!(adapter->pci_mem)) {
2597 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2598 		return (ENXIO);
2599 	}
2600 
2601 	/* Save bus_space values for READ/WRITE_REG macros */
2602 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2603 	adapter->osdep.mem_bus_space_handle =
2604 	    rman_get_bushandle(adapter->pci_mem);
2605 	/* Set hw values for shared code */
2606 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2607 
2608 	return (0);
2609 } /* ixgbe_allocate_pci_resources */
2610 
2611 /************************************************************************
2612  * ixgbe_if_detach - Device removal routine
2613  *
2614  *   Called when the driver is being removed.
2615  *   Stops the adapter and deallocates all the resources
2616  *   that were allocated for driver operation.
2617  *
2618  *   return 0 on success, positive on failure
2619  ************************************************************************/
2620 static int
2621 ixgbe_if_detach(if_ctx_t ctx)
2622 {
2623 	struct adapter *adapter = iflib_get_softc(ctx);
2624 	device_t       dev = iflib_get_dev(ctx);
2625 	u32            ctrl_ext;
2626 
2627 	INIT_DEBUGOUT("ixgbe_detach: begin");
2628 
2629 	if (ixgbe_pci_iov_detach(dev) != 0) {
2630 		device_printf(dev, "SR-IOV in use; detach first.\n");
2631 		return (EBUSY);
2632 	}
2633 
2634 	ixgbe_setup_low_power_mode(ctx);
2635 
2636 	/* let hardware know driver is unloading */
2637 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2638 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2639 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2640 
2641 	ixgbe_free_pci_resources(ctx);
2642 	free(adapter->mta, M_IXGBE);
2643 
2644 	return (0);
2645 } /* ixgbe_if_detach */
2646 
2647 /************************************************************************
2648  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2649  *
2650  *   Prepare the adapter/port for LPLU and/or WoL
2651  ************************************************************************/
2652 static int
2653 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2654 {
2655 	struct adapter  *adapter = iflib_get_softc(ctx);
2656 	struct ixgbe_hw *hw = &adapter->hw;
2657 	device_t        dev = iflib_get_dev(ctx);
2658 	s32             error = 0;
2659 
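	/* With WoL disabled there is no need to keep the PHY powered */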
2660 	if (!hw->wol_enabled)
2661 		ixgbe_set_phy_power(hw, FALSE);
2662 
2663 	/* Limit power management flow to X550EM baseT */
2664 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2665 	    hw->phy.ops.enter_lplu) {
2666 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2667 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2668 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2669 
2670 		/*
2671 		 * Clear Wake Up Status register to prevent any previous wakeup
2672 		 * events from waking us up immediately after we suspend.
2673 		 */
2674 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2675 
2676 		/*
2677 		 * Program the Wakeup Filter Control register with user filter
2678 		 * settings
2679 		 */
2680 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2681 
2682 		/* Enable wakeups and power management in Wakeup Control */
2683 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2684 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2685 
2686 		/* X550EM baseT adapters need a special LPLU flow */
2687 		hw->phy.reset_disable = TRUE;
2688 		ixgbe_if_stop(ctx);
2689 		error = hw->phy.ops.enter_lplu(hw);
2690 		if (error)
2691 			device_printf(dev, "Error entering LPLU: %d\n", error);
2692 		hw->phy.reset_disable = FALSE;
2693 	} else {
2694 		/* Just stop for other adapters */
2695 		ixgbe_if_stop(ctx);
2696 	}
2697 
2698 	return error;
2699 } /* ixgbe_setup_low_power_mode */
2700 
2701 /************************************************************************
2702  * ixgbe_if_shutdown - Shutdown entry point
2703  ************************************************************************/
2704 static int
2705 ixgbe_if_shutdown(if_ctx_t ctx)
2706 {
2707 	int error = 0;
2708 
2709 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2710 
2711 	error = ixgbe_setup_low_power_mode(ctx);
2712 
2713 	return (error);
2714 } /* ixgbe_if_shutdown */
2715 
2716 /************************************************************************
2717  * ixgbe_if_suspend
2718  *
2719  *   From D0 to D3
2720  ************************************************************************/
2721 static int
2722 ixgbe_if_suspend(if_ctx_t ctx)
2723 {
2724 	int error = 0;
2725 
2726 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2727 
2728 	error = ixgbe_setup_low_power_mode(ctx);
2729 
2730 	return (error);
2731 } /* ixgbe_if_suspend */
2732 
2733 /************************************************************************
2734  * ixgbe_if_resume
2735  *
2736  *   From D3 to D0
2737  ************************************************************************/
2738 static int
2739 ixgbe_if_resume(if_ctx_t ctx)
2740 {
2741 	struct adapter  *adapter = iflib_get_softc(ctx);
2742 	device_t        dev = iflib_get_dev(ctx);
2743 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2744 	struct ixgbe_hw *hw = &adapter->hw;
2745 	u32             wus;
2746 
2747 	INIT_DEBUGOUT("ixgbe_resume: begin");
2748 
2749 	/* Read & clear WUS register */
2750 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2751 	if (wus)
2752 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2753 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2754 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2755 	/* And clear WUFC until next low-power transition */
2756 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2757 
2758 	/*
2759 	 * Required after D3->D0 transition;
2760 	 * will re-advertise all previous advertised speeds
2761 	 */
2762 	if (ifp->if_flags & IFF_UP)
2763 		ixgbe_if_init(ctx);
2764 
2765 	return (0);
2766 } /* ixgbe_if_resume */
2767 
2768 /************************************************************************
2769  * ixgbe_if_mtu_set - Ioctl mtu entry point
2770  *
2771  *   Return 0 on success, EINVAL on failure
2772  ************************************************************************/
2773 static int
2774 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2775 {
2776 	struct adapter *adapter = iflib_get_softc(ctx);
2777 	int error = 0;
2778 
2779 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2780 
2781 	if (mtu > IXGBE_MAX_MTU) {
2782 		error = EINVAL;
2783 	} else {
2784 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2785 	}
2786 
2787 	return error;
2788 } /* ixgbe_if_mtu_set */
2789 
2790 /************************************************************************
2791  * ixgbe_if_crcstrip_set
2792  ************************************************************************/
2793 static void
2794 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2795 {
2796 	struct adapter *sc = iflib_get_softc(ctx);
2797 	struct ixgbe_hw *hw = &sc->hw;
2798 	/* crc stripping is set in two places:
2799 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2800 	 * IXGBE_RDRXCTL (set by the original driver in
2801 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2802 	 *	We disable the setting when netmap is compiled in).
2803 	 * We update the values here, but also in ixgbe.c because
2804 	 * init_locked sometimes is called outside our control.
2805 	 */
2806 	uint32_t hl, rxc;
2807 
2808 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2809 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2810 #ifdef NETMAP
2811 	if (netmap_verbose)
2812 		D("%s read  HLREG 0x%x rxc 0x%x",
2813 			onoff ? "enter" : "exit", hl, rxc);
2814 #endif
2815 	/* hw requirements ... */
2816 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2817 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2818 	if (onoff && !crcstrip) {
2819 		/* keep the crc. Fast rx */
2820 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2821 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2822 	} else {
2823 		/* reset default mode */
2824 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2825 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2826 	}
2827 #ifdef NETMAP
2828 	if (netmap_verbose)
2829 		D("%s write HLREG 0x%x rxc 0x%x",
2830 			onoff ? "enter" : "exit", hl, rxc);
2831 #endif
2832 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2833 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2834 } /* ixgbe_if_crcstrip_set */
2835 
2836 /*********************************************************************
2837  * ixgbe_if_init - Init entry point
2838  *
2839  *   Used in two ways: It is used by the stack as an init
2840  *   entry point in network interface structure. It is also
2841  *   used by the driver as a hw/sw initialization routine to
2842  *   get to a consistent state.
2843  *
2844  *   Return 0 on success, positive on failure
2845  **********************************************************************/
2846 void
2847 ixgbe_if_init(if_ctx_t ctx)
2848 {
2849 	struct adapter     *adapter = iflib_get_softc(ctx);
2850 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2851 	device_t           dev = iflib_get_dev(ctx);
2852 	struct ixgbe_hw *hw = &adapter->hw;
2853 	struct ix_rx_queue *rx_que;
2854 	struct ix_tx_queue *tx_que;
2855 	u32             txdctl, mhadd;
2856 	u32             rxdctl, rxctrl;
2857 	u32             ctrl_ext;
2858 	int             i, j, err;
2860 
2861 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2862 
2863 	/* Queue indices may change with IOV mode */
2864 	ixgbe_align_all_queue_indices(adapter);
2865 
2866 	/* reprogram the RAR[0] in case user changed it. */
2867 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2868 
2869 	/* Get the latest MAC address; the user may have set a LAA */
2870 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2871 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2872 	hw->addr_ctrl.rar_used_count = 1;
2873 
2874 	ixgbe_init_hw(hw);
2875 
2876 	ixgbe_initialize_iov(adapter);
2877 
2878 	ixgbe_initialize_transmit_units(ctx);
2879 
2880 	/* Setup Multicast table */
2881 	ixgbe_if_multi_set(ctx);
2882 
2883 	/* Determine the correct mbuf pool, based on frame size */
2884 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2885 
2886 	/* Configure RX settings */
2887 	ixgbe_initialize_receive_units(ctx);
2888 
2889 	/*
2890 	 * Initialize variable holding task enqueue requests
2891 	 * from MSI-X interrupts
2892 	 */
2893 	adapter->task_requests = 0;
2894 
2895 	/* Enable SDP & MSI-X interrupts based on adapter */
2896 	ixgbe_config_gpie(adapter);
2897 
2898 	/* Set MTU size */
2899 	if (ifp->if_mtu > ETHERMTU) {
2900 		/* aka IXGBE_MAXFRS on 82599 and newer */
2901 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2902 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2903 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2904 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2905 	}
2906 
2907 	/* Now enable all the queues */
2908 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2909 		struct tx_ring *txr = &tx_que->txr;
2910 
2911 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2912 		txdctl |= IXGBE_TXDCTL_ENABLE;
2913 		/* Set WTHRESH to 8, burst writeback */
2914 		txdctl |= (8 << 16);
2915 		/*
2916 		 * When the internal queue falls below PTHRESH (32),
2917 		 * start prefetching as long as there are at least
2918 		 * HTHRESH (1) buffers ready. The values are taken
2919 		 * from the Intel linux driver 3.8.21.
2920 		 * Prefetching enables tx line rate even with 1 queue.
2921 		 */
2922 		txdctl |= (32 << 0) | (1 << 8);
2923 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2924 	}
2925 
2926 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2927 		struct rx_ring *rxr = &rx_que->rxr;
2928 
2929 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2930 		if (hw->mac.type == ixgbe_mac_82598EB) {
2931 			/*
2932 			 * PTHRESH = 21
2933 			 * HTHRESH = 4
2934 			 * WTHRESH = 8
2935 			 */
2936 			rxdctl &= ~0x3FFFFF;
2937 			rxdctl |= 0x080420;
2938 		}
2939 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2940 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
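		/* Poll up to 10 ms for the hardware to report the queue as enabled */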
2941 		for (j = 0; j < 10; j++) {
2942 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2943 			    IXGBE_RXDCTL_ENABLE)
2944 				break;
2945 			else
2946 				msec_delay(1);
2947 		}
2948 		wmb();
2949 	}
2950 
2951 	/* Enable Receive engine */
2952 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2953 	if (hw->mac.type == ixgbe_mac_82598EB)
2954 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2955 	rxctrl |= IXGBE_RXCTRL_RXEN;
2956 	ixgbe_enable_rx_dma(hw, rxctrl);
2957 
2958 	/* Set up MSI/MSI-X routing */
2959 	if (ixgbe_enable_msix)  {
2960 		ixgbe_configure_ivars(adapter);
2961 		/* Set up auto-mask */
2962 		if (hw->mac.type == ixgbe_mac_82598EB)
2963 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2964 		else {
2965 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2966 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2967 		}
2968 	} else {  /* Simple settings for Legacy/MSI */
2969 		ixgbe_set_ivar(adapter, 0, 0, 0);
2970 		ixgbe_set_ivar(adapter, 0, 0, 1);
2971 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2972 	}
2973 
2974 	ixgbe_init_fdir(adapter);
2975 
2976 	/*
2977 	 * Check on any SFP devices that
2978 	 * need to be kick-started
2979 	 */
2980 	if (hw->phy.type == ixgbe_phy_none) {
2981 		err = hw->phy.ops.identify(hw);
2982 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2983 			device_printf(dev,
2984 			    "Unsupported SFP+ module type was detected.\n");
2985 			return;
2986 		}
2987 	}
2988 
2989 	/* Set moderation on the Link interrupt */
2990 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
2991 
2992 	/* Enable power to the phy. */
2993 	ixgbe_set_phy_power(hw, TRUE);
2994 
2995 	/* Config/Enable Link */
2996 	ixgbe_config_link(ctx);
2997 
2998 	/* Hardware Packet Buffer & Flow Control setup */
2999 	ixgbe_config_delay_values(adapter);
3000 
3001 	/* Initialize the FC settings */
3002 	ixgbe_start_hw(hw);
3003 
3004 	/* Set up VLAN support and filter */
3005 	ixgbe_setup_vlan_hw_support(ctx);
3006 
3007 	/* Setup DMA Coalescing */
3008 	ixgbe_config_dmac(adapter);
3009 
3010 	/* And now turn on interrupts */
3011 	ixgbe_if_enable_intr(ctx);
3012 
3013 	/* Enable use of the mailbox (MBX) by the VFs */
3014 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3015 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3016 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3017 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3018 	}
3019 
3020 } /* ixgbe_if_init */
3021 
3022 /************************************************************************
3023  * ixgbe_set_ivar
3024  *
3025  *   Setup the correct IVAR register for a particular MSI-X interrupt
3026  *     (yes this is all very magic and confusing :)
3027  *    - entry is the register array entry
3028  *    - vector is the MSI-X vector for this queue
3029  *    - type is RX/TX/MISC
3030  ************************************************************************/
3031 static void
3032 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3033 {
3034 	struct ixgbe_hw *hw = &adapter->hw;
3035 	u32 ivar, index;
3036 
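	/* Mark the vector entry as valid so the hardware honors the mapping */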
3037 	vector |= IXGBE_IVAR_ALLOC_VAL;
3038 
3039 	switch (hw->mac.type) {
3040 	case ixgbe_mac_82598EB:
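		/* 82598: TX causes are offset by 64 from RX causes in the IVAR table */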
3041 		if (type == -1)
3042 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3043 		else
3044 			entry += (type * 64);
3045 		index = (entry >> 2) & 0x1F;
3046 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3047 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3048 		ivar |= (vector << (8 * (entry & 0x3)));
3049 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3050 		break;
3051 	case ixgbe_mac_82599EB:
3052 	case ixgbe_mac_X540:
3053 	case ixgbe_mac_X550:
3054 	case ixgbe_mac_X550EM_x:
3055 	case ixgbe_mac_X550EM_a:
3056 		if (type == -1) { /* MISC IVAR */
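			/* The misc IVAR packs 8-bit vector fields; (entry & 1) selects the byte */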
3057 			index = (entry & 1) * 8;
3058 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3059 			ivar &= ~(0xFF << index);
3060 			ivar |= (vector << index);
3061 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3062 		} else {          /* RX/TX IVARS */
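			/* Each IVAR register holds two queue entries, with an RX byte and a TX byte each */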
3063 			index = (16 * (entry & 1)) + (8 * type);
3064 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3065 			ivar &= ~(0xFF << index);
3066 			ivar |= (vector << index);
3067 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3068 		}
		break;
3069 	default:
3070 		break;
3071 	}
3072 } /* ixgbe_set_ivar */
3073 
3074 /************************************************************************
3075  * ixgbe_configure_ivars
3076  ************************************************************************/
3077 static void
3078 ixgbe_configure_ivars(struct adapter *adapter)
3079 {
3080 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3081 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3082 	u32                newitr;
3083 
3084 	if (ixgbe_max_interrupt_rate > 0)
3085 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3086 	else {
3087 		/*
3088 		 * Disable DMA coalescing if interrupt moderation is
3089 		 * disabled.
3090 		 */
3091 		adapter->dmac = 0;
3092 		newitr = 0;
3093 	}
3094 
3095 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3096 		struct rx_ring *rxr = &rx_que->rxr;
3097 
3098 		/* First the RX queue entry */
3099 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3100 
3101 		/* Set an Initial EITR value */
3102 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3103 	}
3104 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3105 		struct tx_ring *txr = &tx_que->txr;
3106 
3107 		/* ... and the TX */
3108 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3109 	}
3110 	/* For the Link interrupt */
3111 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3112 } /* ixgbe_configure_ivars */
3113 
3114 /************************************************************************
3115  * ixgbe_config_gpie
3116  ************************************************************************/
3117 static void
3118 ixgbe_config_gpie(struct adapter *adapter)
3119 {
3120 	struct ixgbe_hw *hw = &adapter->hw;
3121 	u32             gpie;
3122 
3123 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3124 
3125 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3126 		/* Enable Enhanced MSI-X mode */
3127 		gpie |= IXGBE_GPIE_MSIX_MODE
3128 		     |  IXGBE_GPIE_EIAME
3129 		     |  IXGBE_GPIE_PBA_SUPPORT
3130 		     |  IXGBE_GPIE_OCD;
3131 	}
3132 
3133 	/* Fan Failure Interrupt */
3134 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3135 		gpie |= IXGBE_SDP1_GPIEN;
3136 
3137 	/* Thermal Sensor Interrupt */
3138 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3139 		gpie |= IXGBE_SDP0_GPIEN_X540;
3140 
3141 	/* Link detection */
3142 	switch (hw->mac.type) {
3143 	case ixgbe_mac_82599EB:
3144 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3145 		break;
3146 	case ixgbe_mac_X550EM_x:
3147 	case ixgbe_mac_X550EM_a:
3148 		gpie |= IXGBE_SDP0_GPIEN_X540;
3149 		break;
3150 	default:
3151 		break;
3152 	}
3153 
3154 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3155 
3156 } /* ixgbe_config_gpie */
3157 
3158 /************************************************************************
3159  * ixgbe_config_delay_values
3160  *
3161  *   Requires adapter->max_frame_size to be set.
3162  ************************************************************************/
3163 static void
3164 ixgbe_config_delay_values(struct adapter *adapter)
3165 {
3166 	struct ixgbe_hw *hw = &adapter->hw;
3167 	u32             rxpb, frame, size, tmp;
3168 
3169 	frame = adapter->max_frame_size;
3170 
3171 	/* Calculate High Water */
3172 	switch (hw->mac.type) {
3173 	case ixgbe_mac_X540:
3174 	case ixgbe_mac_X550:
3175 	case ixgbe_mac_X550EM_x:
3176 	case ixgbe_mac_X550EM_a:
3177 		tmp = IXGBE_DV_X540(frame, frame);
3178 		break;
3179 	default:
3180 		tmp = IXGBE_DV(frame, frame);
3181 		break;
3182 	}
3183 	size = IXGBE_BT2KB(tmp);
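	/* High water mark: receive packet buffer size (KB) minus the computed delay value */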
3184 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3185 	hw->fc.high_water[0] = rxpb - size;
3186 
3187 	/* Now calculate Low Water */
3188 	switch (hw->mac.type) {
3189 	case ixgbe_mac_X540:
3190 	case ixgbe_mac_X550:
3191 	case ixgbe_mac_X550EM_x:
3192 	case ixgbe_mac_X550EM_a:
3193 		tmp = IXGBE_LOW_DV_X540(frame);
3194 		break;
3195 	default:
3196 		tmp = IXGBE_LOW_DV(frame);
3197 		break;
3198 	}
3199 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3200 
3201 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3202 	hw->fc.send_xon = TRUE;
3203 } /* ixgbe_config_delay_values */
3204 
3205 /************************************************************************
3206  * ixgbe_if_multi_set - Multicast Update
3207  *
3208  *   Called whenever the multicast address list is updated.
3209  ************************************************************************/
3210 static int
3211 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3212 {
3213 	struct adapter *adapter = arg;
3214 	struct ixgbe_mc_addr *mta = adapter->mta;
3215 
3216 	if (ifma->ifma_addr->sa_family != AF_LINK)
3217 		return (0);
3218 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3219 		return (0);
3220 	bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3221 	    mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3222 	mta[count].vmdq = adapter->pool;
3223 
3224 	return (1);
3225 } /* ixgbe_mc_filter_apply */
3226 
3227 static void
3228 ixgbe_if_multi_set(if_ctx_t ctx)
3229 {
3230 	struct adapter       *adapter = iflib_get_softc(ctx);
3231 	struct ixgbe_mc_addr *mta;
3232 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3233 	u8                   *update_ptr;
3234 	int                  mcnt = 0;
3235 	u32                  fctrl;
3236 
3237 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3238 
3239 	mta = adapter->mta;
3240 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3241 
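	/* Walk the multicast list; ixgbe_mc_filter_apply() copies each address into mta[] */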
3242 	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3243 
3244 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3245 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3246 	if (ifp->if_flags & IFF_PROMISC)
3247 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3248 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3249 	    ifp->if_flags & IFF_ALLMULTI) {
3250 		fctrl |= IXGBE_FCTRL_MPE;
3251 		fctrl &= ~IXGBE_FCTRL_UPE;
3252 	} else
3253 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3254 
3255 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3256 
3257 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3258 		update_ptr = (u8 *)mta;
3259 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3260 		    ixgbe_mc_array_itr, TRUE);
3261 	}
3262 
3263 } /* ixgbe_if_multi_set */
3264 
3265 /************************************************************************
3266  * ixgbe_mc_array_itr
3267  *
3268  *   An iterator function needed by the multicast shared code.
3269  *   It feeds the shared code routine the addresses in the
3270  *   array built by ixgbe_if_multi_set() one by one.
3271  ************************************************************************/
3272 static u8 *
3273 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3274 {
3275 	struct ixgbe_mc_addr *mta;
3276 
3277 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3278 	*vmdq = mta->vmdq;
3279 
3280 	*update_ptr = (u8*)(mta + 1);
3281 
3282 	return (mta->addr);
3283 } /* ixgbe_mc_array_itr */
3284 
3285 /************************************************************************
3286  * ixgbe_if_timer - Timer routine
3287  *
3288  *   Checks for link status, updates statistics,
3289  *   and runs the watchdog check.
3290  ************************************************************************/
3291 static void
3292 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3293 {
3294 	struct adapter *adapter = iflib_get_softc(ctx);
3295 
3296 	if (qid != 0)
3297 		return;
3298 
3299 	/* Check for pluggable optics */
3300 	if (adapter->sfp_probe)
3301 		if (!ixgbe_sfp_probe(ctx))
3302 			return; /* Nothing to do */
3303 
3304 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3305 	    &adapter->link_up, 0);
3306 
3307 	/* Fire off the adminq task */
3308 	iflib_admin_intr_deferred(ctx);
3309 
3310 } /* ixgbe_if_timer */
3311 
3312 /************************************************************************
3313  * ixgbe_sfp_probe
3314  *
3315  *   Determine whether a port has had optics inserted.
3316  ************************************************************************/
3317 static bool
3318 ixgbe_sfp_probe(if_ctx_t ctx)
3319 {
3320 	struct adapter  *adapter = iflib_get_softc(ctx);
3321 	struct ixgbe_hw *hw = &adapter->hw;
3322 	device_t        dev = iflib_get_dev(ctx);
3323 	bool            result = FALSE;
3324 
3325 	if ((hw->phy.type == ixgbe_phy_nl) &&
3326 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3327 		s32 ret = hw->phy.ops.identify_sfp(hw);
3328 		if (ret)
3329 			goto out;
3330 		ret = hw->phy.ops.reset(hw);
3331 		adapter->sfp_probe = FALSE;
3332 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3333 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3334 			device_printf(dev,
3335 			    "Reload driver with supported module.\n");
3336 			goto out;
3337 		} else
3338 			device_printf(dev, "SFP+ module detected!\n");
3339 		/* We now have supported optics */
3340 		result = TRUE;
3341 	}
3342 out:
3343 
3344 	return (result);
3345 } /* ixgbe_sfp_probe */
3346 
3347 /************************************************************************
3348  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3349  ************************************************************************/
3350 static void
3351 ixgbe_handle_mod(void *context)
3352 {
3353 	if_ctx_t        ctx = context;
3354 	struct adapter  *adapter = iflib_get_softc(ctx);
3355 	struct ixgbe_hw *hw = &adapter->hw;
3356 	device_t        dev = iflib_get_dev(ctx);
3357 	u32             err, cage_full = 0;
3358 
3359 	if (adapter->hw.need_crosstalk_fix) {
3360 		switch (hw->mac.type) {
3361 		case ixgbe_mac_82599EB:
3362 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3363 			    IXGBE_ESDP_SDP2;
3364 			break;
3365 		case ixgbe_mac_X550EM_x:
3366 		case ixgbe_mac_X550EM_a:
3367 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3368 			    IXGBE_ESDP_SDP0;
3369 			break;
3370 		default:
3371 			break;
3372 		}
3373 
3374 		if (!cage_full)
3375 			goto handle_mod_out;
3376 	}
3377 
3378 	err = hw->phy.ops.identify_sfp(hw);
3379 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3380 		device_printf(dev,
3381 		    "Unsupported SFP+ module type was detected.\n");
3382 		goto handle_mod_out;
3383 	}
3384 
3385 	if (hw->mac.type == ixgbe_mac_82598EB)
3386 		err = hw->phy.ops.reset(hw);
3387 	else
3388 		err = hw->mac.ops.setup_sfp(hw);
3389 
3390 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3391 		device_printf(dev,
3392 		    "Setup failure - unsupported SFP+ module type.\n");
3393 		goto handle_mod_out;
3394 	}
3395 	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3396 	return;
3397 
3398 handle_mod_out:
3399 	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3400 } /* ixgbe_handle_mod */
3401 
3402 
3403 /************************************************************************
3404  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3405  ************************************************************************/
3406 static void
3407 ixgbe_handle_msf(void *context)
3408 {
3409 	if_ctx_t        ctx = context;
3410 	struct adapter  *adapter = iflib_get_softc(ctx);
3411 	struct ixgbe_hw *hw = &adapter->hw;
3412 	u32             autoneg;
3413 	bool            negotiate;
3414 
3415 	/* ixgbe_get_supported_physical_layer() will call hw->phy.ops.identify_sfp() */
3416 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3417 
3418 	autoneg = hw->phy.autoneg_advertised;
3419 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3420 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3421 	if (hw->mac.ops.setup_link)
3422 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3423 
3424 	/* Adjust media types shown in ifconfig */
3425 	ifmedia_removeall(adapter->media);
3426 	ixgbe_add_media_types(adapter->ctx);
3427 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3428 } /* ixgbe_handle_msf */
3429 
3430 /************************************************************************
3431  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3432  ************************************************************************/
3433 static void
3434 ixgbe_handle_phy(void *context)
3435 {
3436 	if_ctx_t        ctx = context;
3437 	struct adapter  *adapter = iflib_get_softc(ctx);
3438 	struct ixgbe_hw *hw = &adapter->hw;
3439 	int             error;
3440 
3441 	error = hw->phy.ops.handle_lasi(hw);
3442 	if (error == IXGBE_ERR_OVERTEMP)
3443 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3444 	else if (error)
3445 		device_printf(adapter->dev,
3446 		    "Error handling LASI interrupt: %d\n", error);
3447 } /* ixgbe_handle_phy */
3448 
3449 /************************************************************************
3450  * ixgbe_if_stop - Stop the hardware
3451  *
3452  *   Disables all traffic on the adapter by issuing a
3453  *   global reset on the MAC and deallocates TX/RX buffers.
3454  ************************************************************************/
3455 static void
3456 ixgbe_if_stop(if_ctx_t ctx)
3457 {
3458 	struct adapter  *adapter = iflib_get_softc(ctx);
3459 	struct ixgbe_hw *hw = &adapter->hw;
3460 
3461 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3462 
3463 	ixgbe_reset_hw(hw);
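	/* Clear the stopped flag so ixgbe_stop_adapter() performs a full stop after the reset */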
3464 	hw->adapter_stopped = FALSE;
3465 	ixgbe_stop_adapter(hw);
3466 	if (hw->mac.type == ixgbe_mac_82599EB)
3467 		ixgbe_stop_mac_link_on_d3_82599(hw);
3468 	/* Turn off the laser - noop with no optics */
3469 	ixgbe_disable_tx_laser(hw);
3470 
3471 	/* Update the stack */
3472 	adapter->link_up = FALSE;
3473 	ixgbe_if_update_admin_status(ctx);
3474 
3475 	/* reprogram the RAR[0] in case user changed it. */
3476 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3477 
3478 	return;
3479 } /* ixgbe_if_stop */
3480 
3481 /************************************************************************
3482  * ixgbe_if_update_admin_status - Update OS on link state
3483  *
3484  * Note: Only updates the OS on the cached link state.
3485  *       The real check of the hardware only happens with
3486  *       a link interrupt.
3487  ************************************************************************/
3488 static void
3489 ixgbe_if_update_admin_status(if_ctx_t ctx)
3490 {
3491 	struct adapter *adapter = iflib_get_softc(ctx);
3492 	device_t       dev = iflib_get_dev(ctx);
3493 
3494 	if (adapter->link_up) {
3495 		if (adapter->link_active == FALSE) {
3496 			if (bootverbose)
3497 				device_printf(dev, "Link is up %d Gbps %s\n",
3498 				    ((adapter->link_speed == 128) ? 10 : 1),
3499 				    "Full Duplex");
3500 			adapter->link_active = TRUE;
3501 			/* Update any Flow Control changes */
3502 			ixgbe_fc_enable(&adapter->hw);
3503 			/* Update DMA coalescing config */
3504 			ixgbe_config_dmac(adapter);
3505 			/* should actually be negotiated value */
3506 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3507 
3508 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3509 				ixgbe_ping_all_vfs(adapter);
3510 		}
3511 	} else { /* Link down */
3512 		if (adapter->link_active == TRUE) {
3513 			if (bootverbose)
3514 				device_printf(dev, "Link is Down\n");
3515 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3516 			adapter->link_active = FALSE;
3517 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3518 				ixgbe_ping_all_vfs(adapter);
3519 		}
3520 	}
3521 
3522 	/* Handle task requests from msix_link() */
3523 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3524 		ixgbe_handle_mod(ctx);
3525 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3526 		ixgbe_handle_msf(ctx);
3527 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3528 		ixgbe_handle_mbx(ctx);
3529 	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3530 		ixgbe_reinit_fdir(ctx);
3531 	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3532 		ixgbe_handle_phy(ctx);
3533 	adapter->task_requests = 0;
3534 
3535 	ixgbe_update_stats_counters(adapter);
3536 } /* ixgbe_if_update_admin_status */
3537 
3538 /************************************************************************
3539  * ixgbe_config_dmac - Configure DMA Coalescing
3540  ************************************************************************/
3541 static void
3542 ixgbe_config_dmac(struct adapter *adapter)
3543 {
3544 	struct ixgbe_hw          *hw = &adapter->hw;
3545 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3546 
3547 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3548 		return;
3549 
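	/* Only reprogram when the watchdog timer or link speed differs from the cached config */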
3550 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3551 	    dcfg->link_speed ^ adapter->link_speed) {
3552 		dcfg->watchdog_timer = adapter->dmac;
3553 		dcfg->fcoe_en = FALSE;
3554 		dcfg->link_speed = adapter->link_speed;
3555 		dcfg->num_tcs = 1;
3556 
3557 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3558 		    dcfg->watchdog_timer, dcfg->link_speed);
3559 
3560 		hw->mac.ops.dmac_config(hw);
3561 	}
3562 } /* ixgbe_config_dmac */
3563 
3564 /************************************************************************
3565  * ixgbe_if_enable_intr
3566  ************************************************************************/
3567 void
3568 ixgbe_if_enable_intr(if_ctx_t ctx)
3569 {
3570 	struct adapter     *adapter = iflib_get_softc(ctx);
3571 	struct ixgbe_hw    *hw = &adapter->hw;
3572 	struct ix_rx_queue *que = adapter->rx_queues;
3573 	u32                mask, fwsm;
3574 
3575 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
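	/* Per-queue interrupt bits are enabled separately in the loop below */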
3576 
3577 	switch (adapter->hw.mac.type) {
3578 	case ixgbe_mac_82599EB:
3579 		mask |= IXGBE_EIMS_ECC;
3580 		/* Temperature sensor on some adapters */
3581 		mask |= IXGBE_EIMS_GPI_SDP0;
3582 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3583 		mask |= IXGBE_EIMS_GPI_SDP1;
3584 		mask |= IXGBE_EIMS_GPI_SDP2;
3585 		break;
3586 	case ixgbe_mac_X540:
3587 		/* Detect if Thermal Sensor is enabled */
3588 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3589 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3590 			mask |= IXGBE_EIMS_TS;
3591 		mask |= IXGBE_EIMS_ECC;
3592 		break;
3593 	case ixgbe_mac_X550:
3594 		/* MAC thermal sensor is automatically enabled */
3595 		mask |= IXGBE_EIMS_TS;
3596 		mask |= IXGBE_EIMS_ECC;
3597 		break;
3598 	case ixgbe_mac_X550EM_x:
3599 	case ixgbe_mac_X550EM_a:
3600 		/* Some devices use SDP0 for important information */
3601 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3602 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3603 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3604 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3605 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3606 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3607 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3608 		mask |= IXGBE_EIMS_ECC;
3609 		break;
3610 	default:
3611 		break;
3612 	}
3613 
3614 	/* Enable Fan Failure detection */
3615 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3616 		mask |= IXGBE_EIMS_GPI_SDP1;
3617 	/* Enable SR-IOV */
3618 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3619 		mask |= IXGBE_EIMS_MAILBOX;
3620 	/* Enable Flow Director */
3621 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3622 		mask |= IXGBE_EIMS_FLOW_DIR;
3623 
3624 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3625 
3626 	/* With MSI-X we use auto clear */
3627 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3628 		mask = IXGBE_EIMS_ENABLE_MASK;
3629 		/* Don't autoclear Link */
3630 		mask &= ~IXGBE_EIMS_OTHER;
3631 		mask &= ~IXGBE_EIMS_LSC;
3632 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3633 			mask &= ~IXGBE_EIMS_MAILBOX;
3634 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3635 	}
3636 
3637 	/*
3638 	 * Now enable all queues; this is done separately to
3639 	 * allow for handling the extended (beyond 32) MSI-X
3640 	 * vectors that can be used by the 82599.
3641 	 */
3642 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3643 		ixgbe_enable_queue(adapter, que->msix);
3644 
3645 	IXGBE_WRITE_FLUSH(hw);
3646 
3647 } /* ixgbe_if_enable_intr */
3648 
3649 /************************************************************************
3650  * ixgbe_disable_intr
3651  ************************************************************************/
3652 static void
3653 ixgbe_if_disable_intr(if_ctx_t ctx)
3654 {
3655 	struct adapter *adapter = iflib_get_softc(ctx);
3656 
3657 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3658 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3659 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3660 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3661 	} else {
3662 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3663 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3664 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3665 	}
3666 	IXGBE_WRITE_FLUSH(&adapter->hw);
3667 
3668 } /* ixgbe_if_disable_intr */
3669 
3670 /************************************************************************
3671  * ixgbe_link_intr_enable
3672  ************************************************************************/
3673 static void
3674 ixgbe_link_intr_enable(if_ctx_t ctx)
3675 {
3676 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3677 
3678 	/* Re-enable other interrupts */
3679 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3680 } /* ixgbe_link_intr_enable */
3681 
3682 /************************************************************************
3683  * ixgbe_if_rx_queue_intr_enable
3684  ************************************************************************/
3685 static int
3686 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3687 {
3688 	struct adapter     *adapter = iflib_get_softc(ctx);
3689 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3690 
3691 	ixgbe_enable_queue(adapter, que->msix);
3692 
3693 	return (0);
3694 } /* ixgbe_if_rx_queue_intr_enable */
3695 
3696 /************************************************************************
3697  * ixgbe_enable_queue
3698  ************************************************************************/
3699 static void
3700 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3701 {
3702 	struct ixgbe_hw *hw = &adapter->hw;
3703 	u64             queue = 1ULL << vector;
3704 	u32             mask;
3705 
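	/* 82598 keeps queue bits in the single EIMS register; newer MACs split them across EIMS_EX(0)/(1) */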
3706 	if (hw->mac.type == ixgbe_mac_82598EB) {
3707 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3708 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3709 	} else {
3710 		mask = (queue & 0xFFFFFFFF);
3711 		if (mask)
3712 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3713 		mask = (queue >> 32);
3714 		if (mask)
3715 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3716 	}
3717 } /* ixgbe_enable_queue */
3718 
3719 /************************************************************************
3720  * ixgbe_disable_queue
3721  ************************************************************************/
3722 static void
3723 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3724 {
3725 	struct ixgbe_hw *hw = &adapter->hw;
3726 	u64             queue = 1ULL << vector;
3727 	u32             mask;
3728 
3729 	if (hw->mac.type == ixgbe_mac_82598EB) {
3730 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3731 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3732 	} else {
3733 		mask = (queue & 0xFFFFFFFF);
3734 		if (mask)
3735 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3736 		mask = (queue >> 32);
3737 		if (mask)
3738 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3739 	}
3740 } /* ixgbe_disable_queue */
3741 
3742 /************************************************************************
3743  * ixgbe_intr - Legacy Interrupt Service Routine
3744  ************************************************************************/
3745 int
3746 ixgbe_intr(void *arg)
3747 {
3748 	struct adapter     *adapter = arg;
3749 	struct ix_rx_queue *que = adapter->rx_queues;
3750 	struct ixgbe_hw    *hw = &adapter->hw;
3751 	if_ctx_t           ctx = adapter->ctx;
3752 	u32                eicr, eicr_mask;
3753 
3754 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3755 
3756 	++que->irqs;
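	/* No cause bits set: nothing to service, re-enable interrupts and return */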
3757 	if (eicr == 0) {
3758 		ixgbe_if_enable_intr(ctx);
3759 		return (FILTER_HANDLED);
3760 	}
3761 
3762 	/* Check for fan failure */
3763 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3764 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3765 		device_printf(adapter->dev,
3766 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3767 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3768 	}
3769 
3770 	/* Link status change */
3771 	if (eicr & IXGBE_EICR_LSC) {
3772 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3773 		iflib_admin_intr_deferred(ctx);
3774 	}
3775 
3776 	if (ixgbe_is_sfp(hw)) {
3777 		/* Pluggable optics-related interrupt */
3778 		if (hw->mac.type >= ixgbe_mac_X540)
3779 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3780 		else
3781 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3782 
3783 		if (eicr & eicr_mask) {
3784 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3785 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3786 		}
3787 
3788 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3789 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3790 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3791 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3792 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3793 		}
3794 	}
3795 
3796 	/* External PHY interrupt */
3797 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3798 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3799 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3800 
3801 	return (FILTER_SCHEDULE_THREAD);
3802 } /* ixgbe_intr */
3803 
3804 /************************************************************************
3805  * ixgbe_free_pci_resources
3806  ************************************************************************/
3807 static void
3808 ixgbe_free_pci_resources(if_ctx_t ctx)
3809 {
3810 	struct adapter *adapter = iflib_get_softc(ctx);
3811 	struct         ix_rx_queue *que = adapter->rx_queues;
3812 	device_t       dev = iflib_get_dev(ctx);
3813 
3814 	/* Release all MSI-X queue resources */
3815 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3816 		iflib_irq_free(ctx, &adapter->irq);
3817 
3818 	if (que != NULL) {
3819 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3820 			iflib_irq_free(ctx, &que->que_irq);
3821 		}
3822 	}
3823 
3824 	if (adapter->pci_mem != NULL)
3825 		bus_release_resource(dev, SYS_RES_MEMORY,
3826 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3827 } /* ixgbe_free_pci_resources */
3828 
3829 /************************************************************************
3830  * ixgbe_sysctl_flowcntl
3831  *
3832  *   SYSCTL wrapper around setting Flow Control
3833  ************************************************************************/
3834 static int
3835 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3836 {
3837 	struct adapter *adapter;
3838 	int            error, fc;
3839 
3840 	adapter = (struct adapter *)arg1;
3841 	fc = adapter->hw.fc.current_mode;
3842 
3843 	error = sysctl_handle_int(oidp, &fc, 0, req);
3844 	if ((error) || (req->newptr == NULL))
3845 		return (error);
3846 
3847 	/* Don't bother if it's not changed */
3848 	if (fc == adapter->hw.fc.current_mode)
3849 		return (0);
3850 
3851 	return ixgbe_set_flowcntl(adapter, fc);
3852 } /* ixgbe_sysctl_flowcntl */
3853 
3854 /************************************************************************
3855  * ixgbe_set_flowcntl - Set flow control
3856  *
3857  *   Flow control values:
3858  *     0 - off
3859  *     1 - rx pause
3860  *     2 - tx pause
3861  *     3 - full
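 *
 *   Example: writing 3 requests symmetric (RX and TX) pause.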
3862  ************************************************************************/
3863 static int
3864 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3865 {
3866 	switch (fc) {
3867 	case ixgbe_fc_rx_pause:
3868 	case ixgbe_fc_tx_pause:
3869 	case ixgbe_fc_full:
3870 		adapter->hw.fc.requested_mode = fc;
3871 		if (adapter->num_rx_queues > 1)
3872 			ixgbe_disable_rx_drop(adapter);
3873 		break;
3874 	case ixgbe_fc_none:
3875 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3876 		if (adapter->num_rx_queues > 1)
3877 			ixgbe_enable_rx_drop(adapter);
3878 		break;
3879 	default:
3880 		return (EINVAL);
3881 	}
3882 
3883 	/* Don't autoneg if forcing a value */
3884 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3885 	ixgbe_fc_enable(&adapter->hw);
3886 
3887 	return (0);
3888 } /* ixgbe_set_flowcntl */
3889 
3890 /************************************************************************
3891  * ixgbe_enable_rx_drop
3892  *
3893  *   Enable the hardware to drop packets when the buffer is
3894  *   full. This is useful with multiqueue, so that no single
3895  *   queue being full stalls the entire RX engine. We only
3896  *   enable this when Multiqueue is enabled AND Flow Control
3897  *   is disabled.
3898  ************************************************************************/
3899 static void
3900 ixgbe_enable_rx_drop(struct adapter *adapter)
3901 {
3902 	struct ixgbe_hw *hw = &adapter->hw;
3903 	struct rx_ring  *rxr;
3904 	u32             srrctl;
3905 
3906 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3907 		rxr = &adapter->rx_queues[i].rxr;
3908 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3909 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3910 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3911 	}
3912 
3913 	/* enable drop for each vf */
3914 	for (int i = 0; i < adapter->num_vfs; i++) {
3915 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3916 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3917 		                IXGBE_QDE_ENABLE));
3918 	}
3919 } /* ixgbe_enable_rx_drop */
3920 
3921 /************************************************************************
3922  * ixgbe_disable_rx_drop
3923  ************************************************************************/
3924 static void
3925 ixgbe_disable_rx_drop(struct adapter *adapter)
3926 {
3927 	struct ixgbe_hw *hw = &adapter->hw;
3928 	struct rx_ring  *rxr;
3929 	u32             srrctl;
3930 
3931 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3932 		rxr = &adapter->rx_queues[i].rxr;
3933 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3934 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3935 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3936 	}
3937 
3938 	/* disable drop for each vf */
3939 	for (int i = 0; i < adapter->num_vfs; i++) {
3940 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3941 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3942 	}
3943 } /* ixgbe_disable_rx_drop */
3944 
3945 /************************************************************************
3946  * ixgbe_sysctl_advertise
3947  *
3948  *   SYSCTL wrapper around setting advertised speed
3949  ************************************************************************/
3950 static int
3951 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3952 {
3953 	struct adapter *adapter;
3954 	int            error, advertise;
3955 
3956 	adapter = (struct adapter *)arg1;
3957 	advertise = adapter->advertise;
3958 
3959 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3960 	if ((error) || (req->newptr == NULL))
3961 		return (error);
3962 
3963 	return ixgbe_set_advertise(adapter, advertise);
3964 } /* ixgbe_sysctl_advertise */
3965 
3966 /************************************************************************
3967  * ixgbe_set_advertise - Control advertised link speed
3968  *
3969  *   Flags:
3970  *     0x1 - advertise 100 Mb
3971  *     0x2 - advertise 1G
3972  *     0x4 - advertise 10G
3973  *     0x8 - advertise 10 Mb (yes, Mb)
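 *
 *   Example: a value of 0x6 advertises both 1G and 10G.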
3974  ************************************************************************/
3975 static int
3976 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3977 {
3978 	device_t         dev = iflib_get_dev(adapter->ctx);
3979 	struct ixgbe_hw  *hw;
3980 	ixgbe_link_speed speed = 0;
3981 	ixgbe_link_speed link_caps = 0;
3982 	s32              err = IXGBE_NOT_IMPLEMENTED;
3983 	bool             negotiate = FALSE;
3984 
3985 	/* Checks to validate new value */
3986 	if (adapter->advertise == advertise) /* no change */
3987 		return (0);
3988 
3989 	hw = &adapter->hw;
3990 
3991 	/* No speed changes for backplane media */
3992 	if (hw->phy.media_type == ixgbe_media_type_backplane)
3993 		return (ENODEV);
3994 
3995 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
3996 	      (hw->phy.multispeed_fiber))) {
3997 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
3998 		return (EINVAL);
3999 	}
4000 
4001 	if (advertise < 0x1 || advertise > 0xF) {
4002 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4003 		return (EINVAL);
4004 	}
4005 
4006 	if (hw->mac.ops.get_link_capabilities) {
4007 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4008 		    &negotiate);
4009 		if (err != IXGBE_SUCCESS) {
4010 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4011 			return (ENODEV);
4012 		}
4013 	}
4014 
4015 	/* Set new value and report new advertised mode */
4016 	if (advertise & 0x1) {
4017 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4018 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4019 			return (EINVAL);
4020 		}
4021 		speed |= IXGBE_LINK_SPEED_100_FULL;
4022 	}
4023 	if (advertise & 0x2) {
4024 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4025 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4026 			return (EINVAL);
4027 		}
4028 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4029 	}
4030 	if (advertise & 0x4) {
4031 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4032 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4033 			return (EINVAL);
4034 		}
4035 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4036 	}
4037 	if (advertise & 0x8) {
4038 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4039 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4040 			return (EINVAL);
4041 		}
4042 		speed |= IXGBE_LINK_SPEED_10_FULL;
4043 	}
4044 
4045 	hw->mac.autotry_restart = TRUE;
4046 	hw->mac.ops.setup_link(hw, speed, TRUE);
4047 	adapter->advertise = advertise;
4048 
4049 	return (0);
4050 } /* ixgbe_set_advertise */
4051 
4052 /************************************************************************
4053  * ixgbe_get_advertise - Get current advertised speed settings
4054  *
4055  *   Formatted for sysctl usage.
4056  *   Flags:
4057  *     0x1 - advertise 100 Mb
4058  *     0x2 - advertise 1G
4059  *     0x4 - advertise 10G
4060  *     0x8 - advertise 10 Mb (yes, Mb)
4061  ************************************************************************/
4062 static int
4063 ixgbe_get_advertise(struct adapter *adapter)
4064 {
4065 	struct ixgbe_hw  *hw = &adapter->hw;
4066 	int              speed;
4067 	ixgbe_link_speed link_caps = 0;
4068 	s32              err;
4069 	bool             negotiate = FALSE;
4070 
4071 	/*
4072 	 * Advertised speed means nothing unless it's copper or
4073 	 * multi-speed fiber
4074 	 */
4075 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4076 	    !(hw->phy.multispeed_fiber))
4077 		return (0);
4078 
4079 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4080 	if (err != IXGBE_SUCCESS)
4081 		return (0);
4082 
4083 	speed =
4084 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4085 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4086 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4087 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4088 
4089 	return speed;
4090 } /* ixgbe_get_advertise */
4091 
4092 /************************************************************************
4093  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4094  *
4095  *   Control values:
4096  *     0/1 - off / on (use default value of 1000)
4097  *
4098  *     Legal timer values are:
4099  *     50,100,250,500,1000,2000,5000,10000
4100  *
4101  *     Turning off interrupt moderation will also turn this off.
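 *
 *     Example: writing 1 enables coalescing with the default timer value of 1000.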
4102  ************************************************************************/
4103 static int
4104 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4105 {
4106 	struct adapter *adapter = (struct adapter *)arg1;
4107 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4108 	int            error;
4109 	u16            newval;
4110 
4111 	newval = adapter->dmac;
4112 	error = sysctl_handle_16(oidp, &newval, 0, req);
4113 	if ((error) || (req->newptr == NULL))
4114 		return (error);
4115 
4116 	switch (newval) {
4117 	case 0:
4118 		/* Disabled */
4119 		adapter->dmac = 0;
4120 		break;
4121 	case 1:
4122 		/* Enable and use default */
4123 		adapter->dmac = 1000;
4124 		break;
4125 	case 50:
4126 	case 100:
4127 	case 250:
4128 	case 500:
4129 	case 1000:
4130 	case 2000:
4131 	case 5000:
4132 	case 10000:
4133 		/* Legal values - allow */
4134 		adapter->dmac = newval;
4135 		break;
4136 	default:
4137 		/* Reject any other (illegal) value */
4138 		return (EINVAL);
4139 	}
4140 
4141 	/* Re-initialize hardware if it's already running */
4142 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4143 		ifp->if_init(ifp);
4144 
4145 	return (0);
4146 } /* ixgbe_sysctl_dmac */
4147 
4148 #ifdef IXGBE_DEBUG
4149 /************************************************************************
4150  * ixgbe_sysctl_power_state
4151  *
4152  *   Sysctl to test power states
4153  *   Values:
4154  *     0      - set device to D0
4155  *     3      - set device to D3
4156  *     (none) - get current device power state
4157  ************************************************************************/
4158 static int
4159 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4160 {
4161 	struct adapter *adapter = (struct adapter *)arg1;
4162 	device_t       dev = adapter->dev;
4163 	int            curr_ps, new_ps, error = 0;
4164 
4165 	curr_ps = new_ps = pci_get_powerstate(dev);
4166 
4167 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4168 	if ((error) || (req->newptr == NULL))
4169 		return (error);
4170 
4171 	if (new_ps == curr_ps)
4172 		return (0);
4173 
4174 	if (new_ps == 3 && curr_ps == 0)
4175 		error = DEVICE_SUSPEND(dev);
4176 	else if (new_ps == 0 && curr_ps == 3)
4177 		error = DEVICE_RESUME(dev);
4178 	else
4179 		return (EINVAL);
4180 
4181 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4182 
4183 	return (error);
4184 } /* ixgbe_sysctl_power_state */
4185 #endif
4186 
4187 /************************************************************************
4188  * ixgbe_sysctl_wol_enable
4189  *
4190  *   Sysctl to enable/disable the WoL capability,
4191  *   if supported by the adapter.
4192  *
4193  *   Values:
4194  *     0 - disabled
4195  *     1 - enabled
4196  ************************************************************************/
4197 static int
4198 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4199 {
4200 	struct adapter  *adapter = (struct adapter *)arg1;
4201 	struct ixgbe_hw *hw = &adapter->hw;
4202 	int             new_wol_enabled;
4203 	int             error = 0;
4204 
4205 	new_wol_enabled = hw->wol_enabled;
4206 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4207 	if ((error) || (req->newptr == NULL))
4208 		return (error);
4209 	new_wol_enabled = !!(new_wol_enabled);
4210 	if (new_wol_enabled == hw->wol_enabled)
4211 		return (0);
4212 
4213 	if (new_wol_enabled > 0 && !adapter->wol_support)
4214 		return (ENODEV);
4215 	else
4216 		hw->wol_enabled = new_wol_enabled;
4217 
4218 	return (0);
4219 } /* ixgbe_sysctl_wol_enable */
4220 
4221 /************************************************************************
4222  * ixgbe_sysctl_wufc - Wake Up Filter Control
4223  *
4224  *   Sysctl to enable/disable the types of packets that the
4225  *   adapter will wake up on upon receipt.
4226  *   Flags:
4227  *     0x1  - Link Status Change
4228  *     0x2  - Magic Packet
4229  *     0x4  - Direct Exact
4230  *     0x8  - Directed Multicast
4231  *     0x10 - Broadcast
4232  *     0x20 - ARP/IPv4 Request Packet
4233  *     0x40 - Direct IPv4 Packet
4234  *     0x80 - Direct IPv6 Packet
4235  *
4236  *   Settings not listed above will cause the sysctl to return an error.
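 *
 *   Example: a value of 0x3 wakes the system on link status change or magic packet.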
4237  ************************************************************************/
4238 static int
4239 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4240 {
4241 	struct adapter *adapter = (struct adapter *)arg1;
4242 	int            error = 0;
4243 	u32            new_wufc;
4244 
4245 	new_wufc = adapter->wufc;
4246 
4247 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4248 	if ((error) || (req->newptr == NULL))
4249 		return (error);
4250 	if (new_wufc == adapter->wufc)
4251 		return (0);
4252 
4253 	if (new_wufc & 0xffffff00)
4254 		return (EINVAL);
4255 
4256 	new_wufc &= 0xff;
4257 	new_wufc |= (0xffffff & adapter->wufc);
4258 	adapter->wufc = new_wufc;
4259 
4260 	return (0);
4261 } /* ixgbe_sysctl_wufc */
4262 
4263 #ifdef IXGBE_DEBUG
4264 /************************************************************************
4265  * ixgbe_sysctl_print_rss_config
4266  ************************************************************************/
4267 static int
4268 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4269 {
4270 	struct adapter  *adapter = (struct adapter *)arg1;
4271 	struct ixgbe_hw *hw = &adapter->hw;
4272 	device_t        dev = adapter->dev;
4273 	struct sbuf     *buf;
4274 	int             error = 0, reta_size;
4275 	u32             reg;
4276 
4277 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4278 	if (!buf) {
4279 		device_printf(dev, "Could not allocate sbuf for output.\n");
4280 		return (ENOMEM);
4281 	}
4282 
4283 	// TODO: use sbufs to make a string to print out
4284 	/* Set multiplier for RETA setup and table size based on MAC */
4285 	switch (adapter->hw.mac.type) {
4286 	case ixgbe_mac_X550:
4287 	case ixgbe_mac_X550EM_x:
4288 	case ixgbe_mac_X550EM_a:
4289 		reta_size = 128;
4290 		break;
4291 	default:
4292 		reta_size = 32;
4293 		break;
4294 	}
4295 
4296 	/* Print out the redirection table */
4297 	sbuf_cat(buf, "\n");
4298 	for (int i = 0; i < reta_size; i++) {
4299 		if (i < 32) {
4300 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4301 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4302 		} else {
4303 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4304 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4305 		}
4306 	}
4307 
4308 	// TODO: print more config
4309 
4310 	error = sbuf_finish(buf);
4311 	if (error)
4312 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4313 
4314 	sbuf_delete(buf);
4315 
4316 	return (0);
4317 } /* ixgbe_sysctl_print_rss_config */
4318 #endif /* IXGBE_DEBUG */
4319 
4320 /************************************************************************
4321  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4322  *
4323  *   For X552/X557-AT devices using an external PHY
4324  ************************************************************************/
4325 static int
4326 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4327 {
4328 	struct adapter  *adapter = (struct adapter *)arg1;
4329 	struct ixgbe_hw *hw = &adapter->hw;
4330 	u16             reg;
4331 
4332 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4333 		device_printf(iflib_get_dev(adapter->ctx),
4334 		    "Device has no supported external thermal sensor.\n");
4335 		return (ENODEV);
4336 	}
4337 
4338 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4339 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4340 		device_printf(iflib_get_dev(adapter->ctx),
4341 		    "Error reading from PHY's current temperature register\n");
4342 		return (EAGAIN);
4343 	}
4344 
4345 	/* Shift temp for output */
4346 	reg = reg >> 8;
4347 
4348 	return (sysctl_handle_16(oidp, NULL, reg, req));
4349 } /* ixgbe_sysctl_phy_temp */
4350 
4351 /************************************************************************
4352  * ixgbe_sysctl_phy_overtemp_occurred
4353  *
4354  *   Reports (directly from the PHY) whether the current PHY
4355  *   temperature is over the overtemp threshold.
4356  ************************************************************************/
4357 static int
4358 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4359 {
4360 	struct adapter  *adapter = (struct adapter *)arg1;
4361 	struct ixgbe_hw *hw = &adapter->hw;
4362 	u16             reg;
4363 
4364 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4365 		device_printf(iflib_get_dev(adapter->ctx),
4366 		    "Device has no supported external thermal sensor.\n");
4367 		return (ENODEV);
4368 	}
4369 
4370 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4371 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4372 		device_printf(iflib_get_dev(adapter->ctx),
4373 		    "Error reading from PHY's temperature status register\n");
4374 		return (EAGAIN);
4375 	}
4376 
4377 	/* Get occurrence bit */
4378 	reg = !!(reg & 0x4000);
4379 
4380 	return (sysctl_handle_16(oidp, NULL, reg, req));
4381 } /* ixgbe_sysctl_phy_overtemp_occurred */
4382 
4383 /************************************************************************
4384  * ixgbe_sysctl_eee_state
4385  *
4386  *   Sysctl to set EEE power saving feature
4387  *   Values:
4388  *     0      - disable EEE
4389  *     1      - enable EEE
4390  *     (none) - get current device EEE state
4391  ************************************************************************/
4392 static int
4393 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4394 {
4395 	struct adapter *adapter = (struct adapter *)arg1;
4396 	device_t       dev = adapter->dev;
4397 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4398 	int            curr_eee, new_eee, error = 0;
4399 	s32            retval;
4400 
4401 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4402 
4403 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4404 	if ((error) || (req->newptr == NULL))
4405 		return (error);
4406 
4407 	/* Nothing to do */
4408 	if (new_eee == curr_eee)
4409 		return (0);
4410 
4411 	/* Not supported */
4412 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4413 		return (EINVAL);
4414 
4415 	/* Bounds checking */
4416 	if ((new_eee < 0) || (new_eee > 1))
4417 		return (EINVAL);
4418 
4419 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4420 	if (retval) {
4421 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4422 		return (EINVAL);
4423 	}
4424 
4425 	/* Restart auto-neg */
4426 	ifp->if_init(ifp);
4427 
4428 	device_printf(dev, "New EEE state: %d\n", new_eee);
4429 
4430 	/* Cache new value */
4431 	if (new_eee)
4432 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4433 	else
4434 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4435 
4436 	return (error);
4437 } /* ixgbe_sysctl_eee_state */
4438 
4439 /************************************************************************
4440  * ixgbe_init_device_features
4441  ************************************************************************/
4442 static void
4443 ixgbe_init_device_features(struct adapter *adapter)
4444 {
4445 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4446 	                  | IXGBE_FEATURE_RSS
4447 	                  | IXGBE_FEATURE_MSI
4448 	                  | IXGBE_FEATURE_MSIX
4449 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4450 
4451 	/* Set capabilities first... */
4452 	switch (adapter->hw.mac.type) {
4453 	case ixgbe_mac_82598EB:
4454 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4455 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4456 		break;
4457 	case ixgbe_mac_X540:
4458 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4459 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4460 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4461 		    (adapter->hw.bus.func == 0))
4462 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4463 		break;
4464 	case ixgbe_mac_X550:
4465 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4466 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4467 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4468 		break;
4469 	case ixgbe_mac_X550EM_x:
4470 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4471 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4472 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4473 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4474 		break;
4475 	case ixgbe_mac_X550EM_a:
4476 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4477 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4478 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4479 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4480 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4481 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4482 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4483 		}
4484 		break;
4485 	case ixgbe_mac_82599EB:
4486 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4487 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4488 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4489 		    (adapter->hw.bus.func == 0))
4490 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4491 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4492 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4493 		break;
4494 	default:
4495 		break;
4496 	}
4497 
4498 	/* Enabled by default... */
4499 	/* Fan failure detection */
4500 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4501 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4502 	/* Netmap */
4503 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4504 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4505 	/* EEE */
4506 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4507 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4508 	/* Thermal Sensor */
4509 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4510 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4511 
4512 	/* Enabled via global sysctl... */
4513 	/* Flow Director */
4514 	if (ixgbe_enable_fdir) {
4515 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4516 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4517 		else
4518 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4519 	}
4520 	/*
4521 	 * Message Signaled Interrupts - Extended (MSI-X)
4522 	 * Normal MSI is only enabled if MSI-X calls fail.
4523 	 */
4524 	if (!ixgbe_enable_msix)
4525 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4526 	/* Receive-Side Scaling (RSS) */
4527 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4528 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4529 
4530 	/* Disable features with unmet dependencies... */
4531 	/* No MSI-X */
4532 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4533 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4534 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4535 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4536 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4537 	}
4538 } /* ixgbe_init_device_features */
4539 
4540 /************************************************************************
4541  * ixgbe_check_fan_failure
4542  ************************************************************************/
4543 static void
4544 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4545 {
4546 	u32 mask;
4547 
4548 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4549 	    IXGBE_ESDP_SDP1;
4550 
4551 	if (reg & mask)
4552 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4553 } /* ixgbe_check_fan_failure */
4554