xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 6be3386466ab79a84b48429ae66244f21526d3df)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select devices to load on
57  *   Last field stores an index into ixgbe_strings
58  *   Last entry must be all 0s
59  *
60  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 static bool ixgbe_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
143 int ixgbe_intr(void *arg);
144 
145 /************************************************************************
146  * Function prototypes
147  ************************************************************************/
148 #if __FreeBSD_version >= 1100036
149 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
150 #endif
151 
152 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
154 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
155 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
156 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
157 
158 static void ixgbe_config_dmac(struct adapter *adapter);
159 static void ixgbe_configure_ivars(struct adapter *adapter);
160 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
161                            s8 type);
162 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
163 static bool ixgbe_sfp_probe(if_ctx_t ctx);
164 
165 static void ixgbe_free_pci_resources(if_ctx_t ctx);
166 
167 static int  ixgbe_msix_link(void *arg);
168 static int  ixgbe_msix_que(void *arg);
169 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
170 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
171 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
172 
173 static int  ixgbe_setup_interface(if_ctx_t ctx);
174 static void ixgbe_init_device_features(struct adapter *adapter);
175 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
176 static void ixgbe_add_media_types(if_ctx_t ctx);
177 static void ixgbe_update_stats_counters(struct adapter *adapter);
178 static void ixgbe_config_link(if_ctx_t ctx);
179 static void ixgbe_get_slot_info(struct adapter *);
180 static void ixgbe_check_wol_support(struct adapter *adapter);
181 static void ixgbe_enable_rx_drop(struct adapter *);
182 static void ixgbe_disable_rx_drop(struct adapter *);
183 
184 static void ixgbe_add_hw_stats(struct adapter *adapter);
185 static int  ixgbe_set_flowcntl(struct adapter *, int);
186 static int  ixgbe_set_advertise(struct adapter *, int);
187 static int  ixgbe_get_advertise(struct adapter *);
188 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
189 static void ixgbe_config_gpie(struct adapter *adapter);
190 static void ixgbe_config_delay_values(struct adapter *adapter);
191 
192 /* Sysctl handlers */
193 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
198 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
199 #ifdef IXGBE_DEBUG
200 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
201 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
202 #endif
203 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
209 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
210 
211 /* Deferred interrupt tasklets */
212 static void ixgbe_handle_msf(void *);
213 static void ixgbe_handle_mod(void *);
214 static void ixgbe_handle_phy(void *);
215 
216 /************************************************************************
217  *  FreeBSD Device Interface Entry Points
218  ************************************************************************/
219 static device_method_t ix_methods[] = {
220 	/* Device interface */
221 	DEVMETHOD(device_register, ixgbe_register),
222 	DEVMETHOD(device_probe, iflib_device_probe),
223 	DEVMETHOD(device_attach, iflib_device_attach),
224 	DEVMETHOD(device_detach, iflib_device_detach),
225 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
226 	DEVMETHOD(device_suspend, iflib_device_suspend),
227 	DEVMETHOD(device_resume, iflib_device_resume),
228 #ifdef PCI_IOV
229 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
230 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
231 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
232 #endif /* PCI_IOV */
233 	DEVMETHOD_END
234 };
235 
236 static driver_t ix_driver = {
237 	"ix", ix_methods, sizeof(struct adapter),
238 };
239 
240 devclass_t ix_devclass;
241 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
242 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
243 MODULE_DEPEND(ix, pci, 1, 1, 1);
244 MODULE_DEPEND(ix, ether, 1, 1, 1);
245 MODULE_DEPEND(ix, iflib, 1, 1, 1);
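/*
 * Loading note (illustrative): the PNP info exported above is what lets
 * devmatch(8) auto-load this driver; it can also be loaded manually with
 * "kldload if_ix" or via if_ix_load="YES" in loader.conf(5).
 */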
246 
247 static device_method_t ixgbe_if_methods[] = {
248 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
249 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
250 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
251 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
252 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
253 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
254 	DEVMETHOD(ifdi_init, ixgbe_if_init),
255 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
256 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
257 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
258 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
259 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
260 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
261 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
262 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
263 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
264 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
265 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
266 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
267 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
268 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
269 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
270 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
271 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
272 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
273 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
274 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
275 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
276 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
277 	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
278 #ifdef PCI_IOV
279 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
280 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
281 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
282 #endif /* PCI_IOV */
283 	DEVMETHOD_END
284 };
285 
286 /*
287  * TUNEABLE PARAMETERS:
288  */
289 
290 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
291     "IXGBE driver parameters");
292 static driver_t ixgbe_if_driver = {
293   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
294 };
295 
296 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
297 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
298     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
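/*
 * With the stock IXGBE_LOW_LATENCY value of 128 this default evaluates to
 * 31250 interrupts per second; it is used when the initial EITR (interrupt
 * throttle) values are programmed for the queue vectors.
 */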
299 
300 /* Flow control setting, default to full */
301 static int ixgbe_flow_control = ixgbe_fc_full;
302 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
303     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
304 
305 /* Advertise Speed, default to 0 (auto) */
306 static int ixgbe_advertise_speed = 0;
307 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
308     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
309 
310 /*
311  * Smart speed setting, default to on.
312  * This only works as a compile-time option
313  * right now, since it is set during attach;
314  * set this to 'ixgbe_smart_speed_off' to
315  * disable.
316  */
317 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
318 
319 /*
320  * MSI-X should be the default for best performance,
321  * but this allows it to be forced off for testing.
322  */
323 static int ixgbe_enable_msix = 1;
324 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
325     "Enable MSI-X interrupts");
326 
327 /*
328  * Setting this to TRUE allows the use
329  * of unsupported SFP+ modules; note that
330  * doing so means you are on your own :)
331  */
332 static int allow_unsupported_sfp = FALSE;
333 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
334     &allow_unsupported_sfp, 0,
335     "Allow unsupported SFP modules...use at your own risk");
336 
337 /*
338  * Not sure if Flow Director is fully baked,
339  * so we'll default to turning it off.
340  */
341 static int ixgbe_enable_fdir = 0;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
343     "Enable Flow Director");
344 
345 /* Receive-Side Scaling */
346 static int ixgbe_enable_rss = 1;
347 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
348     "Enable Receive-Side Scaling (RSS)");
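/*
 * All of the hw.ix tunables above are CTLFLAG_RDTUN: they are consulted
 * during attach and cannot be changed at runtime, so set them from
 * loader.conf(5), e.g. (illustrative):
 *
 *   hw.ix.enable_msix="0"
 *   hw.ix.flow_control="0"
 */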
349 
350 #if 0
351 /* Keep a running tab on them for sanity checking */
352 static int ixgbe_total_ports;
353 #endif
354 
355 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
356 
357 /*
358  * For Flow Director: this is the rate at which we sample TX packets
359  * for the filter pool; at the default of 20, every 20th packet is probed.
360  *
361  * This feature can be disabled by setting this to 0.
362  */
363 static int atr_sample_rate = 20;
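/*
 * When Flow Director is enabled, this value is copied into each TX ring's
 * atr_sample field in ixgbe_if_tx_queues_alloc() below.
 */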
364 
365 extern struct if_txrx ixgbe_txrx;
366 
367 static struct if_shared_ctx ixgbe_sctx_init = {
368 	.isc_magic = IFLIB_MAGIC,
369 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
370 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
371 	.isc_tx_maxsegsize = PAGE_SIZE,
372 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
373 	.isc_tso_maxsegsize = PAGE_SIZE,
374 	.isc_rx_maxsize = PAGE_SIZE*4,
375 	.isc_rx_nsegments = 1,
376 	.isc_rx_maxsegsize = PAGE_SIZE*4,
377 	.isc_nfl = 1,
378 	.isc_ntxqs = 1,
379 	.isc_nrxqs = 1,
380 
381 	.isc_admin_intrcnt = 1,
382 	.isc_vendor_info = ixgbe_vendor_info_array,
383 	.isc_driver_version = ixgbe_driver_version,
384 	.isc_driver = &ixgbe_if_driver,
385 	.isc_flags = IFLIB_TSO_INIT_IP,
386 
387 	.isc_nrxd_min = {MIN_RXD},
388 	.isc_ntxd_min = {MIN_TXD},
389 	.isc_nrxd_max = {MAX_RXD},
390 	.isc_ntxd_max = {MAX_TXD},
391 	.isc_nrxd_default = {DEFAULT_RXD},
392 	.isc_ntxd_default = {DEFAULT_TXD},
393 };
394 
395 /************************************************************************
396  * ixgbe_if_tx_queues_alloc
397  ************************************************************************/
398 static int
399 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
400                          int ntxqs, int ntxqsets)
401 {
402 	struct adapter     *adapter = iflib_get_softc(ctx);
403 	if_softc_ctx_t     scctx = adapter->shared;
404 	struct ix_tx_queue *que;
405 	int                i, j, error;
406 
407 	MPASS(adapter->num_tx_queues > 0);
408 	MPASS(adapter->num_tx_queues == ntxqsets);
409 	MPASS(ntxqs == 1);
410 
411 	/* Allocate queue structure memory */
412 	adapter->tx_queues =
413 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
414 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
415 	if (!adapter->tx_queues) {
416 		device_printf(iflib_get_dev(ctx),
417 		    "Unable to allocate TX ring memory\n");
418 		return (ENOMEM);
419 	}
420 
421 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
422 		struct tx_ring *txr = &que->txr;
423 
424 		/* In case SR-IOV is enabled, align the index properly */
425 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
426 		    i);
427 
428 		txr->adapter = que->adapter = adapter;
429 
430 		/* Allocate report status array */
431 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
432 		if (txr->tx_rsq == NULL) {
433 			error = ENOMEM;
434 			goto fail;
435 		}
436 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
437 			txr->tx_rsq[j] = QIDX_INVALID;
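		/*
		 * tx_rsq records, in ring order, the descriptor indices that
		 * had the Report Status bit set; the txrx code (see
		 * ix_txrx.c) walks it between tx_rs_cidx and tx_rs_pidx to
		 * credit completed descriptors back to iflib.
		 */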
438 		/* get the virtual and physical address of the hardware queues */
439 		txr->tail = IXGBE_TDT(txr->me);
440 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
441 		txr->tx_paddr = paddrs[i];
442 
443 		txr->bytes = 0;
444 		txr->total_packets = 0;
445 
446 		/* Set the rate at which we sample packets */
447 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
448 			txr->atr_sample = atr_sample_rate;
449 
450 	}
451 
452 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
453 	    adapter->num_tx_queues);
454 
455 	return (0);
456 
457 fail:
458 	ixgbe_if_queues_free(ctx);
459 
460 	return (error);
461 } /* ixgbe_if_tx_queues_alloc */
462 
463 /************************************************************************
464  * ixgbe_if_rx_queues_alloc
465  ************************************************************************/
466 static int
467 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
468                          int nrxqs, int nrxqsets)
469 {
470 	struct adapter     *adapter = iflib_get_softc(ctx);
471 	struct ix_rx_queue *que;
472 	int                i;
473 
474 	MPASS(adapter->num_rx_queues > 0);
475 	MPASS(adapter->num_rx_queues == nrxqsets);
476 	MPASS(nrxqs == 1);
477 
478 	/* Allocate queue structure memory */
479 	adapter->rx_queues =
480 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
481 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
482 	if (!adapter->rx_queues) {
483 		device_printf(iflib_get_dev(ctx),
484 		    "Unable to allocate RX ring memory\n");
485 		return (ENOMEM);
486 	}
487 
488 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
489 		struct rx_ring *rxr = &que->rxr;
490 
491 		/* In case SR-IOV is enabled, align the index properly */
492 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
493 		    i);
494 
495 		rxr->adapter = que->adapter = adapter;
496 
497 		/* get the virtual and physical address of the hw queues */
498 		rxr->tail = IXGBE_RDT(rxr->me);
499 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
500 		rxr->rx_paddr = paddrs[i];
501 		rxr->bytes = 0;
502 		rxr->que = que;
503 	}
504 
505 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
506 	    adapter->num_rx_queues);
507 
508 	return (0);
509 } /* ixgbe_if_rx_queues_alloc */
510 
511 /************************************************************************
512  * ixgbe_if_queues_free
513  ************************************************************************/
514 static void
515 ixgbe_if_queues_free(if_ctx_t ctx)
516 {
517 	struct adapter     *adapter = iflib_get_softc(ctx);
518 	struct ix_tx_queue *tx_que = adapter->tx_queues;
519 	struct ix_rx_queue *rx_que = adapter->rx_queues;
520 	int                i;
521 
522 	if (tx_que != NULL) {
523 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
524 			struct tx_ring *txr = &tx_que->txr;
525 			if (txr->tx_rsq == NULL)
526 				break;
527 
528 			free(txr->tx_rsq, M_IXGBE);
529 			txr->tx_rsq = NULL;
530 		}
531 
532 		free(adapter->tx_queues, M_IXGBE);
533 		adapter->tx_queues = NULL;
534 	}
535 	if (rx_que != NULL) {
536 		free(adapter->rx_queues, M_IXGBE);
537 		adapter->rx_queues = NULL;
538 	}
539 } /* ixgbe_if_queues_free */
540 
541 /************************************************************************
542  * ixgbe_initialize_rss_mapping
543  ************************************************************************/
544 static void
545 ixgbe_initialize_rss_mapping(struct adapter *adapter)
546 {
547 	struct ixgbe_hw *hw = &adapter->hw;
548 	u32             reta = 0, mrqc, rss_key[10];
549 	int             queue_id, table_size, index_mult;
550 	int             i, j;
551 	u32             rss_hash_config;
552 
553 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
554 		/* Fetch the configured RSS key */
555 		rss_getkey((uint8_t *)&rss_key);
556 	} else {
557 		/* set up random bits */
558 		arc4rand(&rss_key, sizeof(rss_key), 0);
559 	}
560 
561 	/* Set multiplier for RETA setup and table size based on MAC */
562 	index_mult = 0x1;
563 	table_size = 128;
564 	switch (adapter->hw.mac.type) {
565 	case ixgbe_mac_82598EB:
566 		index_mult = 0x11;
567 		break;
568 	case ixgbe_mac_X550:
569 	case ixgbe_mac_X550EM_x:
570 	case ixgbe_mac_X550EM_a:
571 		table_size = 512;
572 		break;
573 	default:
574 		break;
575 	}
576 
577 	/* Set up the redirection table */
578 	for (i = 0, j = 0; i < table_size; i++, j++) {
579 		if (j == adapter->num_rx_queues)
580 			j = 0;
581 
582 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
583 			/*
584 			 * Fetch the RSS bucket id for the given indirection
585 			 * entry. Cap it at the number of configured buckets
586 			 * (which is num_rx_queues.)
587 			 */
588 			queue_id = rss_get_indirection_to_bucket(i);
589 			queue_id = queue_id % adapter->num_rx_queues;
590 		} else
591 			queue_id = (j * index_mult);
592 
593 		/*
594 		 * The low 8 bits are for hash value (n+0);
595 		 * The next 8 bits are for hash value (n+1), etc.
596 		 */
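		/*
		 * Illustrative example: with four RX queues, software RSS
		 * disabled and index_mult == 1, queue_id cycles 0,1,2,3, so
		 * after four iterations RETA(0) is written as 0x03020100
		 * (entry for hash n+0 in the low byte, n+3 in the high byte).
		 */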
597 		reta = reta >> 8;
598 		reta = reta | (((uint32_t)queue_id) << 24);
599 		if ((i & 3) == 3) {
600 			if (i < 128)
601 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
602 			else
603 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
604 				    reta);
605 			reta = 0;
606 		}
607 	}
608 
609 	/* Now fill our hash function seeds */
610 	for (i = 0; i < 10; i++)
611 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
612 
613 	/* Perform hash on these packet types */
614 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
615 		rss_hash_config = rss_gethashconfig();
616 	else {
617 		/*
618 		 * Disable UDP - IP fragments aren't currently being handled
619 		 * and so we end up with a mix of 2-tuple and 4-tuple
620 		 * traffic.
621 		 */
622 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
623 		                | RSS_HASHTYPE_RSS_TCP_IPV4
624 		                | RSS_HASHTYPE_RSS_IPV6
625 		                | RSS_HASHTYPE_RSS_TCP_IPV6
626 		                | RSS_HASHTYPE_RSS_IPV6_EX
627 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
628 	}
629 
630 	mrqc = IXGBE_MRQC_RSSEN;
631 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
632 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
633 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
634 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
635 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
636 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
637 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
638 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
639 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
640 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
649 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
650 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
651 } /* ixgbe_initialize_rss_mapping */
652 
653 /************************************************************************
654  * ixgbe_initialize_receive_units - Setup receive registers and features.
655  ************************************************************************/
656 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
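/*
 * SRRCTL.BSIZEPKT is programmed in 1 KB units (assuming the usual
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10), so this rounds the receive buffer
 * size up to the next 1 KB boundary; e.g. a 2048-byte mbuf cluster yields
 * bufsz = (2048 + 1023) >> 10 = 2 below.
 */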
657 
658 static void
659 ixgbe_initialize_receive_units(if_ctx_t ctx)
660 {
661 	struct adapter     *adapter = iflib_get_softc(ctx);
662 	if_softc_ctx_t     scctx = adapter->shared;
663 	struct ixgbe_hw    *hw = &adapter->hw;
664 	struct ifnet       *ifp = iflib_get_ifp(ctx);
665 	struct ix_rx_queue *que;
666 	int                i, j;
667 	u32                bufsz, fctrl, srrctl, rxcsum;
668 	u32                hlreg;
669 
670 	/*
671 	 * Make sure receives are disabled while
672 	 * setting up the descriptor ring
673 	 */
674 	ixgbe_disable_rx(hw);
675 
676 	/* Enable broadcasts */
677 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
678 	fctrl |= IXGBE_FCTRL_BAM;
679 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
680 		fctrl |= IXGBE_FCTRL_DPF;
681 		fctrl |= IXGBE_FCTRL_PMCF;
682 	}
683 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
684 
685 	/* Set for Jumbo Frames? */
686 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
687 	if (ifp->if_mtu > ETHERMTU)
688 		hlreg |= IXGBE_HLREG0_JUMBOEN;
689 	else
690 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
691 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
692 
693 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
694 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
695 
696 	/* Setup the Base and Length of the Rx Descriptor Ring */
697 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
698 		struct rx_ring *rxr = &que->rxr;
699 		u64            rdba = rxr->rx_paddr;
700 
701 		j = rxr->me;
702 
703 		/* Setup the Base and Length of the Rx Descriptor Ring */
704 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
705 		    (rdba & 0x00000000ffffffffULL));
706 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
707 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
708 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
709 
710 		/* Set up the SRRCTL register */
711 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
712 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
713 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
714 		srrctl |= bufsz;
715 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
716 
717 		/*
718 		 * Set DROP_EN iff we have no flow control and >1 queue.
719 		 * Note that srrctl was cleared shortly before during reset,
720 		 * so we do not need to clear the bit, but do it just in case
721 		 * this code is moved elsewhere.
722 		 */
723 		if (adapter->num_rx_queues > 1 &&
724 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
725 			srrctl |= IXGBE_SRRCTL_DROP_EN;
726 		} else {
727 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
728 		}
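		/*
		 * Note: ixgbe_enable_rx_drop()/ixgbe_disable_rx_drop() are
		 * used elsewhere in the driver to toggle this same SRRCTL
		 * DROP_EN bit at runtime, so this is only the initial policy.
		 */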
729 
730 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
731 
732 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
733 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
734 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
735 
736 		/* Set the driver rx tail address */
737 		rxr->tail = IXGBE_RDT(rxr->me);
738 	}
739 
740 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
741 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
742 		            | IXGBE_PSRTYPE_UDPHDR
743 		            | IXGBE_PSRTYPE_IPV4HDR
744 		            | IXGBE_PSRTYPE_IPV6HDR;
745 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
746 	}
747 
748 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
749 
750 	ixgbe_initialize_rss_mapping(adapter);
751 
752 	if (adapter->num_rx_queues > 1) {
753 		/* RSS and RX IPP Checksum are mutually exclusive */
754 		rxcsum |= IXGBE_RXCSUM_PCSD;
755 	}
756 
757 	if (ifp->if_capenable & IFCAP_RXCSUM)
758 		rxcsum |= IXGBE_RXCSUM_PCSD;
759 
760 	/* This is useful for calculating UDP/IP fragment checksums */
761 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
762 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
763 
764 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
765 
766 } /* ixgbe_initialize_receive_units */
767 
768 /************************************************************************
769  * ixgbe_initialize_transmit_units - Enable transmit units.
770  ************************************************************************/
771 static void
772 ixgbe_initialize_transmit_units(if_ctx_t ctx)
773 {
774 	struct adapter     *adapter = iflib_get_softc(ctx);
775 	struct ixgbe_hw    *hw = &adapter->hw;
776 	if_softc_ctx_t     scctx = adapter->shared;
777 	struct ix_tx_queue *que;
778 	int i;
779 
780 	/* Setup the Base and Length of the Tx Descriptor Ring */
781 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
782 	    i++, que++) {
783 		struct tx_ring	   *txr = &que->txr;
784 		u64 tdba = txr->tx_paddr;
785 		u32 txctrl = 0;
786 		int j = txr->me;
787 
788 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
789 		    (tdba & 0x00000000ffffffffULL));
790 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
791 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
792 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
793 
794 		/* Setup the HW Tx Head and Tail descriptor pointers */
795 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
796 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
797 
798 		/* Cache the tail address */
799 		txr->tail = IXGBE_TDT(txr->me);
800 
801 		txr->tx_rs_cidx = txr->tx_rs_pidx;
802 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
803 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
804 			txr->tx_rsq[k] = QIDX_INVALID;
805 
806 		/* Disable Head Writeback */
807 		/*
808 		 * Note: for X550 series devices, these registers are actually
809 		 * prefixed with TPH_ isntead of DCA_, but the addresses and
810 		 * prefixed with TPH_ instead of DCA_, but the addresses and
811 		 */
812 		switch (hw->mac.type) {
813 		case ixgbe_mac_82598EB:
814 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
815 			break;
816 		default:
817 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
818 			break;
819 		}
820 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
821 		switch (hw->mac.type) {
822 		case ixgbe_mac_82598EB:
823 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
824 			break;
825 		default:
826 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
827 			break;
828 		}
829 
830 	}
831 
832 	if (hw->mac.type != ixgbe_mac_82598EB) {
833 		u32 dmatxctl, rttdcs;
834 
835 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
836 		dmatxctl |= IXGBE_DMATXCTL_TE;
837 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
838 		/* Disable arbiter to set MTQC */
839 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
840 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
841 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
842 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
843 		    ixgbe_get_mtqc(adapter->iov_mode));
844 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
845 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
846 	}
847 
848 } /* ixgbe_initialize_transmit_units */
849 
850 /************************************************************************
851  * ixgbe_register
852  ************************************************************************/
853 static void *
854 ixgbe_register(device_t dev)
855 {
856 	return (&ixgbe_sctx_init);
857 } /* ixgbe_register */
858 
859 /************************************************************************
860  * ixgbe_if_attach_pre - Device initialization routine, part 1
861  *
862  *   Called when the driver is being loaded.
863  *   Identifies the type of hardware, initializes the hardware,
864  *   and initializes iflib structures.
865  *
866  *   return 0 on success, positive on failure
867  ************************************************************************/
868 static int
869 ixgbe_if_attach_pre(if_ctx_t ctx)
870 {
871 	struct adapter  *adapter;
872 	device_t        dev;
873 	if_softc_ctx_t  scctx;
874 	struct ixgbe_hw *hw;
875 	int             error = 0;
876 	u32             ctrl_ext;
877 
878 	INIT_DEBUGOUT("ixgbe_attach: begin");
879 
880 	/* Allocate, clear, and link in our adapter structure */
881 	dev = iflib_get_dev(ctx);
882 	adapter = iflib_get_softc(ctx);
883 	adapter->hw.back = adapter;
884 	adapter->ctx = ctx;
885 	adapter->dev = dev;
886 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
887 	adapter->media = iflib_get_media(ctx);
888 	hw = &adapter->hw;
889 
890 	/* Determine hardware revision */
891 	hw->vendor_id = pci_get_vendor(dev);
892 	hw->device_id = pci_get_device(dev);
893 	hw->revision_id = pci_get_revid(dev);
894 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
895 	hw->subsystem_device_id = pci_get_subdevice(dev);
896 
897 	/* Do base PCI setup - map BAR0 */
898 	if (ixgbe_allocate_pci_resources(ctx)) {
899 		device_printf(dev, "Allocation of PCI resources failed\n");
900 		return (ENXIO);
901 	}
902 
903 	/* let hardware know driver is loaded */
904 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
905 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
906 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
907 
908 	/*
909 	 * Initialize the shared code
910 	 */
911 	if (ixgbe_init_shared_code(hw) != 0) {
912 		device_printf(dev, "Unable to initialize the shared code\n");
913 		error = ENXIO;
914 		goto err_pci;
915 	}
916 
917 	if (hw->mbx.ops.init_params)
918 		hw->mbx.ops.init_params(hw);
919 
920 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
921 
922 	if (hw->mac.type != ixgbe_mac_82598EB)
923 		hw->phy.smart_speed = ixgbe_smart_speed;
924 
925 	ixgbe_init_device_features(adapter);
926 
927 	/* Enable WoL (if supported) */
928 	ixgbe_check_wol_support(adapter);
929 
930 	/* Verify adapter fan is still functional (if applicable) */
931 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
932 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
933 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
934 	}
935 
936 	/* Ensure SW/FW semaphore is free */
937 	ixgbe_init_swfw_semaphore(hw);
938 
939 	/* Set an initial default flow control value */
940 	hw->fc.requested_mode = ixgbe_flow_control;
941 
942 	hw->phy.reset_if_overtemp = TRUE;
943 	error = ixgbe_reset_hw(hw);
944 	hw->phy.reset_if_overtemp = FALSE;
945 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
946 		/*
947 		 * No optics in this port, set up
948 		 * so the timer routine will probe
949 		 * for later insertion.
950 		 */
951 		adapter->sfp_probe = TRUE;
952 		error = 0;
953 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
954 		device_printf(dev, "Unsupported SFP+ module detected!\n");
955 		error = EIO;
956 		goto err_pci;
957 	} else if (error) {
958 		device_printf(dev, "Hardware initialization failed\n");
959 		error = EIO;
960 		goto err_pci;
961 	}
962 
963 	/* Make sure we have a good EEPROM before we read from it */
964 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
965 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
966 		error = EIO;
967 		goto err_pci;
968 	}
969 
970 	error = ixgbe_start_hw(hw);
971 	switch (error) {
972 	case IXGBE_ERR_EEPROM_VERSION:
973 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
974 		break;
975 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
976 		device_printf(dev, "Unsupported SFP+ Module\n");
977 		error = EIO;
978 		goto err_pci;
979 	case IXGBE_ERR_SFP_NOT_PRESENT:
980 		device_printf(dev, "No SFP+ Module found\n");
981 		/* falls thru */
982 	default:
983 		break;
984 	}
985 
986 	/* Most of the iflib initialization... */
987 
988 	iflib_set_mac(ctx, hw->mac.addr);
989 	switch (adapter->hw.mac.type) {
990 	case ixgbe_mac_X550:
991 	case ixgbe_mac_X550EM_x:
992 	case ixgbe_mac_X550EM_a:
993 		scctx->isc_rss_table_size = 512;
994 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
995 		break;
996 	default:
997 		scctx->isc_rss_table_size = 128;
998 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
999 	}
1000 
1001 	/* Allow legacy interrupts */
1002 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1003 
1004 	scctx->isc_txqsizes[0] =
1005 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1006 	    sizeof(u32), DBA_ALIGN);
1007 	scctx->isc_rxqsizes[0] =
1008 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1009 	    DBA_ALIGN);
1010 
1011 	/* XXX */
1012 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1013 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1014 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1015 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1016 	} else {
1017 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1018 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1019 	}
1020 
1021 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1022 
1023 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1024 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1025 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1026 
1027 	scctx->isc_txrx = &ixgbe_txrx;
1028 
1029 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1030 
1031 	return (0);
1032 
1033 err_pci:
1034 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1035 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1036 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1037 	ixgbe_free_pci_resources(ctx);
1038 
1039 	return (error);
1040 } /* ixgbe_if_attach_pre */
1041 
1042  /*********************************************************************
1043  * ixgbe_if_attach_post - Device initialization routine, part 2
1044  *
1045  *   Called during driver load, but after interrupts and
1046  *   resources have been allocated and configured.
1047  *   Sets up some data structures not relevant to iflib.
1048  *
1049  *   return 0 on success, positive on failure
1050  *********************************************************************/
1051 static int
1052 ixgbe_if_attach_post(if_ctx_t ctx)
1053 {
1054 	device_t dev;
1055 	struct adapter  *adapter;
1056 	struct ixgbe_hw *hw;
1057 	int             error = 0;
1058 
1059 	dev = iflib_get_dev(ctx);
1060 	adapter = iflib_get_softc(ctx);
1061 	hw = &adapter->hw;
1062 
1063 
1064 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1065 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1066 		device_printf(dev, "Device does not support legacy interrupts\n");
1067 		error = ENXIO;
1068 		goto err;
1069 	}
1070 
1071 	/* Allocate multicast array memory. */
1072 	adapter->mta = malloc(sizeof(*adapter->mta) *
1073 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1074 	if (adapter->mta == NULL) {
1075 		device_printf(dev, "Can not allocate multicast setup array\n");
1076 		error = ENOMEM;
1077 		goto err;
1078 	}
1079 
1080 	/* hw.ix defaults init */
1081 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1082 
1083 	/* Enable the optics for 82599 SFP+ fiber */
1084 	ixgbe_enable_tx_laser(hw);
1085 
1086 	/* Enable power to the phy. */
1087 	ixgbe_set_phy_power(hw, TRUE);
1088 
1089 	ixgbe_initialize_iov(adapter);
1090 
1091 	error = ixgbe_setup_interface(ctx);
1092 	if (error) {
1093 		device_printf(dev, "Interface setup failed: %d\n", error);
1094 		goto err;
1095 	}
1096 
1097 	ixgbe_if_update_admin_status(ctx);
1098 
1099 	/* Initialize statistics */
1100 	ixgbe_update_stats_counters(adapter);
1101 	ixgbe_add_hw_stats(adapter);
1102 
1103 	/* Check PCIE slot type/speed/width */
1104 	ixgbe_get_slot_info(adapter);
1105 
1106 	/*
1107 	 * Do time init and sysctl init here, but
1108 	 * only on the first port of a bypass adapter.
1109 	 */
1110 	ixgbe_bypass_init(adapter);
1111 
1112 	/* Set an initial dmac value */
1113 	adapter->dmac = 0;
1114 	/* Set initial advertised speeds (if applicable) */
1115 	adapter->advertise = ixgbe_get_advertise(adapter);
1116 
1117 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1118 		ixgbe_define_iov_schemas(dev, &error);
1119 
1120 	/* Add sysctls */
1121 	ixgbe_add_device_sysctls(ctx);
1122 
1123 	return (0);
1124 err:
1125 	return (error);
1126 } /* ixgbe_if_attach_post */
1127 
1128 /************************************************************************
1129  * ixgbe_check_wol_support
1130  *
1131  *   Checks whether the adapter's ports are capable of
1132  *   Wake On LAN by reading the adapter's NVM.
1133  *
1134  *   Sets each port's hw->wol_enabled value depending
1135  *   on the value read here.
1136  ************************************************************************/
1137 static void
1138 ixgbe_check_wol_support(struct adapter *adapter)
1139 {
1140 	struct ixgbe_hw *hw = &adapter->hw;
1141 	u16             dev_caps = 0;
1142 
1143 	/* Find out WoL support for port */
1144 	adapter->wol_support = hw->wol_enabled = 0;
1145 	ixgbe_get_device_caps(hw, &dev_caps);
1146 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1147 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1148 	     hw->bus.func == 0))
1149 		adapter->wol_support = hw->wol_enabled = 1;
1150 
1151 	/* Save initial wake up filter configuration */
1152 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1153 
1154 	return;
1155 } /* ixgbe_check_wol_support */
1156 
1157 /************************************************************************
1158  * ixgbe_setup_interface
1159  *
1160  *   Setup networking device structure and register an interface.
1161  ************************************************************************/
1162 static int
1163 ixgbe_setup_interface(if_ctx_t ctx)
1164 {
1165 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1166 	struct adapter *adapter = iflib_get_softc(ctx);
1167 
1168 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1169 
1170 	if_setbaudrate(ifp, IF_Gbps(10));
1171 
1172 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1173 
1174 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1175 
1176 	ixgbe_add_media_types(ctx);
1177 
1178 	/* Autoselect media by default */
1179 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1180 
1181 	return (0);
1182 } /* ixgbe_setup_interface */
1183 
1184 /************************************************************************
1185  * ixgbe_if_get_counter
1186  ************************************************************************/
1187 static uint64_t
1188 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1189 {
1190 	struct adapter *adapter = iflib_get_softc(ctx);
1191 	if_t           ifp = iflib_get_ifp(ctx);
1192 
1193 	switch (cnt) {
1194 	case IFCOUNTER_IPACKETS:
1195 		return (adapter->ipackets);
1196 	case IFCOUNTER_OPACKETS:
1197 		return (adapter->opackets);
1198 	case IFCOUNTER_IBYTES:
1199 		return (adapter->ibytes);
1200 	case IFCOUNTER_OBYTES:
1201 		return (adapter->obytes);
1202 	case IFCOUNTER_IMCASTS:
1203 		return (adapter->imcasts);
1204 	case IFCOUNTER_OMCASTS:
1205 		return (adapter->omcasts);
1206 	case IFCOUNTER_COLLISIONS:
1207 		return (0);
1208 	case IFCOUNTER_IQDROPS:
1209 		return (adapter->iqdrops);
1210 	case IFCOUNTER_OQDROPS:
1211 		return (0);
1212 	case IFCOUNTER_IERRORS:
1213 		return (adapter->ierrors);
1214 	default:
1215 		return (if_get_counter_default(ifp, cnt));
1216 	}
1217 } /* ixgbe_if_get_counter */
1218 
1219 /************************************************************************
1220  * ixgbe_if_i2c_req
1221  ************************************************************************/
1222 static int
1223 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1224 {
1225 	struct adapter		*adapter = iflib_get_softc(ctx);
1226 	struct ixgbe_hw 	*hw = &adapter->hw;
1227 	int 			i;
1228 
1229 
1230 	if (hw->phy.ops.read_i2c_byte == NULL)
1231 		return (ENXIO);
1232 	for (i = 0; i < req->len; i++)
1233 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1234 		    req->dev_addr, &req->data[i]);
1235 	return (0);
1236 } /* ixgbe_if_i2c_req */
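/*
 * Note: iflib invokes this method (via IFDI_I2C_REQ) for SIOCGI2C requests,
 * which is what backs ifconfig(8)'s SFP/SFP+ module readout on ix(4) ports.
 */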
1237 
1238 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1239  * @ctx: iflib context
1240  * @event: event code to check
1241  *
1242  * Defaults to returning true for unknown events.
1243  *
1244  * @returns true if iflib needs to reinit the interface
1245  */
1246 static bool
1247 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1248 {
1249 	switch (event) {
1250 	case IFLIB_RESTART_VLAN_CONFIG:
1251 		return (false);
1252 	default:
1253 		return (true);
1254 	}
1255 }
1256 
1257 /************************************************************************
1258  * ixgbe_add_media_types
1259  ************************************************************************/
1260 static void
1261 ixgbe_add_media_types(if_ctx_t ctx)
1262 {
1263 	struct adapter  *adapter = iflib_get_softc(ctx);
1264 	struct ixgbe_hw *hw = &adapter->hw;
1265 	device_t        dev = iflib_get_dev(ctx);
1266 	u64             layer;
1267 
1268 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1269 
1270 	/* Media types with matching FreeBSD media defines */
1271 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1272 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1273 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1274 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1275 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1276 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1277 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1278 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1279 
1280 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1281 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1282 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1283 		    NULL);
1284 
1285 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1286 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1287 		if (hw->phy.multispeed_fiber)
1288 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1289 			    NULL);
1290 	}
1291 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1292 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1293 		if (hw->phy.multispeed_fiber)
1294 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1295 			    NULL);
1296 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1297 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1298 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1299 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1300 
1301 #ifdef IFM_ETH_XTYPE
1302 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1303 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1304 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1305 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1306 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1307 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1308 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1309 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1310 #else
1311 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1312 		device_printf(dev, "Media supported: 10GbaseKR\n");
1313 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1314 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1315 	}
1316 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1317 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1318 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1319 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1320 	}
1321 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1322 		device_printf(dev, "Media supported: 1000baseKX\n");
1323 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1324 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1325 	}
1326 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1327 		device_printf(dev, "Media supported: 2500baseKX\n");
1328 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1329 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1330 	}
1331 #endif
1332 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1333 		device_printf(dev, "Media supported: 1000baseBX\n");
1334 
1335 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1336 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1337 		    0, NULL);
1338 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1339 	}
1340 
1341 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1342 } /* ixgbe_add_media_types */
1343 
1344 /************************************************************************
1345  * ixgbe_is_sfp
1346  ************************************************************************/
1347 static inline bool
1348 ixgbe_is_sfp(struct ixgbe_hw *hw)
1349 {
1350 	switch (hw->mac.type) {
1351 	case ixgbe_mac_82598EB:
1352 		if (hw->phy.type == ixgbe_phy_nl)
1353 			return (TRUE);
1354 		return (FALSE);
1355 	case ixgbe_mac_82599EB:
1356 		switch (hw->mac.ops.get_media_type(hw)) {
1357 		case ixgbe_media_type_fiber:
1358 		case ixgbe_media_type_fiber_qsfp:
1359 			return (TRUE);
1360 		default:
1361 			return (FALSE);
1362 		}
1363 	case ixgbe_mac_X550EM_x:
1364 	case ixgbe_mac_X550EM_a:
1365 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1366 			return (TRUE);
1367 		return (FALSE);
1368 	default:
1369 		return (FALSE);
1370 	}
1371 } /* ixgbe_is_sfp */
1372 
1373 /************************************************************************
1374  * ixgbe_config_link
1375  ************************************************************************/
1376 static void
1377 ixgbe_config_link(if_ctx_t ctx)
1378 {
1379 	struct adapter  *adapter = iflib_get_softc(ctx);
1380 	struct ixgbe_hw *hw = &adapter->hw;
1381 	u32             autoneg, err = 0;
1382 	bool            sfp, negotiate;
1383 
1384 	sfp = ixgbe_is_sfp(hw);
1385 
1386 	if (sfp) {
1387 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1388 		iflib_admin_intr_deferred(ctx);
1389 	} else {
1390 		if (hw->mac.ops.check_link)
1391 			err = ixgbe_check_link(hw, &adapter->link_speed,
1392 			    &adapter->link_up, FALSE);
1393 		if (err)
1394 			return;
1395 		autoneg = hw->phy.autoneg_advertised;
1396 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1397 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1398 			    &negotiate);
1399 		if (err)
1400 			return;
1401 		if (hw->mac.ops.setup_link)
1402 			err = hw->mac.ops.setup_link(hw, autoneg,
1403 			    adapter->link_up);
1404 	}
1405 } /* ixgbe_config_link */
1406 
1407 /************************************************************************
1408  * ixgbe_update_stats_counters - Update board statistics counters.
1409  ************************************************************************/
1410 static void
1411 ixgbe_update_stats_counters(struct adapter *adapter)
1412 {
1413 	struct ixgbe_hw       *hw = &adapter->hw;
1414 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1415 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1416 	u32                   lxoffrxc;
1417 	u64                   total_missed_rx = 0;
1418 
1419 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1420 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1421 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1422 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1423 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1424 
1425 	for (int i = 0; i < 16; i++) {
1426 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1427 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1428 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1429 	}
1430 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1431 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1432 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1433 
1434 	/* Hardware workaround, gprc counts missed packets */
1435 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1436 	stats->gprc -= missed_rx;
1437 
1438 	if (hw->mac.type != ixgbe_mac_82598EB) {
1439 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1440 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1441 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1442 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1443 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1444 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1445 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1446 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1447 		stats->lxoffrxc += lxoffrxc;
1448 	} else {
1449 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1450 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1451 		stats->lxoffrxc += lxoffrxc;
1452 		/* 82598 only has a counter in the high register */
1453 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1454 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1455 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1456 	}
1457 
1458 	/*
1459 	 * For watchdog management we need to know if we have been paused
1460 	 * during the last interval, so capture that here.
1461 	 */
1462 	if (lxoffrxc)
1463 		adapter->shared->isc_pause_frames = 1;
1464 
1465 	/*
1466 	 * Workaround: mprc hardware is incorrectly counting
1467 	 * broadcasts, so for now we subtract those.
1468 	 */
1469 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1470 	stats->bprc += bprc;
1471 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1472 	if (hw->mac.type == ixgbe_mac_82598EB)
1473 		stats->mprc -= bprc;
1474 
1475 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1476 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1477 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1478 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1479 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1480 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1481 
1482 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1483 	stats->lxontxc += lxon;
1484 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1485 	stats->lxofftxc += lxoff;
1486 	total = lxon + lxoff;
1487 
1488 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1489 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1490 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1491 	stats->gptc -= total;
1492 	stats->mptc -= total;
1493 	stats->ptc64 -= total;
1494 	stats->gotc -= total * ETHER_MIN_LEN;
1495 
1496 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1497 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1498 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1499 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1500 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1501 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1502 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1503 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1504 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1505 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1506 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1507 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1508 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1509 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1510 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1511 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1512 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1513 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1514 	/* Only read FCoE counters on 82599 and later */
1515 	if (hw->mac.type != ixgbe_mac_82598EB) {
1516 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1517 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1518 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1519 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1520 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1521 	}
1522 
1523 	/* Fill out the OS statistics structure */
1524 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1525 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1526 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1527 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1528 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1529 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1530 	IXGBE_SET_COLLISIONS(adapter, 0);
1531 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1532 
1533 	/*
1534 	 * Aggregate the following types of errors as RX errors:
1535 	 * - CRC error count,
1536 	 * - illegal byte error count,
1537 	 * - checksum error count,
1538 	 * - missed packets count,
1539 	 * - length error count,
1540 	 * - undersized packets count,
1541 	 * - fragmented packets count,
1542 	 * - oversized packets count,
1543 	 * - jabber count.
1544 	 */
1545 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->illerrc + stats->xec +
1546 	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1547 	    stats->rjc);
1548 } /* ixgbe_update_stats_counters */
1549 
1550 /************************************************************************
1551  * ixgbe_add_hw_stats
1552  *
1553  *   Add sysctl variables, one per statistic, to the system.
1554  ************************************************************************/
1555 static void
1556 ixgbe_add_hw_stats(struct adapter *adapter)
1557 {
1558 	device_t               dev = iflib_get_dev(adapter->ctx);
1559 	struct ix_rx_queue     *rx_que;
1560 	struct ix_tx_queue     *tx_que;
1561 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1562 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1563 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1564 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1565 	struct sysctl_oid      *stat_node, *queue_node;
1566 	struct sysctl_oid_list *stat_list, *queue_list;
1567 	int                    i;
1568 
1569 #define QUEUE_NAME_LEN 32
1570 	char                   namebuf[QUEUE_NAME_LEN];
1571 
1572 	/* Driver Statistics */
1573 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1574 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1575 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1576 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1577 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1578 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1579 
1580 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1581 		struct tx_ring *txr = &tx_que->txr;
1582 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1583 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1584 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1585 		queue_list = SYSCTL_CHILDREN(queue_node);
1586 
1587 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1588 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1589 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1590 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1591 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1592 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1593 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1594 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1595 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1596 		    CTLFLAG_RD, &txr->total_packets,
1597 		    "Queue Packets Transmitted");
1598 	}
1599 
1600 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1601 		struct rx_ring *rxr = &rx_que->rxr;
1602 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1603 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1604 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1605 		queue_list = SYSCTL_CHILDREN(queue_node);
1606 
1607 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1608 		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1609 		    &adapter->rx_queues[i], 0,
1610 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1611 		    "Interrupt Rate");
1612 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1613 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1614 		    "irqs on this queue");
1615 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1616 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1617 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1618 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1619 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1620 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1621 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1622 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1623 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1624 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1625 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1626 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1627 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1628 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1629 	}
1630 
1631 	/* MAC stats get their own sub node */
1632 
1633 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1634 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1635 	stat_list = SYSCTL_CHILDREN(stat_node);
1636 
1637 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1638 	    CTLFLAG_RD, &adapter->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1639 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1640 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1641 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1642 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1643 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1644 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1645 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1646 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1647 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1648 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1649 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1650 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1651 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1652 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1654 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1655 
1656 	/* Flow Control stats */
1657 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1658 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1660 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1662 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1664 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1665 
1666 	/* Packet Reception Stats */
1667 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1668 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1670 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1671 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1672 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1674 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1676 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1678 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1680 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1681 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1682 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1683 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1684 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1685 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1686 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1687 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1688 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1689 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1690 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1691 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1692 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1693 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1694 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1695 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1696 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1697 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1698 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1699 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1700 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1701 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1702 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1703 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1704 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1705 
1706 	/* Packet Transmission Stats */
1707 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1708 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1709 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1710 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1711 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1712 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1713 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1714 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1715 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1716 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1717 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1718 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1719 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1720 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1721 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1722 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1723 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1724 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1725 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1726 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1727 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1728 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1729 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1730 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1731 } /* ixgbe_add_hw_stats */
1732 
1733 /************************************************************************
1734  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1735  *
1736  *   Retrieves the TDH value from the hardware
1737  ************************************************************************/
1738 static int
1739 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1740 {
1741 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1742 	int            error;
1743 	unsigned int   val;
1744 
1745 	if (!txr)
1746 		return (0);
1747 
1748 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1749 	error = sysctl_handle_int(oidp, &val, 0, req);
1750 	if (error || !req->newptr)
1751 		return error;
1752 
1753 	return (0);
1754 } /* ixgbe_sysctl_tdh_handler */
1755 
1756 /************************************************************************
1757  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1758  *
1759  *   Retrieves the TDT value from the hardware
1760  ************************************************************************/
1761 static int
1762 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1763 {
1764 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1765 	int            error;
1766 	unsigned int   val;
1767 
1768 	if (!txr)
1769 		return (0);
1770 
1771 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1772 	error = sysctl_handle_int(oidp, &val, 0, req);
1773 	if (error || !req->newptr)
1774 		return error;
1775 
1776 	return (0);
1777 } /* ixgbe_sysctl_tdt_handler */
1778 
1779 /************************************************************************
1780  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1781  *
1782  *   Retrieves the RDH value from the hardware
1783  ************************************************************************/
1784 static int
1785 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1786 {
1787 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1788 	int            error;
1789 	unsigned int   val;
1790 
1791 	if (!rxr)
1792 		return (0);
1793 
1794 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1795 	error = sysctl_handle_int(oidp, &val, 0, req);
1796 	if (error || !req->newptr)
1797 		return error;
1798 
1799 	return (0);
1800 } /* ixgbe_sysctl_rdh_handler */
1801 
1802 /************************************************************************
1803  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1804  *
1805  *   Retrieves the RDT value from the hardware
1806  ************************************************************************/
1807 static int
1808 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1809 {
1810 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1811 	int            error;
1812 	unsigned int   val;
1813 
1814 	if (!rxr)
1815 		return (0);
1816 
1817 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1818 	error = sysctl_handle_int(oidp, &val, 0, req);
1819 	if (error || !req->newptr)
1820 		return error;
1821 
1822 	return (0);
1823 } /* ixgbe_sysctl_rdt_handler */
1824 
1825 /************************************************************************
1826  * ixgbe_if_vlan_register
1827  *
1828  *   Run via the vlan config EVENT; it lets us use the
1829  *   HW filter table since we can get the vlan id. This
1830  *   just creates the entry in the soft version of the
1831  *   VFTA; init will repopulate the real table.
1832  ************************************************************************/
1833 static void
1834 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1835 {
1836 	struct adapter *adapter = iflib_get_softc(ctx);
1837 	u16            index, bit;
1838 
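	/*
	 * The 4096-bit VFTA is kept as 128 32-bit words: the upper 7
	 * bits of the vtag select the word and the lower 5 bits the bit
	 * within it, e.g. vtag 100 -> word 3, bit 4.
	 */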
1839 	index = (vtag >> 5) & 0x7F;
1840 	bit = vtag & 0x1F;
1841 	adapter->shadow_vfta[index] |= (1 << bit);
1842 	++adapter->num_vlans;
1843 	ixgbe_setup_vlan_hw_support(ctx);
1844 } /* ixgbe_if_vlan_register */
1845 
1846 /************************************************************************
1847  * ixgbe_if_vlan_unregister
1848  *
1849  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1850  *   Run via the vlan unconfig EVENT; removes our entry from the soft VFTA.
1851 static void
1852 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1853 {
1854 	struct adapter *adapter = iflib_get_softc(ctx);
1855 	u16            index, bit;
1856 
1857 	index = (vtag >> 5) & 0x7F;
1858 	bit = vtag & 0x1F;
1859 	adapter->shadow_vfta[index] &= ~(1 << bit);
1860 	--adapter->num_vlans;
1861 	/* Re-init to load the changes */
1862 	ixgbe_setup_vlan_hw_support(ctx);
1863 } /* ixgbe_if_vlan_unregister */
1864 
1865 /************************************************************************
1866  * ixgbe_setup_vlan_hw_support
1867  ************************************************************************/
1868 static void
1869 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1870 {
1871 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1872 	struct adapter  *adapter = iflib_get_softc(ctx);
1873 	struct ixgbe_hw *hw = &adapter->hw;
1874 	struct rx_ring  *rxr;
1875 	int             i;
1876 	u32             ctrl;
1877 
1878 
1879 	/*
1880 	 * We get here through init_locked, meaning
1881 	 * a soft reset; this has already cleared
1882 	 * the VFTA and other state, so if no VLANs
1883 	 * have been registered, do nothing.
1884 	 */
1885 	if (adapter->num_vlans == 0)
1886 		return;
1887 
1888 	/* Setup the queues for vlans */
1889 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1890 		for (i = 0; i < adapter->num_rx_queues; i++) {
1891 			rxr = &adapter->rx_queues[i].rxr;
1892 			/* On 82599 the VLAN enable is per-queue in RXDCTL */
1893 			if (hw->mac.type != ixgbe_mac_82598EB) {
1894 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1895 				ctrl |= IXGBE_RXDCTL_VME;
1896 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1897 			}
1898 			rxr->vtag_strip = TRUE;
1899 		}
1900 	}
1901 
1902 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1903 		return;
1904 	/*
1905 	 * A soft reset zeroes out the VFTA, so
1906 	 * we need to repopulate it now.
1907 	 */
1908 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1909 		if (adapter->shadow_vfta[i] != 0)
1910 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1911 			    adapter->shadow_vfta[i]);
1912 
1913 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1914 	/* Enable the VLAN Filter Table if HW filtering is enabled */
1915 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1916 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1917 		ctrl |= IXGBE_VLNCTRL_VFE;
1918 	}
1919 	if (hw->mac.type == ixgbe_mac_82598EB)
1920 		ctrl |= IXGBE_VLNCTRL_VME;
1921 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1922 } /* ixgbe_setup_vlan_hw_support */
1923 
1924 /************************************************************************
1925  * ixgbe_get_slot_info
1926  *
1927  *   Get the width and transaction speed of
1928  *   the slot this adapter is plugged into.
1929  ************************************************************************/
1930 static void
1931 ixgbe_get_slot_info(struct adapter *adapter)
1932 {
1933 	device_t        dev = iflib_get_dev(adapter->ctx);
1934 	struct ixgbe_hw *hw = &adapter->hw;
1935 	int             bus_info_valid = TRUE;
1936 	u32             offset;
1937 	u16             link;
1938 
1939 	/* Some devices are behind an internal bridge */
1940 	switch (hw->device_id) {
1941 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1942 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1943 		goto get_parent_info;
1944 	default:
1945 		break;
1946 	}
1947 
1948 	ixgbe_get_bus_info(hw);
1949 
1950 	/*
1951 	 * Some devices don't use PCI-E; for those there is no need
1952 	 * to display "Unknown" for bus speed and width, so just return.
1953 	 */
1954 	switch (hw->mac.type) {
1955 	case ixgbe_mac_X550EM_x:
1956 	case ixgbe_mac_X550EM_a:
1957 		return;
1958 	default:
1959 		goto display;
1960 	}
1961 
1962 get_parent_info:
1963 	/*
1964 	 * For the Quad port adapter we need to parse back
1965 	 * up the PCI tree to find the speed of the expansion
1966 	 * slot into which this adapter is plugged. A bit more work.
1967 	 */
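	/*
	 * Each double device_get_parent() hop climbs from a device to
	 * the PCI bridge above it; the second hop should land on the
	 * slot's bridge, whose PCIe link status register is read below.
	 */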
1968 	dev = device_get_parent(device_get_parent(dev));
1969 #ifdef IXGBE_DEBUG
1970 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1971 	    pci_get_slot(dev), pci_get_function(dev));
1972 #endif
1973 	dev = device_get_parent(device_get_parent(dev));
1974 #ifdef IXGBE_DEBUG
1975 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1976 	    pci_get_slot(dev), pci_get_function(dev));
1977 #endif
1978 	/* Now get the PCI Express Capabilities offset */
1979 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1980 		/*
1981 		 * Hmm...can't get PCI-Express capabilities.
1982 		 * Falling back to default method.
1983 		 */
1984 		bus_info_valid = FALSE;
1985 		ixgbe_get_bus_info(hw);
1986 		goto display;
1987 	}
1988 	/* ...and read the Link Status Register */
1989 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1990 	ixgbe_set_pci_config_data_generic(hw, link);
1991 
1992 display:
1993 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1994 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1995 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1996 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1997 	     "Unknown"),
1998 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1999 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2000 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
2001 	     "Unknown"));
2002 
2003 	if (bus_info_valid) {
2004 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2005 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2006 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
2007 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
2008 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
2009 		}
2010 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2011 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2012 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
2013 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
2014 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2015 		}
2016 	} else
2017 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
2018 
2019 	return;
2020 } /* ixgbe_get_slot_info */
2021 
2022 /************************************************************************
2023  * ixgbe_if_msix_intr_assign
2024  *
2025  *   Setup MSI-X Interrupt resources and handlers
2026  ************************************************************************/
2027 static int
2028 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2029 {
2030 	struct adapter     *adapter = iflib_get_softc(ctx);
2031 	struct ix_rx_queue *rx_que = adapter->rx_queues;
2032 	struct ix_tx_queue *tx_que;
2033 	int                error, rid, vector = 0;
2034 	int                cpu_id = 0;
2035 	char               buf[16];
2036 
2037 	/* Queue vectors are assigned first; the admin vector comes last */
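	/* MSI-X SYS_RES_IRQ rids are 1-based, hence rid = vector + 1 */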
2038 	rid = vector + 1;
2039 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2040 		rid = vector + 1;
2041 
2042 		snprintf(buf, sizeof(buf), "rxq%d", i);
2043 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2044 		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2045 
2046 		if (error) {
2047 			device_printf(iflib_get_dev(ctx),
2048 			    "Failed to allocate queue interrupt %d, err: %d", i, error);
2049 			adapter->num_rx_queues = i + 1;
2050 			goto fail;
2051 		}
2052 
2053 		rx_que->msix = vector;
2054 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2055 			/*
2056 			 * The queue ID is used as the RSS layer bucket ID.
2057 			 * We look up the queue ID -> RSS CPU ID and select
2058 			 * that.
2059 			 */
2060 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2061 		} else {
2062 			/*
2063 			 * Bind the MSI-X vector, and thus the
2064 			 * rings to the corresponding cpu.
2065 			 *
2066 			 * This just happens to match the default RSS
2067 			 * round-robin bucket -> queue -> CPU allocation.
2068 			 */
2069 			if (adapter->num_rx_queues > 1)
2070 				cpu_id = i;
2071 		}
2072 
2073 	}
2074 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2075 		snprintf(buf, sizeof(buf), "txq%d", i);
2076 		tx_que = &adapter->tx_queues[i];
2077 		tx_que->msix = i % adapter->num_rx_queues;
2078 		iflib_softirq_alloc_generic(ctx,
2079 		    &adapter->rx_queues[tx_que->msix].que_irq,
2080 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2081 	}
2082 	rid = vector + 1;
2083 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2084 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2085 	if (error) {
2086 		device_printf(iflib_get_dev(ctx),
2087 		    "Failed to register admin handler");
2088 		return (error);
2089 	}
2090 
2091 	adapter->vector = vector;
2092 
2093 	return (0);
2094 fail:
2095 	iflib_irq_free(ctx, &adapter->irq);
2096 	rx_que = adapter->rx_queues;
2097 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2098 		iflib_irq_free(ctx, &rx_que->que_irq);
2099 
2100 	return (error);
2101 } /* ixgbe_if_msix_intr_assign */
2102 
2103 /*********************************************************************
2104  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2105  **********************************************************************/
2106 static int
2107 ixgbe_msix_que(void *arg)
2108 {
2109 	struct ix_rx_queue *que = arg;
2110 	struct adapter     *adapter = que->adapter;
2111 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2112 
2113 	/* Protect against spurious interrupts */
2114 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2115 		return (FILTER_HANDLED);
2116 
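	/*
	 * Mask this queue's vector and let the iflib queue task do the
	 * actual RX/TX work (FILTER_SCHEDULE_THREAD below); the vector
	 * is re-enabled from the driver's queue-intr-enable method once
	 * the ring has been serviced.
	 */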
2117 	ixgbe_disable_queue(adapter, que->msix);
2118 	++que->irqs;
2119 
2120 	return (FILTER_SCHEDULE_THREAD);
2121 } /* ixgbe_msix_que */
2122 
2123 /************************************************************************
2124  * ixgbe_media_status - Media Ioctl callback
2125  * ixgbe_if_media_status - Media Ioctl callback
2126  *   Called whenever the user queries the status of
2127  *   the interface using ifconfig.
2128  ************************************************************************/
2129 static void
2130 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2131 {
2132 	struct adapter  *adapter = iflib_get_softc(ctx);
2133 	struct ixgbe_hw *hw = &adapter->hw;
2134 	int             layer;
2135 
2136 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2137 
2138 	ifmr->ifm_status = IFM_AVALID;
2139 	ifmr->ifm_active = IFM_ETHER;
2140 
2141 	if (!adapter->link_active)
2142 		return;
2143 
2144 	ifmr->ifm_status |= IFM_ACTIVE;
2145 	layer = adapter->phy_layer;
2146 
2147 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2148 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2149 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2150 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2151 		switch (adapter->link_speed) {
2152 		case IXGBE_LINK_SPEED_10GB_FULL:
2153 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2154 			break;
2155 		case IXGBE_LINK_SPEED_1GB_FULL:
2156 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2157 			break;
2158 		case IXGBE_LINK_SPEED_100_FULL:
2159 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2160 			break;
2161 		case IXGBE_LINK_SPEED_10_FULL:
2162 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2163 			break;
2164 		}
2165 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2166 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2167 		switch (adapter->link_speed) {
2168 		case IXGBE_LINK_SPEED_10GB_FULL:
2169 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2170 			break;
2171 		}
2172 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2173 		switch (adapter->link_speed) {
2174 		case IXGBE_LINK_SPEED_10GB_FULL:
2175 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2176 			break;
2177 		case IXGBE_LINK_SPEED_1GB_FULL:
2178 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2179 			break;
2180 		}
2181 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2182 		switch (adapter->link_speed) {
2183 		case IXGBE_LINK_SPEED_10GB_FULL:
2184 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2185 			break;
2186 		case IXGBE_LINK_SPEED_1GB_FULL:
2187 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2188 			break;
2189 		}
2190 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2191 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2192 		switch (adapter->link_speed) {
2193 		case IXGBE_LINK_SPEED_10GB_FULL:
2194 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2195 			break;
2196 		case IXGBE_LINK_SPEED_1GB_FULL:
2197 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2198 			break;
2199 		}
2200 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2201 		switch (adapter->link_speed) {
2202 		case IXGBE_LINK_SPEED_10GB_FULL:
2203 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2204 			break;
2205 		}
2206 	/*
2207 	 * XXX: These need to use the proper media types once
2208 	 * they're added.
2209 	 */
2210 #ifndef IFM_ETH_XTYPE
2211 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2212 		switch (adapter->link_speed) {
2213 		case IXGBE_LINK_SPEED_10GB_FULL:
2214 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2215 			break;
2216 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2217 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2218 			break;
2219 		case IXGBE_LINK_SPEED_1GB_FULL:
2220 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2221 			break;
2222 		}
2223 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2224 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2225 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2226 		switch (adapter->link_speed) {
2227 		case IXGBE_LINK_SPEED_10GB_FULL:
2228 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2229 			break;
2230 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2231 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2232 			break;
2233 		case IXGBE_LINK_SPEED_1GB_FULL:
2234 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2235 			break;
2236 		}
2237 #else
2238 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2239 		switch (adapter->link_speed) {
2240 		case IXGBE_LINK_SPEED_10GB_FULL:
2241 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2242 			break;
2243 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2244 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2245 			break;
2246 		case IXGBE_LINK_SPEED_1GB_FULL:
2247 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2248 			break;
2249 		}
2250 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2251 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2252 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2253 		switch (adapter->link_speed) {
2254 		case IXGBE_LINK_SPEED_10GB_FULL:
2255 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2256 			break;
2257 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2258 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2259 			break;
2260 		case IXGBE_LINK_SPEED_1GB_FULL:
2261 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2262 			break;
2263 		}
2264 #endif
2265 
2266 	/* If nothing is recognized... */
2267 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2268 		ifmr->ifm_active |= IFM_UNKNOWN;
2269 
2270 	/* Display current flow control setting used on link */
2271 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2272 	    hw->fc.current_mode == ixgbe_fc_full)
2273 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2274 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2275 	    hw->fc.current_mode == ixgbe_fc_full)
2276 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2277 } /* ixgbe_if_media_status */
2278 
2279 /************************************************************************
2280  * ixgbe_if_media_change - Media Ioctl callback
2281  *
2282  *   Called when the user changes speed/duplex using
2283  *   the media/mediaopt options with ifconfig.
2284  ************************************************************************/
2285 static int
2286 ixgbe_if_media_change(if_ctx_t ctx)
2287 {
2288 	struct adapter   *adapter = iflib_get_softc(ctx);
2289 	struct ifmedia   *ifm = iflib_get_media(ctx);
2290 	struct ixgbe_hw  *hw = &adapter->hw;
2291 	ixgbe_link_speed speed = 0;
2292 
2293 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2294 
2295 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2296 		return (EINVAL);
2297 
2298 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2299 		return (EPERM);
2300 
2301 	/*
2302 	 * We don't actually need to check against the supported
2303 	 * media types of the adapter; ifmedia will take care of
2304 	 * that for us.
2305 	 */
2306 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2307 	case IFM_AUTO:
2308 	case IFM_10G_T:
2309 		speed |= IXGBE_LINK_SPEED_100_FULL;
2310 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2311 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2312 		break;
2313 	case IFM_10G_LRM:
2314 	case IFM_10G_LR:
2315 #ifndef IFM_ETH_XTYPE
2316 	case IFM_10G_SR: /* KR, too */
2317 	case IFM_10G_CX4: /* KX4 */
2318 #else
2319 	case IFM_10G_KR:
2320 	case IFM_10G_KX4:
2321 #endif
2322 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2323 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2324 		break;
2325 #ifndef IFM_ETH_XTYPE
2326 	case IFM_1000_CX: /* KX */
2327 #else
2328 	case IFM_1000_KX:
2329 #endif
2330 	case IFM_1000_LX:
2331 	case IFM_1000_SX:
2332 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2333 		break;
2334 	case IFM_1000_T:
2335 		speed |= IXGBE_LINK_SPEED_100_FULL;
2336 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2337 		break;
2338 	case IFM_10G_TWINAX:
2339 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2340 		break;
2341 	case IFM_100_TX:
2342 		speed |= IXGBE_LINK_SPEED_100_FULL;
2343 		break;
2344 	case IFM_10_T:
2345 		speed |= IXGBE_LINK_SPEED_10_FULL;
2346 		break;
2347 	default:
2348 		goto invalid;
2349 	}
2350 
2351 	hw->mac.autotry_restart = TRUE;
2352 	hw->mac.ops.setup_link(hw, speed, TRUE);
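	/*
	 * Record the selection in the bitmask encoding used for
	 * advertised speeds elsewhere in the driver:
	 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M.
	 */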
2353 	adapter->advertise =
2354 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2355 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2356 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2357 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2358 
2359 	return (0);
2360 
2361 invalid:
2362 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2363 
2364 	return (EINVAL);
2365 } /* ixgbe_if_media_change */
2366 
2367 /************************************************************************
2368  * ixgbe_if_promisc_set
2369  ************************************************************************/
2370 static int
2371 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2372 {
2373 	struct adapter *adapter = iflib_get_softc(ctx);
2374 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2375 	u32            rctl;
2376 	int            mcnt = 0;
2377 
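	/*
	 * Start from the current FCTRL value with unicast promiscuous
	 * cleared; multicast promiscuous is only left set when the
	 * multicast list overflows the hardware filter table
	 * (MAX_NUM_MULTICAST_ADDRESSES entries).
	 */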
2378 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2379 	rctl &= (~IXGBE_FCTRL_UPE);
2380 	if (ifp->if_flags & IFF_ALLMULTI)
2381 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2382 	else {
2383 		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
2384 	}
2385 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2386 		rctl &= (~IXGBE_FCTRL_MPE);
2387 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2388 
2389 	if (ifp->if_flags & IFF_PROMISC) {
2390 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2391 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2392 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2393 		rctl |= IXGBE_FCTRL_MPE;
2394 		rctl &= ~IXGBE_FCTRL_UPE;
2395 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2396 	}
2397 	return (0);
2398 } /* ixgbe_if_promisc_set */
2399 
2400 /************************************************************************
2401  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2402  ************************************************************************/
2403 static int
2404 ixgbe_msix_link(void *arg)
2405 {
2406 	struct adapter  *adapter = arg;
2407 	struct ixgbe_hw *hw = &adapter->hw;
2408 	u32             eicr, eicr_mask;
2409 	s32             retval;
2410 
2411 	++adapter->link_irq;
2412 
2413 	/* Pause other interrupts */
2414 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2415 
2416 	/* First get the cause */
2417 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2418 	/* Be sure the queue bits are not cleared */
2419 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2420 	/* Clear interrupt with write */
2421 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2422 
2423 	/* Link status change */
2424 	if (eicr & IXGBE_EICR_LSC) {
2425 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2426 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2427 	}
2428 
2429 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2430 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2431 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2432 			/* This is probably overkill :) */
2433 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2434 				return (FILTER_HANDLED);
2435 			/* Disable the interrupt */
2436 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2437 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2438 		} else
2439 			if (eicr & IXGBE_EICR_ECC) {
2440 				device_printf(iflib_get_dev(adapter->ctx),
2441 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2442 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2443 			}
2444 
2445 		/* Check for over temp condition */
2446 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2447 			switch (adapter->hw.mac.type) {
2448 			case ixgbe_mac_X550EM_a:
2449 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2450 					break;
2451 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2452 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2453 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2454 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2455 				retval = hw->phy.ops.check_overtemp(hw);
2456 				if (retval != IXGBE_ERR_OVERTEMP)
2457 					break;
2458 				device_printf(iflib_get_dev(adapter->ctx),
2459 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2460 				device_printf(iflib_get_dev(adapter->ctx),
2461 				    "System shutdown required!\n");
2462 				break;
2463 			default:
2464 				if (!(eicr & IXGBE_EICR_TS))
2465 					break;
2466 				retval = hw->phy.ops.check_overtemp(hw);
2467 				if (retval != IXGBE_ERR_OVERTEMP)
2468 					break;
2469 				device_printf(iflib_get_dev(adapter->ctx),
2470 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2471 				device_printf(iflib_get_dev(adapter->ctx),
2472 				    "System shutdown required!\n");
2473 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2474 				break;
2475 			}
2476 		}
2477 
2478 		/* Check for VF message */
2479 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2480 		    (eicr & IXGBE_EICR_MAILBOX))
2481 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2482 	}
2483 
2484 	if (ixgbe_is_sfp(hw)) {
2485 		/* Pluggable optics-related interrupt */
2486 		if (hw->mac.type >= ixgbe_mac_X540)
2487 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2488 		else
2489 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2490 
2491 		if (eicr & eicr_mask) {
2492 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2493 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2494 		}
2495 
2496 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2497 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2498 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2499 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2500 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2501 		}
2502 	}
2503 
2504 	/* Check for fan failure */
2505 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2506 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2507 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2508 	}
2509 
2510 	/* External PHY interrupt */
2511 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2512 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2513 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2514 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2515 	}
2516 
2517 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2518 } /* ixgbe_msix_link */
2519 
2520 /************************************************************************
2521  * ixgbe_sysctl_interrupt_rate_handler
2522  ************************************************************************/
2523 static int
2524 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2525 {
2526 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2527 	int                error;
2528 	unsigned int       reg, usec, rate;
2529 
2530 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
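	/*
	 * The EITR interval field (bits 11:3) counts in ~2 usec units,
	 * so an interval of 'usec' units is roughly 500000/usec
	 * interrupts per second; on write-back the value stays in its
	 * shifted position, hence 4000000/rate (= 8 * usec) below.
	 */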
2531 	usec = ((reg & 0x0FF8) >> 3);
2532 	if (usec > 0)
2533 		rate = 500000 / usec;
2534 	else
2535 		rate = 0;
2536 	error = sysctl_handle_int(oidp, &rate, 0, req);
2537 	if (error || !req->newptr)
2538 		return error;
2539 	reg &= ~0xfff; /* default, no limitation */
2540 	ixgbe_max_interrupt_rate = 0;
2541 	if (rate > 0 && rate < 500000) {
2542 		if (rate < 1000)
2543 			rate = 1000;
2544 		ixgbe_max_interrupt_rate = rate;
2545 		reg |= ((4000000/rate) & 0xff8);
2546 	}
2547 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2548 
2549 	return (0);
2550 } /* ixgbe_sysctl_interrupt_rate_handler */
2551 
2552 /************************************************************************
2553  * ixgbe_add_device_sysctls
2554  ************************************************************************/
2555 static void
2556 ixgbe_add_device_sysctls(if_ctx_t ctx)
2557 {
2558 	struct adapter         *adapter = iflib_get_softc(ctx);
2559 	device_t               dev = iflib_get_dev(ctx);
2560 	struct ixgbe_hw        *hw = &adapter->hw;
2561 	struct sysctl_oid_list *child;
2562 	struct sysctl_ctx_list *ctx_list;
2563 
2564 	ctx_list = device_get_sysctl_ctx(dev);
2565 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2566 
2567 	/* Sysctls for all devices */
2568 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2569 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2570 	    adapter, 0, ixgbe_sysctl_flowcntl, "I",
2571 	    IXGBE_SYSCTL_DESC_SET_FC);
2572 
2573 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2574 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2575 	    adapter, 0, ixgbe_sysctl_advertise, "I",
2576 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2577 
2578 #ifdef IXGBE_DEBUG
2579 	/* testing sysctls (for all devices) */
2580 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2581 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2582 	    adapter, 0, ixgbe_sysctl_power_state,
2583 	    "I", "PCI Power State");
2584 
2585 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2586 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2587 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2588 #endif
2589 	/* for X550 series devices */
2590 	if (hw->mac.type >= ixgbe_mac_X550)
2591 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2592 		    CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2593 		    adapter, 0, ixgbe_sysctl_dmac,
2594 		    "I", "DMA Coalesce");
2595 
2596 	/* for WoL-capable devices */
2597 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2598 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2599 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2600 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2601 
2602 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2603 		    CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2604 		    adapter, 0, ixgbe_sysctl_wufc,
2605 		    "I", "Enable/Disable Wake Up Filters");
2606 	}
2607 
2608 	/* for X552/X557-AT devices */
2609 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2610 		struct sysctl_oid *phy_node;
2611 		struct sysctl_oid_list *phy_list;
2612 
2613 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2614 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2615 		phy_list = SYSCTL_CHILDREN(phy_node);
2616 
2617 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2618 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2619 		    adapter, 0, ixgbe_sysctl_phy_temp,
2620 		    "I", "Current External PHY Temperature (Celsius)");
2621 
2622 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2623 		    "overtemp_occurred",
2624 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2625 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2626 		    "External PHY High Temperature Event Occurred");
2627 	}
2628 
2629 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2630 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2631 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2632 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2633 	}
2634 } /* ixgbe_add_device_sysctls */
2635 
2636 /************************************************************************
2637  * ixgbe_allocate_pci_resources
2638  ************************************************************************/
2639 static int
2640 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2641 {
2642 	struct adapter *adapter = iflib_get_softc(ctx);
2643 	device_t        dev = iflib_get_dev(ctx);
2644 	int             rid;
2645 
2646 	rid = PCIR_BAR(0);
2647 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2648 	    RF_ACTIVE);
2649 
2650 	if (!(adapter->pci_mem)) {
2651 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2652 		return (ENXIO);
2653 	}
2654 
2655 	/* Save bus_space values for READ/WRITE_REG macros */
2656 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2657 	adapter->osdep.mem_bus_space_handle =
2658 	    rman_get_bushandle(adapter->pci_mem);
2659 	/* Set hw values for shared code */
2660 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2661 
2662 	return (0);
2663 } /* ixgbe_allocate_pci_resources */
2664 
2665 /************************************************************************
2666  * ixgbe_if_detach - Device removal routine
2667  *
2668  *   Called when the driver is being removed.
2669  *   Stops the adapter and deallocates all the resources
2670  *   that were allocated for driver operation.
2671  *
2672  *   return 0 on success, positive on failure
2673  ************************************************************************/
2674 static int
2675 ixgbe_if_detach(if_ctx_t ctx)
2676 {
2677 	struct adapter *adapter = iflib_get_softc(ctx);
2678 	device_t       dev = iflib_get_dev(ctx);
2679 	u32            ctrl_ext;
2680 
2681 	INIT_DEBUGOUT("ixgbe_detach: begin");
2682 
2683 	if (ixgbe_pci_iov_detach(dev) != 0) {
2684 		device_printf(dev, "SR-IOV in use; detach first.\n");
2685 		return (EBUSY);
2686 	}
2687 
2688 	ixgbe_setup_low_power_mode(ctx);
2689 
2690 	/* let hardware know driver is unloading */
2691 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2692 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2693 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2694 
2695 	ixgbe_free_pci_resources(ctx);
2696 	free(adapter->mta, M_IXGBE);
2697 
2698 	return (0);
2699 } /* ixgbe_if_detach */
2700 
2701 /************************************************************************
2702  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2703  *
2704  *   Prepare the adapter/port for LPLU and/or WoL
2705  ************************************************************************/
2706 static int
2707 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2708 {
2709 	struct adapter  *adapter = iflib_get_softc(ctx);
2710 	struct ixgbe_hw *hw = &adapter->hw;
2711 	device_t        dev = iflib_get_dev(ctx);
2712 	s32             error = 0;
2713 
2714 	if (!hw->wol_enabled)
2715 		ixgbe_set_phy_power(hw, FALSE);
2716 
2717 	/* Limit power management flow to X550EM baseT */
2718 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2719 	    hw->phy.ops.enter_lplu) {
2720 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2721 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2722 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2723 
2724 		/*
2725 		 * Clear Wake Up Status register to prevent any previous wakeup
2726 		 * events from waking us up immediately after we suspend.
2727 		 */
2728 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2729 
2730 		/*
2731 		 * Program the Wakeup Filter Control register with user filter
2732 		 * settings
2733 		 */
2734 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2735 
2736 		/* Enable wakeups and power management in Wakeup Control */
2737 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2738 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2739 
2740 		/* X550EM baseT adapters need a special LPLU flow */
2741 		hw->phy.reset_disable = TRUE;
2742 		ixgbe_if_stop(ctx);
2743 		error = hw->phy.ops.enter_lplu(hw);
2744 		if (error)
2745 			device_printf(dev, "Error entering LPLU: %d\n", error);
2746 		hw->phy.reset_disable = FALSE;
2747 	} else {
2748 		/* Just stop for other adapters */
2749 		ixgbe_if_stop(ctx);
2750 	}
2751 
2752 	return error;
2753 } /* ixgbe_setup_low_power_mode */
2754 
2755 /************************************************************************
2756  * ixgbe_if_shutdown - Shutdown entry point
2757  ************************************************************************/
2758 static int
2759 ixgbe_if_shutdown(if_ctx_t ctx)
2760 {
2761 	int error = 0;
2762 
2763 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2764 
2765 	error = ixgbe_setup_low_power_mode(ctx);
2766 
2767 	return (error);
2768 } /* ixgbe_if_shutdown */
2769 
2770 /************************************************************************
2771  * ixgbe_if_suspend
2772  *
2773  *   From D0 to D3
2774  ************************************************************************/
2775 static int
2776 ixgbe_if_suspend(if_ctx_t ctx)
2777 {
2778 	int error = 0;
2779 
2780 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2781 
2782 	error = ixgbe_setup_low_power_mode(ctx);
2783 
2784 	return (error);
2785 } /* ixgbe_if_suspend */
2786 
2787 /************************************************************************
2788  * ixgbe_if_resume
2789  *
2790  *   From D3 to D0
2791  ************************************************************************/
2792 static int
2793 ixgbe_if_resume(if_ctx_t ctx)
2794 {
2795 	struct adapter  *adapter = iflib_get_softc(ctx);
2796 	device_t        dev = iflib_get_dev(ctx);
2797 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2798 	struct ixgbe_hw *hw = &adapter->hw;
2799 	u32             wus;
2800 
2801 	INIT_DEBUGOUT("ixgbe_resume: begin");
2802 
2803 	/* Read & clear WUS register */
2804 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2805 	if (wus)
2806 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2807 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2808 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2809 	/* And clear WUFC until next low-power transition */
2810 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2811 
2812 	/*
2813 	 * Required after D3->D0 transition;
2814 	 * will re-advertise all previous advertised speeds
2815 	 * will re-advertise all previously advertised speeds
2816 	if (ifp->if_flags & IFF_UP)
2817 		ixgbe_if_init(ctx);
2818 
2819 	return (0);
2820 } /* ixgbe_if_resume */
2821 
2822 /************************************************************************
2823  * ixgbe_if_mtu_set - Ioctl mtu entry point
2824  *
2825  *   Return 0 on success, EINVAL on failure
2826  ************************************************************************/
2827 static int
2828 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2829 {
2830 	struct adapter *adapter = iflib_get_softc(ctx);
2831 	int error = 0;
2832 
2833 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2834 
2835 	if (mtu > IXGBE_MAX_MTU) {
2836 		error = EINVAL;
2837 	} else {
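		/*
		 * IXGBE_MTU_HDR accounts for the link-level header and
		 * CRC overhead added on top of the MTU.
		 */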
2838 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2839 	}
2840 
2841 	return error;
2842 } /* ixgbe_if_mtu_set */
2843 
2844 /************************************************************************
2845  * ixgbe_if_crcstrip_set
2846  ************************************************************************/
2847 static void
2848 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2849 {
2850 	struct adapter *sc = iflib_get_softc(ctx);
2851 	struct ixgbe_hw *hw = &sc->hw;
2852 	/* crc stripping is set in two places:
2853 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2854 	 * IXGBE_RDRXCTL (set by the original driver in
2855 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2856 	 *	We disable the setting when netmap is compiled in).
2857 	 * We update the values here, but also in ixgbe.c because
2858 	 * init_locked sometimes is called outside our control.
2859 	 */
2860 	uint32_t hl, rxc;
2861 
2862 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2863 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2864 #ifdef NETMAP
2865 	if (netmap_verbose)
2866 		D("%s read  HLREG 0x%x rxc 0x%x",
2867 			onoff ? "enter" : "exit", hl, rxc);
2868 #endif
2869 	/* hw requirements ... */
2870 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2871 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2872 	if (onoff && !crcstrip) {
2873 		/* keep the crc. Fast rx */
2874 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2875 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2876 	} else {
2877 		/* reset default mode */
2878 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2879 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2880 	}
2881 #ifdef NETMAP
2882 	if (netmap_verbose)
2883 		D("%s write HLREG 0x%x rxc 0x%x",
2884 			onoff ? "enter" : "exit", hl, rxc);
2885 #endif
2886 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2887 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2888 } /* ixgbe_if_crcstrip_set */
2889 
2890 /*********************************************************************
2891  * ixgbe_if_init - Init entry point
2892  *
2893  *   Used in two ways: it is used by the stack as an init
2894  *   entry point in the network interface structure, and it
2895  *   is used by the driver as a hw/sw initialization routine
2896  *   to get to a consistent state.
2897  *
2898  *   Return 0 on success, positive on failure
2899  **********************************************************************/
2900 void
2901 ixgbe_if_init(if_ctx_t ctx)
2902 {
2903 	struct adapter     *adapter = iflib_get_softc(ctx);
2904 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2905 	device_t           dev = iflib_get_dev(ctx);
2906 	struct ixgbe_hw *hw = &adapter->hw;
2907 	struct ix_rx_queue *rx_que;
2908 	struct ix_tx_queue *tx_que;
2909 	u32             txdctl, mhadd;
2910 	u32             rxdctl, rxctrl;
2911 	u32             ctrl_ext;
2912 
2913 	int             i, j, err;
2914 
2915 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2916 
2917 	/* Queue indices may change with IOV mode */
2918 	ixgbe_align_all_queue_indices(adapter);
2919 
2920 	/* reprogram the RAR[0] in case user changed it. */
2921 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2922 
2923 	/* Get the latest mac address, User can use a LAA */
2924 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2925 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2926 	hw->addr_ctrl.rar_used_count = 1;
2927 
2928 	ixgbe_init_hw(hw);
2929 
2930 	ixgbe_initialize_iov(adapter);
2931 
2932 	ixgbe_initialize_transmit_units(ctx);
2933 
2934 	/* Setup Multicast table */
2935 	ixgbe_if_multi_set(ctx);
2936 
2937 	/* Determine the correct mbuf pool, based on frame size */
2938 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2939 
2940 	/* Configure RX settings */
2941 	ixgbe_initialize_receive_units(ctx);
2942 
2943 	/*
2944 	 * Initialize variable holding task enqueue requests
2945 	 * from MSI-X interrupts
2946 	 */
2947 	adapter->task_requests = 0;
2948 
2949 	/* Enable SDP & MSI-X interrupts based on adapter */
2950 	ixgbe_config_gpie(adapter);
2951 
2952 	/* Set MTU size */
2953 	if (ifp->if_mtu > ETHERMTU) {
2954 		/* aka IXGBE_MAXFRS on 82599 and newer */
2955 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2956 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2957 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2958 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2959 	}
2960 
2961 	/* Now enable all the queues */
2962 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2963 		struct tx_ring *txr = &tx_que->txr;
2964 
2965 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2966 		txdctl |= IXGBE_TXDCTL_ENABLE;
2967 		/* Set WTHRESH to 8, burst writeback */
2968 		txdctl |= (8 << 16);
2969 		/*
2970 		 * When the internal queue falls below PTHRESH (32),
2971 		 * start prefetching as long as there are at least
2972 		 * HTHRESH (1) buffers ready. The values are taken
2973 		 * from the Intel linux driver 3.8.21.
2974 		 * Prefetching enables tx line rate even with 1 queue.
2975 		 */
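		/*
		 * The threshold fields sit at bit offsets 0 (PTHRESH),
		 * 8 (HTHRESH) and 16 (WTHRESH) within TXDCTL.
		 */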
2976 		txdctl |= (32 << 0) | (1 << 8);
2977 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2978 	}
2979 
2980 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2981 		struct rx_ring *rxr = &rx_que->rxr;
2982 
2983 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2984 		if (hw->mac.type == ixgbe_mac_82598EB) {
2985 			/*
2986 			 * PTHRESH = 21
2987 			 * HTHRESH = 4
2988 			 * WTHRESH = 8
2989 			 */
2990 			rxdctl &= ~0x3FFFFF;
2991 			rxdctl |= 0x080420;
2992 		}
2993 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2994 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
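		/* Wait up to 10 ms for the hardware to latch RXDCTL.ENABLE */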
2995 		for (j = 0; j < 10; j++) {
2996 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2997 			    IXGBE_RXDCTL_ENABLE)
2998 				break;
2999 			else
3000 				msec_delay(1);
3001 		}
3002 		wmb();
3003 	}
3004 
3005 	/* Enable Receive engine */
3006 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3007 	if (hw->mac.type == ixgbe_mac_82598EB)
3008 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
3009 	rxctrl |= IXGBE_RXCTRL_RXEN;
3010 	ixgbe_enable_rx_dma(hw, rxctrl);
3011 
3012 	/* Set up MSI/MSI-X routing */
	if (ixgbe_enable_msix) {
3014 		ixgbe_configure_ivars(adapter);
3015 		/* Set up auto-mask */
3016 		if (hw->mac.type == ixgbe_mac_82598EB)
3017 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3018 		else {
3019 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3020 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3021 		}
3022 	} else {  /* Simple settings for Legacy/MSI */
3023 		ixgbe_set_ivar(adapter, 0, 0, 0);
3024 		ixgbe_set_ivar(adapter, 0, 0, 1);
3025 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3026 	}
3027 
3028 	ixgbe_init_fdir(adapter);
3029 
3030 	/*
3031 	 * Check on any SFP devices that
3032 	 * need to be kick-started
3033 	 */
3034 	if (hw->phy.type == ixgbe_phy_none) {
3035 		err = hw->phy.ops.identify(hw);
3036 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3037 			device_printf(dev,
3038 			    "Unsupported SFP+ module type was detected.\n");
3039 			return;
3040 		}
3041 	}
3042 
3043 	/* Set moderation on the Link interrupt */
3044 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3045 
3046 	/* Enable power to the phy. */
3047 	ixgbe_set_phy_power(hw, TRUE);
3048 
3049 	/* Config/Enable Link */
3050 	ixgbe_config_link(ctx);
3051 
3052 	/* Hardware Packet Buffer & Flow Control setup */
3053 	ixgbe_config_delay_values(adapter);
3054 
3055 	/* Initialize the FC settings */
3056 	ixgbe_start_hw(hw);
3057 
3058 	/* Set up VLAN support and filter */
3059 	ixgbe_setup_vlan_hw_support(ctx);
3060 
3061 	/* Setup DMA Coalescing */
3062 	ixgbe_config_dmac(adapter);
3063 
3064 	/* And now turn on interrupts */
3065 	ixgbe_if_enable_intr(ctx);
3066 
3067 	/* Enable the use of the MBX by the VF's */
3068 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3069 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3070 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3071 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3072 	}
3073 
} /* ixgbe_if_init */
3075 
3076 /************************************************************************
3077  * ixgbe_set_ivar
3078  *
3079  *   Setup the correct IVAR register for a particular MSI-X interrupt
3080  *     (yes this is all very magic and confusing :)
3081  *    - entry is the register array entry
3082  *    - vector is the MSI-X vector for this queue
3083  *    - type is RX/TX/MISC
3084  ************************************************************************/
3085 static void
3086 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3087 {
3088 	struct ixgbe_hw *hw = &adapter->hw;
3089 	u32 ivar, index;
3090 
3091 	vector |= IXGBE_IVAR_ALLOC_VAL;
3092 
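	/*
	 * Illustrative mapping, derived from the math below: on 82599 and
	 * later, each IVAR register holds four 8-bit entries; an RX queue
	 * (type 0) with entry 5 lands in IVAR(5 >> 1) = IVAR(2) at bit
	 * offset 16 * (5 & 1) + 8 * 0 = 16 (bits 23:16), while the matching
	 * TX queue (type 1) uses bits 31:24.  On 82598 the entry is first
	 * offset by type * 64 and then packed four per IVAR register.
	 */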
3093 	switch (hw->mac.type) {
3094 	case ixgbe_mac_82598EB:
3095 		if (type == -1)
3096 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3097 		else
3098 			entry += (type * 64);
3099 		index = (entry >> 2) & 0x1F;
3100 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3101 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3102 		ivar |= (vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3104 		break;
3105 	case ixgbe_mac_82599EB:
3106 	case ixgbe_mac_X540:
3107 	case ixgbe_mac_X550:
3108 	case ixgbe_mac_X550EM_x:
3109 	case ixgbe_mac_X550EM_a:
3110 		if (type == -1) { /* MISC IVAR */
3111 			index = (entry & 1) * 8;
3112 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3113 			ivar &= ~(0xFF << index);
3114 			ivar |= (vector << index);
3115 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3116 		} else {          /* RX/TX IVARS */
3117 			index = (16 * (entry & 1)) + (8 * type);
3118 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3119 			ivar &= ~(0xFF << index);
3120 			ivar |= (vector << index);
3121 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3122 		}
		break;
	default:
3124 		break;
3125 	}
3126 } /* ixgbe_set_ivar */
3127 
3128 /************************************************************************
3129  * ixgbe_configure_ivars
3130  ************************************************************************/
3131 static void
3132 ixgbe_configure_ivars(struct adapter *adapter)
3133 {
3134 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3135 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3136 	u32                newitr;
3137 
3138 	if (ixgbe_max_interrupt_rate > 0)
3139 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3140 	else {
3141 		/*
3142 		 * Disable DMA coalescing if interrupt moderation is
3143 		 * disabled.
3144 		 */
3145 		adapter->dmac = 0;
3146 		newitr = 0;
3147 	}
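	/*
	 * Illustrative example: a requested max rate of 31250 interrupts/s
	 * gives newitr = 4000000 / 31250 = 128 (0x80); the 0x0FF8 mask keeps
	 * the value within the EITR interval field (bits 11:3).
	 */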
3148 
3149 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3150 		struct rx_ring *rxr = &rx_que->rxr;
3151 
3152 		/* First the RX queue entry */
3153 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3154 
3155 		/* Set an Initial EITR value */
3156 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3157 	}
3158 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3159 		struct tx_ring *txr = &tx_que->txr;
3160 
3161 		/* ... and the TX */
3162 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3163 	}
3164 	/* For the Link interrupt */
3165 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3166 } /* ixgbe_configure_ivars */
3167 
3168 /************************************************************************
3169  * ixgbe_config_gpie
3170  ************************************************************************/
3171 static void
3172 ixgbe_config_gpie(struct adapter *adapter)
3173 {
3174 	struct ixgbe_hw *hw = &adapter->hw;
3175 	u32             gpie;
3176 
3177 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3178 
3179 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3180 		/* Enable Enhanced MSI-X mode */
3181 		gpie |= IXGBE_GPIE_MSIX_MODE
3182 		     |  IXGBE_GPIE_EIAME
3183 		     |  IXGBE_GPIE_PBA_SUPPORT
3184 		     |  IXGBE_GPIE_OCD;
3185 	}
3186 
3187 	/* Fan Failure Interrupt */
3188 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3189 		gpie |= IXGBE_SDP1_GPIEN;
3190 
3191 	/* Thermal Sensor Interrupt */
3192 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3193 		gpie |= IXGBE_SDP0_GPIEN_X540;
3194 
3195 	/* Link detection */
3196 	switch (hw->mac.type) {
3197 	case ixgbe_mac_82599EB:
3198 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3199 		break;
3200 	case ixgbe_mac_X550EM_x:
3201 	case ixgbe_mac_X550EM_a:
3202 		gpie |= IXGBE_SDP0_GPIEN_X540;
3203 		break;
3204 	default:
3205 		break;
3206 	}
3207 
3208 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3209 
3210 } /* ixgbe_config_gpie */
3211 
3212 /************************************************************************
3213  * ixgbe_config_delay_values
3214  *
3215  *   Requires adapter->max_frame_size to be set.
3216  ************************************************************************/
3217 static void
3218 ixgbe_config_delay_values(struct adapter *adapter)
3219 {
3220 	struct ixgbe_hw *hw = &adapter->hw;
3221 	u32             rxpb, frame, size, tmp;
3222 
3223 	frame = adapter->max_frame_size;
3224 
3225 	/* Calculate High Water */
3226 	switch (hw->mac.type) {
3227 	case ixgbe_mac_X540:
3228 	case ixgbe_mac_X550:
3229 	case ixgbe_mac_X550EM_x:
3230 	case ixgbe_mac_X550EM_a:
3231 		tmp = IXGBE_DV_X540(frame, frame);
3232 		break;
3233 	default:
3234 		tmp = IXGBE_DV(frame, frame);
3235 		break;
3236 	}
3237 	size = IXGBE_BT2KB(tmp);
3238 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
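	/* High water: packet-buffer size (KB) minus the delay allowance above */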
3239 	hw->fc.high_water[0] = rxpb - size;
3240 
3241 	/* Now calculate Low Water */
3242 	switch (hw->mac.type) {
3243 	case ixgbe_mac_X540:
3244 	case ixgbe_mac_X550:
3245 	case ixgbe_mac_X550EM_x:
3246 	case ixgbe_mac_X550EM_a:
3247 		tmp = IXGBE_LOW_DV_X540(frame);
3248 		break;
3249 	default:
3250 		tmp = IXGBE_LOW_DV(frame);
3251 		break;
3252 	}
3253 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3254 
3255 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3256 	hw->fc.send_xon = TRUE;
3257 } /* ixgbe_config_delay_values */
3258 
3259 /************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 *   Called whenever the multicast address list is updated.
3263  ************************************************************************/
3264 static u_int
3265 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count)
3266 {
3267 	struct adapter *adapter = arg;
3268 	struct ixgbe_mc_addr *mta = adapter->mta;
3269 
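	/*
	 * Once the table is full, skip the remaining addresses;
	 * ixgbe_if_multi_set() falls back to multicast-promiscuous
	 * mode when mcnt reaches MAX_NUM_MULTICAST_ADDRESSES.
	 */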
3270 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3271 		return (0);
3272 	bcopy(LLADDR(sdl), mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3273 	mta[count].vmdq = adapter->pool;
3274 
3275 	return (1);
3276 } /* ixgbe_mc_filter_apply */
3277 
3278 static void
3279 ixgbe_if_multi_set(if_ctx_t ctx)
3280 {
3281 	struct adapter       *adapter = iflib_get_softc(ctx);
3282 	struct ixgbe_mc_addr *mta;
3283 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3284 	u8                   *update_ptr;
3285 	u32                  fctrl;
3286 	u_int		     mcnt;
3287 
3288 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3289 
3290 	mta = adapter->mta;
3291 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3292 
3293 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3294 	    adapter);
3295 
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3298 	if (ifp->if_flags & IFF_PROMISC)
3299 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3300 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3301 	    ifp->if_flags & IFF_ALLMULTI) {
3302 		fctrl |= IXGBE_FCTRL_MPE;
3303 		fctrl &= ~IXGBE_FCTRL_UPE;
3304 	} else
3305 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3306 
3307 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3308 
3309 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3310 		update_ptr = (u8 *)mta;
3311 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3312 		    ixgbe_mc_array_itr, TRUE);
3313 	}
3314 
3315 } /* ixgbe_if_multi_set */
3316 
3317 /************************************************************************
3318  * ixgbe_mc_array_itr
3319  *
3320  *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses from the
 *   array built by ixgbe_if_multi_set(), one at a time.
3323  ************************************************************************/
3324 static u8 *
3325 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3326 {
3327 	struct ixgbe_mc_addr *mta;
3328 
3329 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3330 	*vmdq = mta->vmdq;
3331 
3332 	*update_ptr = (u8*)(mta + 1);
3333 
3334 	return (mta->addr);
3335 } /* ixgbe_mc_array_itr */
3336 
3337 /************************************************************************
 * ixgbe_if_timer - Timer routine
 *
 *   Checks for pluggable optics and link status, then
 *   defers further handling to the admin task.
3342  ************************************************************************/
3343 static void
3344 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3345 {
3346 	struct adapter *adapter = iflib_get_softc(ctx);
3347 
3348 	if (qid != 0)
3349 		return;
3350 
3351 	/* Check for pluggable optics */
3352 	if (adapter->sfp_probe)
3353 		if (!ixgbe_sfp_probe(ctx))
3354 			return; /* Nothing to do */
3355 
3356 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3357 	    &adapter->link_up, 0);
3358 
3359 	/* Fire off the adminq task */
3360 	iflib_admin_intr_deferred(ctx);
3361 
3362 } /* ixgbe_if_timer */
3363 
3364 /************************************************************************
3365  * ixgbe_sfp_probe
3366  *
 *   Determine whether an SFP module has been inserted into the port.
3368  ************************************************************************/
3369 static bool
3370 ixgbe_sfp_probe(if_ctx_t ctx)
3371 {
3372 	struct adapter  *adapter = iflib_get_softc(ctx);
3373 	struct ixgbe_hw *hw = &adapter->hw;
3374 	device_t        dev = iflib_get_dev(ctx);
3375 	bool            result = FALSE;
3376 
3377 	if ((hw->phy.type == ixgbe_phy_nl) &&
3378 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3379 		s32 ret = hw->phy.ops.identify_sfp(hw);
3380 		if (ret)
3381 			goto out;
3382 		ret = hw->phy.ops.reset(hw);
3383 		adapter->sfp_probe = FALSE;
3384 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3385 			device_printf(dev, "Unsupported SFP+ module detected!");
3386 			device_printf(dev,
3387 			    "Reload driver with supported module.\n");
3388 			goto out;
3389 		} else
3390 			device_printf(dev, "SFP+ module detected!\n");
3391 		/* We now have supported optics */
3392 		result = TRUE;
3393 	}
3394 out:
3395 
3396 	return (result);
3397 } /* ixgbe_sfp_probe */
3398 
3399 /************************************************************************
3400  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3401  ************************************************************************/
3402 static void
3403 ixgbe_handle_mod(void *context)
3404 {
3405 	if_ctx_t        ctx = context;
3406 	struct adapter  *adapter = iflib_get_softc(ctx);
3407 	struct ixgbe_hw *hw = &adapter->hw;
3408 	device_t        dev = iflib_get_dev(ctx);
3409 	u32             err, cage_full = 0;
3410 
3411 	if (adapter->hw.need_crosstalk_fix) {
3412 		switch (hw->mac.type) {
3413 		case ixgbe_mac_82599EB:
3414 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3415 			    IXGBE_ESDP_SDP2;
3416 			break;
3417 		case ixgbe_mac_X550EM_x:
3418 		case ixgbe_mac_X550EM_a:
3419 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3420 			    IXGBE_ESDP_SDP0;
3421 			break;
3422 		default:
3423 			break;
3424 		}
3425 
3426 		if (!cage_full)
3427 			goto handle_mod_out;
3428 	}
3429 
3430 	err = hw->phy.ops.identify_sfp(hw);
3431 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3432 		device_printf(dev,
3433 		    "Unsupported SFP+ module type was detected.\n");
3434 		goto handle_mod_out;
3435 	}
3436 
3437 	if (hw->mac.type == ixgbe_mac_82598EB)
3438 		err = hw->phy.ops.reset(hw);
3439 	else
3440 		err = hw->mac.ops.setup_sfp(hw);
3441 
3442 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3443 		device_printf(dev,
3444 		    "Setup failure - unsupported SFP+ module type.\n");
3445 		goto handle_mod_out;
3446 	}
3447 	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3448 	return;
3449 
3450 handle_mod_out:
3451 	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3452 } /* ixgbe_handle_mod */
3453 
3454 
3455 /************************************************************************
3456  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3457  ************************************************************************/
3458 static void
3459 ixgbe_handle_msf(void *context)
3460 {
3461 	if_ctx_t        ctx = context;
3462 	struct adapter  *adapter = iflib_get_softc(ctx);
3463 	struct ixgbe_hw *hw = &adapter->hw;
3464 	u32             autoneg;
3465 	bool            negotiate;
3466 
	/* ixgbe_get_supported_physical_layer() will call hw->phy.ops.identify_sfp() */
3468 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3469 
3470 	autoneg = hw->phy.autoneg_advertised;
3471 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3472 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3473 	if (hw->mac.ops.setup_link)
3474 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3475 
3476 	/* Adjust media types shown in ifconfig */
3477 	ifmedia_removeall(adapter->media);
3478 	ixgbe_add_media_types(adapter->ctx);
3479 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3480 } /* ixgbe_handle_msf */
3481 
3482 /************************************************************************
3483  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3484  ************************************************************************/
3485 static void
3486 ixgbe_handle_phy(void *context)
3487 {
3488 	if_ctx_t        ctx = context;
3489 	struct adapter  *adapter = iflib_get_softc(ctx);
3490 	struct ixgbe_hw *hw = &adapter->hw;
3491 	int             error;
3492 
3493 	error = hw->phy.ops.handle_lasi(hw);
3494 	if (error == IXGBE_ERR_OVERTEMP)
3495 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3496 	else if (error)
3497 		device_printf(adapter->dev,
3498 		    "Error handling LASI interrupt: %d\n", error);
3499 } /* ixgbe_handle_phy */
3500 
3501 /************************************************************************
3502  * ixgbe_if_stop - Stop the hardware
3503  *
3504  *   Disables all traffic on the adapter by issuing a
3505  *   global reset on the MAC and deallocates TX/RX buffers.
3506  ************************************************************************/
3507 static void
3508 ixgbe_if_stop(if_ctx_t ctx)
3509 {
3510 	struct adapter  *adapter = iflib_get_softc(ctx);
3511 	struct ixgbe_hw *hw = &adapter->hw;
3512 
3513 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3514 
3515 	ixgbe_reset_hw(hw);
3516 	hw->adapter_stopped = FALSE;
3517 	ixgbe_stop_adapter(hw);
3518 	if (hw->mac.type == ixgbe_mac_82599EB)
3519 		ixgbe_stop_mac_link_on_d3_82599(hw);
3520 	/* Turn off the laser - noop with no optics */
3521 	ixgbe_disable_tx_laser(hw);
3522 
3523 	/* Update the stack */
3524 	adapter->link_up = FALSE;
3525 	ixgbe_if_update_admin_status(ctx);
3526 
3527 	/* reprogram the RAR[0] in case user changed it. */
3528 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3529 
3530 	return;
3531 } /* ixgbe_if_stop */
3532 
3533 /************************************************************************
 * ixgbe_if_update_admin_status - Update OS on link state
3535  *
3536  * Note: Only updates the OS on the cached link state.
3537  *       The real check of the hardware only happens with
3538  *       a link interrupt.
3539  ************************************************************************/
3540 static void
3541 ixgbe_if_update_admin_status(if_ctx_t ctx)
3542 {
3543 	struct adapter *adapter = iflib_get_softc(ctx);
3544 	device_t       dev = iflib_get_dev(ctx);
3545 
3546 	if (adapter->link_up) {
3547 		if (adapter->link_active == FALSE) {
3548 			if (bootverbose)
3549 				device_printf(dev, "Link is up %d Gbps %s \n",
3550 				    ((adapter->link_speed == 128) ? 10 : 1),
3551 				    "Full Duplex");
3552 			adapter->link_active = TRUE;
3553 			/* Update any Flow Control changes */
3554 			ixgbe_fc_enable(&adapter->hw);
3555 			/* Update DMA coalescing config */
3556 			ixgbe_config_dmac(adapter);
3557 			/* should actually be negotiated value */
3558 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3559 
3560 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3561 				ixgbe_ping_all_vfs(adapter);
3562 		}
3563 	} else { /* Link down */
3564 		if (adapter->link_active == TRUE) {
3565 			if (bootverbose)
3566 				device_printf(dev, "Link is Down\n");
3567 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3568 			adapter->link_active = FALSE;
3569 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3570 				ixgbe_ping_all_vfs(adapter);
3571 		}
3572 	}
3573 
3574 	/* Handle task requests from msix_link() */
3575 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3576 		ixgbe_handle_mod(ctx);
3577 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3578 		ixgbe_handle_msf(ctx);
3579 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3580 		ixgbe_handle_mbx(ctx);
3581 	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3582 		ixgbe_reinit_fdir(ctx);
3583 	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3584 		ixgbe_handle_phy(ctx);
3585 	adapter->task_requests = 0;
3586 
3587 	ixgbe_update_stats_counters(adapter);
3588 } /* ixgbe_if_update_admin_status */
3589 
3590 /************************************************************************
3591  * ixgbe_config_dmac - Configure DMA Coalescing
3592  ************************************************************************/
3593 static void
3594 ixgbe_config_dmac(struct adapter *adapter)
3595 {
3596 	struct ixgbe_hw          *hw = &adapter->hw;
3597 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3598 
3599 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3600 		return;
3601 
3602 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3603 	    dcfg->link_speed ^ adapter->link_speed) {
3604 		dcfg->watchdog_timer = adapter->dmac;
3605 		dcfg->fcoe_en = FALSE;
3606 		dcfg->link_speed = adapter->link_speed;
3607 		dcfg->num_tcs = 1;
3608 
3609 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3610 		    dcfg->watchdog_timer, dcfg->link_speed);
3611 
3612 		hw->mac.ops.dmac_config(hw);
3613 	}
3614 } /* ixgbe_config_dmac */
3615 
3616 /************************************************************************
3617  * ixgbe_if_enable_intr
3618  ************************************************************************/
3619 void
3620 ixgbe_if_enable_intr(if_ctx_t ctx)
3621 {
3622 	struct adapter     *adapter = iflib_get_softc(ctx);
3623 	struct ixgbe_hw    *hw = &adapter->hw;
3624 	struct ix_rx_queue *que = adapter->rx_queues;
3625 	u32                mask, fwsm;
3626 
3627 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3628 
3629 	switch (adapter->hw.mac.type) {
3630 	case ixgbe_mac_82599EB:
3631 		mask |= IXGBE_EIMS_ECC;
3632 		/* Temperature sensor on some adapters */
3633 		mask |= IXGBE_EIMS_GPI_SDP0;
3634 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3635 		mask |= IXGBE_EIMS_GPI_SDP1;
3636 		mask |= IXGBE_EIMS_GPI_SDP2;
3637 		break;
3638 	case ixgbe_mac_X540:
3639 		/* Detect if Thermal Sensor is enabled */
3640 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3641 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3642 			mask |= IXGBE_EIMS_TS;
3643 		mask |= IXGBE_EIMS_ECC;
3644 		break;
3645 	case ixgbe_mac_X550:
3646 		/* MAC thermal sensor is automatically enabled */
3647 		mask |= IXGBE_EIMS_TS;
3648 		mask |= IXGBE_EIMS_ECC;
3649 		break;
3650 	case ixgbe_mac_X550EM_x:
3651 	case ixgbe_mac_X550EM_a:
3652 		/* Some devices use SDP0 for important information */
3653 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3654 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3655 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3656 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3657 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3658 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3659 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3660 		mask |= IXGBE_EIMS_ECC;
3661 		break;
3662 	default:
3663 		break;
3664 	}
3665 
3666 	/* Enable Fan Failure detection */
3667 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3668 		mask |= IXGBE_EIMS_GPI_SDP1;
3669 	/* Enable SR-IOV */
3670 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3671 		mask |= IXGBE_EIMS_MAILBOX;
3672 	/* Enable Flow Director */
3673 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3674 		mask |= IXGBE_EIMS_FLOW_DIR;
3675 
3676 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3677 
3678 	/* With MSI-X we use auto clear */
3679 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3680 		mask = IXGBE_EIMS_ENABLE_MASK;
3681 		/* Don't autoclear Link */
3682 		mask &= ~IXGBE_EIMS_OTHER;
3683 		mask &= ~IXGBE_EIMS_LSC;
3684 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3685 			mask &= ~IXGBE_EIMS_MAILBOX;
3686 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3687 	}
3688 
3689 	/*
	 * Now enable all queues; this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599.
3693 	 */
3694 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3695 		ixgbe_enable_queue(adapter, que->msix);
3696 
3697 	IXGBE_WRITE_FLUSH(hw);
3698 
3699 } /* ixgbe_if_enable_intr */
3700 
3701 /************************************************************************
 * ixgbe_if_disable_intr
3703  ************************************************************************/
3704 static void
3705 ixgbe_if_disable_intr(if_ctx_t ctx)
3706 {
3707 	struct adapter *adapter = iflib_get_softc(ctx);
3708 
3709 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3710 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3711 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3712 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3713 	} else {
3714 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3715 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3716 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3717 	}
3718 	IXGBE_WRITE_FLUSH(&adapter->hw);
3719 
3720 } /* ixgbe_if_disable_intr */
3721 
3722 /************************************************************************
3723  * ixgbe_link_intr_enable
3724  ************************************************************************/
3725 static void
3726 ixgbe_link_intr_enable(if_ctx_t ctx)
3727 {
3728 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3729 
3730 	/* Re-enable other interrupts */
3731 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3732 } /* ixgbe_link_intr_enable */
3733 
3734 /************************************************************************
3735  * ixgbe_if_rx_queue_intr_enable
3736  ************************************************************************/
3737 static int
3738 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3739 {
3740 	struct adapter     *adapter = iflib_get_softc(ctx);
3741 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3742 
3743 	ixgbe_enable_queue(adapter, que->msix);
3744 
3745 	return (0);
3746 } /* ixgbe_if_rx_queue_intr_enable */
3747 
3748 /************************************************************************
3749  * ixgbe_enable_queue
3750  ************************************************************************/
3751 static void
3752 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3753 {
3754 	struct ixgbe_hw *hw = &adapter->hw;
3755 	u64             queue = 1ULL << vector;
3756 	u32             mask;
3757 
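	/*
	 * Queue vectors 0-31 map to EIMS/EIMS_EX(0), 32-63 to EIMS_EX(1);
	 * e.g. vector 35 sets bit 3 of EIMS_EX(1).
	 */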
3758 	if (hw->mac.type == ixgbe_mac_82598EB) {
3759 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3760 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3761 	} else {
3762 		mask = (queue & 0xFFFFFFFF);
3763 		if (mask)
3764 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3765 		mask = (queue >> 32);
3766 		if (mask)
3767 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3768 	}
3769 } /* ixgbe_enable_queue */
3770 
3771 /************************************************************************
3772  * ixgbe_disable_queue
3773  ************************************************************************/
3774 static void
3775 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3776 {
3777 	struct ixgbe_hw *hw = &adapter->hw;
3778 	u64             queue = 1ULL << vector;
3779 	u32             mask;
3780 
3781 	if (hw->mac.type == ixgbe_mac_82598EB) {
3782 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3783 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3784 	} else {
3785 		mask = (queue & 0xFFFFFFFF);
3786 		if (mask)
3787 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3788 		mask = (queue >> 32);
3789 		if (mask)
3790 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3791 	}
3792 } /* ixgbe_disable_queue */
3793 
3794 /************************************************************************
3795  * ixgbe_intr - Legacy Interrupt Service Routine
3796  ************************************************************************/
3797 int
3798 ixgbe_intr(void *arg)
3799 {
3800 	struct adapter     *adapter = arg;
3801 	struct ix_rx_queue *que = adapter->rx_queues;
3802 	struct ixgbe_hw    *hw = &adapter->hw;
3803 	if_ctx_t           ctx = adapter->ctx;
3804 	u32                eicr, eicr_mask;
3805 
3806 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3807 
3808 	++que->irqs;
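	/* No cause bits set: just re-enable interrupts and report it handled */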
3809 	if (eicr == 0) {
3810 		ixgbe_if_enable_intr(ctx);
3811 		return (FILTER_HANDLED);
3812 	}
3813 
3814 	/* Check for fan failure */
3815 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3816 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3817 		device_printf(adapter->dev,
3818 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3819 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3820 	}
3821 
3822 	/* Link status change */
3823 	if (eicr & IXGBE_EICR_LSC) {
3824 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3825 		iflib_admin_intr_deferred(ctx);
3826 	}
3827 
3828 	if (ixgbe_is_sfp(hw)) {
3829 		/* Pluggable optics-related interrupt */
3830 		if (hw->mac.type >= ixgbe_mac_X540)
3831 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3832 		else
3833 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3834 
3835 		if (eicr & eicr_mask) {
3836 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3837 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3838 		}
3839 
3840 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3841 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3842 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3843 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3844 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3845 		}
3846 	}
3847 
3848 	/* External PHY interrupt */
3849 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3850 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3851 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3852 
3853 	return (FILTER_SCHEDULE_THREAD);
3854 } /* ixgbe_intr */
3855 
3856 /************************************************************************
3857  * ixgbe_free_pci_resources
3858  ************************************************************************/
3859 static void
3860 ixgbe_free_pci_resources(if_ctx_t ctx)
3861 {
3862 	struct adapter *adapter = iflib_get_softc(ctx);
3863 	struct         ix_rx_queue *que = adapter->rx_queues;
3864 	device_t       dev = iflib_get_dev(ctx);
3865 
3866 	/* Release all MSI-X queue resources */
3867 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3868 		iflib_irq_free(ctx, &adapter->irq);
3869 
3870 	if (que != NULL) {
3871 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3872 			iflib_irq_free(ctx, &que->que_irq);
3873 		}
3874 	}
3875 
3876 	if (adapter->pci_mem != NULL)
3877 		bus_release_resource(dev, SYS_RES_MEMORY,
3878 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3879 } /* ixgbe_free_pci_resources */
3880 
3881 /************************************************************************
3882  * ixgbe_sysctl_flowcntl
3883  *
3884  *   SYSCTL wrapper around setting Flow Control
3885  ************************************************************************/
3886 static int
3887 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3888 {
3889 	struct adapter *adapter;
3890 	int            error, fc;
3891 
3892 	adapter = (struct adapter *)arg1;
3893 	fc = adapter->hw.fc.current_mode;
3894 
3895 	error = sysctl_handle_int(oidp, &fc, 0, req);
3896 	if ((error) || (req->newptr == NULL))
3897 		return (error);
3898 
3899 	/* Don't bother if it's not changed */
3900 	if (fc == adapter->hw.fc.current_mode)
3901 		return (0);
3902 
3903 	return ixgbe_set_flowcntl(adapter, fc);
3904 } /* ixgbe_sysctl_flowcntl */
3905 
3906 /************************************************************************
3907  * ixgbe_set_flowcntl - Set flow control
3908  *
3909  *   Flow control values:
3910  *     0 - off
3911  *     1 - rx pause
3912  *     2 - tx pause
3913  *     3 - full
3914  ************************************************************************/
3915 static int
3916 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3917 {
3918 	switch (fc) {
3919 	case ixgbe_fc_rx_pause:
3920 	case ixgbe_fc_tx_pause:
3921 	case ixgbe_fc_full:
3922 		adapter->hw.fc.requested_mode = fc;
3923 		if (adapter->num_rx_queues > 1)
3924 			ixgbe_disable_rx_drop(adapter);
3925 		break;
3926 	case ixgbe_fc_none:
3927 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3928 		if (adapter->num_rx_queues > 1)
3929 			ixgbe_enable_rx_drop(adapter);
3930 		break;
3931 	default:
3932 		return (EINVAL);
3933 	}
3934 
3935 	/* Don't autoneg if forcing a value */
3936 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3937 	ixgbe_fc_enable(&adapter->hw);
3938 
3939 	return (0);
3940 } /* ixgbe_set_flowcntl */
3941 
3942 /************************************************************************
3943  * ixgbe_enable_rx_drop
3944  *
3945  *   Enable the hardware to drop packets when the buffer is
3946  *   full. This is useful with multiqueue, so that no single
3947  *   queue being full stalls the entire RX engine. We only
3948  *   enable this when Multiqueue is enabled AND Flow Control
3949  *   is disabled.
3950  ************************************************************************/
3951 static void
3952 ixgbe_enable_rx_drop(struct adapter *adapter)
3953 {
3954 	struct ixgbe_hw *hw = &adapter->hw;
3955 	struct rx_ring  *rxr;
3956 	u32             srrctl;
3957 
3958 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3959 		rxr = &adapter->rx_queues[i].rxr;
3960 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3961 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3962 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3963 	}
3964 
3965 	/* enable drop for each vf */
3966 	for (int i = 0; i < adapter->num_vfs; i++) {
3967 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3968 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3969 		                IXGBE_QDE_ENABLE));
3970 	}
3971 } /* ixgbe_enable_rx_drop */
3972 
3973 /************************************************************************
3974  * ixgbe_disable_rx_drop
3975  ************************************************************************/
3976 static void
3977 ixgbe_disable_rx_drop(struct adapter *adapter)
3978 {
3979 	struct ixgbe_hw *hw = &adapter->hw;
3980 	struct rx_ring  *rxr;
3981 	u32             srrctl;
3982 
3983 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3984 		rxr = &adapter->rx_queues[i].rxr;
3985 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3986 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3987 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3988 	}
3989 
3990 	/* disable drop for each vf */
3991 	for (int i = 0; i < adapter->num_vfs; i++) {
3992 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3993 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3994 	}
3995 } /* ixgbe_disable_rx_drop */
3996 
3997 /************************************************************************
3998  * ixgbe_sysctl_advertise
3999  *
4000  *   SYSCTL wrapper around setting advertised speed
4001  ************************************************************************/
4002 static int
4003 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4004 {
4005 	struct adapter *adapter;
4006 	int            error, advertise;
4007 
4008 	adapter = (struct adapter *)arg1;
4009 	advertise = adapter->advertise;
4010 
4011 	error = sysctl_handle_int(oidp, &advertise, 0, req);
4012 	if ((error) || (req->newptr == NULL))
4013 		return (error);
4014 
4015 	return ixgbe_set_advertise(adapter, advertise);
4016 } /* ixgbe_sysctl_advertise */
4017 
4018 /************************************************************************
4019  * ixgbe_set_advertise - Control advertised link speed
4020  *
4021  *   Flags:
4022  *     0x1 - advertise 100 Mb
4023  *     0x2 - advertise 1G
4024  *     0x4 - advertise 10G
4025  *     0x8 - advertise 10 Mb (yes, Mb)
4026  ************************************************************************/
4027 static int
4028 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4029 {
4030 	device_t         dev = iflib_get_dev(adapter->ctx);
4031 	struct ixgbe_hw  *hw;
4032 	ixgbe_link_speed speed = 0;
4033 	ixgbe_link_speed link_caps = 0;
4034 	s32              err = IXGBE_NOT_IMPLEMENTED;
4035 	bool             negotiate = FALSE;
4036 
4037 	/* Checks to validate new value */
4038 	if (adapter->advertise == advertise) /* no change */
4039 		return (0);
4040 
4041 	hw = &adapter->hw;
4042 
4043 	/* No speed changes for backplane media */
4044 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4045 		return (ENODEV);
4046 
4047 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4048 	      (hw->phy.multispeed_fiber))) {
4049 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4050 		return (EINVAL);
4051 	}
4052 
4053 	if (advertise < 0x1 || advertise > 0xF) {
4054 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4055 		return (EINVAL);
4056 	}
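	/* e.g. advertise = 0x6 requests 1Gb + 10Gb; 0xF requests all four speeds */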
4057 
4058 	if (hw->mac.ops.get_link_capabilities) {
4059 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4060 		    &negotiate);
4061 		if (err != IXGBE_SUCCESS) {
4062 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4063 			return (ENODEV);
4064 		}
4065 	}
4066 
4067 	/* Set new value and report new advertised mode */
4068 	if (advertise & 0x1) {
4069 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4070 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4071 			return (EINVAL);
4072 		}
4073 		speed |= IXGBE_LINK_SPEED_100_FULL;
4074 	}
4075 	if (advertise & 0x2) {
4076 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4077 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4078 			return (EINVAL);
4079 		}
4080 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4081 	}
4082 	if (advertise & 0x4) {
4083 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4084 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4085 			return (EINVAL);
4086 		}
4087 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4088 	}
4089 	if (advertise & 0x8) {
4090 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4091 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4092 			return (EINVAL);
4093 		}
4094 		speed |= IXGBE_LINK_SPEED_10_FULL;
4095 	}
4096 
4097 	hw->mac.autotry_restart = TRUE;
4098 	hw->mac.ops.setup_link(hw, speed, TRUE);
4099 	adapter->advertise = advertise;
4100 
4101 	return (0);
4102 } /* ixgbe_set_advertise */
4103 
4104 /************************************************************************
4105  * ixgbe_get_advertise - Get current advertised speed settings
4106  *
4107  *   Formatted for sysctl usage.
4108  *   Flags:
4109  *     0x1 - advertise 100 Mb
4110  *     0x2 - advertise 1G
4111  *     0x4 - advertise 10G
4112  *     0x8 - advertise 10 Mb (yes, Mb)
4113  ************************************************************************/
4114 static int
4115 ixgbe_get_advertise(struct adapter *adapter)
4116 {
4117 	struct ixgbe_hw  *hw = &adapter->hw;
4118 	int              speed;
4119 	ixgbe_link_speed link_caps = 0;
4120 	s32              err;
4121 	bool             negotiate = FALSE;
4122 
4123 	/*
4124 	 * Advertised speed means nothing unless it's copper or
4125 	 * multi-speed fiber
4126 	 */
4127 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4128 	    !(hw->phy.multispeed_fiber))
4129 		return (0);
4130 
4131 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4132 	if (err != IXGBE_SUCCESS)
4133 		return (0);
4134 
4135 	speed =
4136 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4137 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4138 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4139 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
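	/* e.g. a link supporting 100Mb, 1Gb and 10Gb reports 0x1 | 0x2 | 0x4 = 0x7 */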
4140 
4141 	return speed;
4142 } /* ixgbe_get_advertise */
4143 
4144 /************************************************************************
4145  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4146  *
4147  *   Control values:
4148  *     0/1 - off / on (use default value of 1000)
4149  *
4150  *     Legal timer values are:
4151  *     50,100,250,500,1000,2000,5000,10000
4152  *
4153  *     Turning off interrupt moderation will also turn this off.
4154  ************************************************************************/
4155 static int
4156 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4157 {
4158 	struct adapter *adapter = (struct adapter *)arg1;
4159 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4160 	int            error;
4161 	u16            newval;
4162 
4163 	newval = adapter->dmac;
4164 	error = sysctl_handle_16(oidp, &newval, 0, req);
4165 	if ((error) || (req->newptr == NULL))
4166 		return (error);
4167 
4168 	switch (newval) {
4169 	case 0:
4170 		/* Disabled */
4171 		adapter->dmac = 0;
4172 		break;
4173 	case 1:
4174 		/* Enable and use default */
4175 		adapter->dmac = 1000;
4176 		break;
4177 	case 50:
4178 	case 100:
4179 	case 250:
4180 	case 500:
4181 	case 1000:
4182 	case 2000:
4183 	case 5000:
4184 	case 10000:
4185 		/* Legal values - allow */
4186 		adapter->dmac = newval;
4187 		break;
4188 	default:
4189 		/* Do nothing, illegal value */
4190 		return (EINVAL);
4191 	}
4192 
4193 	/* Re-initialize hardware if it's already running */
4194 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4195 		ifp->if_init(ifp);
4196 
4197 	return (0);
4198 } /* ixgbe_sysctl_dmac */
4199 
4200 #ifdef IXGBE_DEBUG
4201 /************************************************************************
4202  * ixgbe_sysctl_power_state
4203  *
4204  *   Sysctl to test power states
4205  *   Values:
4206  *     0      - set device to D0
4207  *     3      - set device to D3
4208  *     (none) - get current device power state
4209  ************************************************************************/
4210 static int
4211 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4212 {
4213 	struct adapter *adapter = (struct adapter *)arg1;
4214 	device_t       dev = adapter->dev;
4215 	int            curr_ps, new_ps, error = 0;
4216 
4217 	curr_ps = new_ps = pci_get_powerstate(dev);
4218 
4219 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4220 	if ((error) || (req->newptr == NULL))
4221 		return (error);
4222 
4223 	if (new_ps == curr_ps)
4224 		return (0);
4225 
4226 	if (new_ps == 3 && curr_ps == 0)
4227 		error = DEVICE_SUSPEND(dev);
4228 	else if (new_ps == 0 && curr_ps == 3)
4229 		error = DEVICE_RESUME(dev);
4230 	else
4231 		return (EINVAL);
4232 
4233 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4234 
4235 	return (error);
4236 } /* ixgbe_sysctl_power_state */
4237 #endif
4238 
4239 /************************************************************************
4240  * ixgbe_sysctl_wol_enable
4241  *
4242  *   Sysctl to enable/disable the WoL capability,
4243  *   if supported by the adapter.
4244  *
4245  *   Values:
4246  *     0 - disabled
4247  *     1 - enabled
4248  ************************************************************************/
4249 static int
4250 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4251 {
4252 	struct adapter  *adapter = (struct adapter *)arg1;
4253 	struct ixgbe_hw *hw = &adapter->hw;
4254 	int             new_wol_enabled;
4255 	int             error = 0;
4256 
4257 	new_wol_enabled = hw->wol_enabled;
4258 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4259 	if ((error) || (req->newptr == NULL))
4260 		return (error);
4261 	new_wol_enabled = !!(new_wol_enabled);
4262 	if (new_wol_enabled == hw->wol_enabled)
4263 		return (0);
4264 
4265 	if (new_wol_enabled > 0 && !adapter->wol_support)
4266 		return (ENODEV);
4267 	else
4268 		hw->wol_enabled = new_wol_enabled;
4269 
4270 	return (0);
4271 } /* ixgbe_sysctl_wol_enable */
4272 
4273 /************************************************************************
4274  * ixgbe_sysctl_wufc - Wake Up Filter Control
4275  *
4276  *   Sysctl to enable/disable the types of packets that the
4277  *   adapter will wake up on upon receipt.
4278  *   Flags:
4279  *     0x1  - Link Status Change
4280  *     0x2  - Magic Packet
4281  *     0x4  - Direct Exact
4282  *     0x8  - Directed Multicast
4283  *     0x10 - Broadcast
4284  *     0x20 - ARP/IPv4 Request Packet
4285  *     0x40 - Direct IPv4 Packet
4286  *     0x80 - Direct IPv6 Packet
4287  *
4288  *   Settings not listed above will cause the sysctl to return an error.
4289  ************************************************************************/
4290 static int
4291 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4292 {
4293 	struct adapter *adapter = (struct adapter *)arg1;
4294 	int            error = 0;
4295 	u32            new_wufc;
4296 
4297 	new_wufc = adapter->wufc;
4298 
4299 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4300 	if ((error) || (req->newptr == NULL))
4301 		return (error);
4302 	if (new_wufc == adapter->wufc)
4303 		return (0);
4304 
4305 	if (new_wufc & 0xffffff00)
4306 		return (EINVAL);
4307 
4308 	new_wufc &= 0xff;
4309 	new_wufc |= (0xffffff & adapter->wufc);
4310 	adapter->wufc = new_wufc;
4311 
4312 	return (0);
4313 } /* ixgbe_sysctl_wufc */
4314 
4315 #ifdef IXGBE_DEBUG
4316 /************************************************************************
4317  * ixgbe_sysctl_print_rss_config
4318  ************************************************************************/
4319 static int
4320 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4321 {
4322 	struct adapter  *adapter = (struct adapter *)arg1;
4323 	struct ixgbe_hw *hw = &adapter->hw;
4324 	device_t        dev = adapter->dev;
4325 	struct sbuf     *buf;
4326 	int             error = 0, reta_size;
4327 	u32             reg;
4328 
4329 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4330 	if (!buf) {
4331 		device_printf(dev, "Could not allocate sbuf for output.\n");
4332 		return (ENOMEM);
4333 	}
4334 
	/* TODO: use sbufs to make a string to print out */
4336 	/* Set multiplier for RETA setup and table size based on MAC */
4337 	switch (adapter->hw.mac.type) {
4338 	case ixgbe_mac_X550:
4339 	case ixgbe_mac_X550EM_x:
4340 	case ixgbe_mac_X550EM_a:
4341 		reta_size = 128;
4342 		break;
4343 	default:
4344 		reta_size = 32;
4345 		break;
4346 	}
4347 
4348 	/* Print out the redirection table */
4349 	sbuf_cat(buf, "\n");
4350 	for (int i = 0; i < reta_size; i++) {
4351 		if (i < 32) {
4352 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4353 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4354 		} else {
4355 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4356 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4357 		}
4358 	}
4359 
	/* TODO: print more config */
4361 
4362 	error = sbuf_finish(buf);
4363 	if (error)
4364 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4365 
4366 	sbuf_delete(buf);
4367 
4368 	return (0);
4369 } /* ixgbe_sysctl_print_rss_config */
4370 #endif /* IXGBE_DEBUG */
4371 
4372 /************************************************************************
4373  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4374  *
4375  *   For X552/X557-AT devices using an external PHY
4376  ************************************************************************/
4377 static int
4378 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4379 {
4380 	struct adapter  *adapter = (struct adapter *)arg1;
4381 	struct ixgbe_hw *hw = &adapter->hw;
4382 	u16             reg;
4383 
4384 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4385 		device_printf(iflib_get_dev(adapter->ctx),
4386 		    "Device has no supported external thermal sensor.\n");
4387 		return (ENODEV);
4388 	}
4389 
4390 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4391 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4392 		device_printf(iflib_get_dev(adapter->ctx),
4393 		    "Error reading from PHY's current temperature register\n");
4394 		return (EAGAIN);
4395 	}
4396 
4397 	/* Shift temp for output */
4398 	reg = reg >> 8;
4399 
4400 	return (sysctl_handle_16(oidp, NULL, reg, req));
4401 } /* ixgbe_sysctl_phy_temp */
4402 
4403 /************************************************************************
4404  * ixgbe_sysctl_phy_overtemp_occurred
4405  *
4406  *   Reports (directly from the PHY) whether the current PHY
4407  *   temperature is over the overtemp threshold.
4408  ************************************************************************/
4409 static int
4410 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4411 {
4412 	struct adapter  *adapter = (struct adapter *)arg1;
4413 	struct ixgbe_hw *hw = &adapter->hw;
4414 	u16             reg;
4415 
4416 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4417 		device_printf(iflib_get_dev(adapter->ctx),
4418 		    "Device has no supported external thermal sensor.\n");
4419 		return (ENODEV);
4420 	}
4421 
4422 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4423 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4424 		device_printf(iflib_get_dev(adapter->ctx),
4425 		    "Error reading from PHY's temperature status register\n");
4426 		return (EAGAIN);
4427 	}
4428 
4429 	/* Get occurrence bit */
4430 	reg = !!(reg & 0x4000);
4431 
	return (sysctl_handle_16(oidp, NULL, reg, req));
4433 } /* ixgbe_sysctl_phy_overtemp_occurred */
4434 
4435 /************************************************************************
4436  * ixgbe_sysctl_eee_state
4437  *
4438  *   Sysctl to set EEE power saving feature
4439  *   Values:
4440  *     0      - disable EEE
4441  *     1      - enable EEE
4442  *     (none) - get current device EEE state
4443  ************************************************************************/
4444 static int
4445 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4446 {
4447 	struct adapter *adapter = (struct adapter *)arg1;
4448 	device_t       dev = adapter->dev;
4449 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4450 	int            curr_eee, new_eee, error = 0;
4451 	s32            retval;
4452 
4453 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4454 
4455 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4456 	if ((error) || (req->newptr == NULL))
4457 		return (error);
4458 
4459 	/* Nothing to do */
4460 	if (new_eee == curr_eee)
4461 		return (0);
4462 
4463 	/* Not supported */
4464 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4465 		return (EINVAL);
4466 
4467 	/* Bounds checking */
4468 	if ((new_eee < 0) || (new_eee > 1))
4469 		return (EINVAL);
4470 
4471 	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4472 	if (retval) {
4473 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4474 		return (EINVAL);
4475 	}
4476 
4477 	/* Restart auto-neg */
4478 	ifp->if_init(ifp);
4479 
4480 	device_printf(dev, "New EEE state: %d\n", new_eee);
4481 
4482 	/* Cache new value */
4483 	if (new_eee)
4484 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4485 	else
4486 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4487 
4488 	return (error);
4489 } /* ixgbe_sysctl_eee_state */
4490 
4491 /************************************************************************
4492  * ixgbe_init_device_features
4493  ************************************************************************/
4494 static void
4495 ixgbe_init_device_features(struct adapter *adapter)
4496 {
4497 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4498 	                  | IXGBE_FEATURE_RSS
4499 	                  | IXGBE_FEATURE_MSI
4500 	                  | IXGBE_FEATURE_MSIX
4501 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4502 
4503 	/* Set capabilities first... */
4504 	switch (adapter->hw.mac.type) {
4505 	case ixgbe_mac_82598EB:
4506 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4507 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4508 		break;
4509 	case ixgbe_mac_X540:
4510 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4511 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4512 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4513 		    (adapter->hw.bus.func == 0))
4514 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4515 		break;
4516 	case ixgbe_mac_X550:
4517 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4518 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4519 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4520 		break;
4521 	case ixgbe_mac_X550EM_x:
4522 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4523 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4524 		break;
4525 	case ixgbe_mac_X550EM_a:
4526 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4527 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4528 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4529 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4530 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4531 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4532 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4533 		}
4534 		break;
4535 	case ixgbe_mac_82599EB:
4536 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4537 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4538 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4539 		    (adapter->hw.bus.func == 0))
4540 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4541 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4542 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4543 		break;
4544 	default:
4545 		break;
4546 	}
4547 
4548 	/* Enabled by default... */
4549 	/* Fan failure detection */
4550 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4551 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4552 	/* Netmap */
4553 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4554 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4555 	/* EEE */
4556 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4557 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4558 	/* Thermal Sensor */
4559 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4560 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4561 
4562 	/* Enabled via global sysctl... */
4563 	/* Flow Director */
4564 	if (ixgbe_enable_fdir) {
4565 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4566 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4567 		else
			device_printf(adapter->dev,
			    "Device does not support Flow Director. Leaving disabled.\n");
4569 	}
4570 	/*
4571 	 * Message Signal Interrupts - Extended (MSI-X)
4572 	 * Normal MSI is only enabled if MSI-X calls fail.
4573 	 */
4574 	if (!ixgbe_enable_msix)
4575 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4576 	/* Receive-Side Scaling (RSS) */
4577 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4578 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4579 
4580 	/* Disable features with unmet dependencies... */
4581 	/* No MSI-X */
4582 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4583 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4584 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4585 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4586 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4587 	}
4588 } /* ixgbe_init_device_features */
4589 
4590 /************************************************************************
4591  * ixgbe_check_fan_failure
4592  ************************************************************************/
4593 static void
4594 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4595 {
4596 	u32 mask;
4597 
4598 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4599 	    IXGBE_ESDP_SDP1;
4600 
4601 	if (reg & mask)
4602 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4603 } /* ixgbe_check_fan_failure */
4604