xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision cfd6422a5217410fbd66f7a7a8a64d9d85e61229)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by the iflib probe to select devices to load on.
57  *   The last field is the device description string.
58  *   Last entry must be all 0s (PVID_END).
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 static bool ixgbe_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
143 int ixgbe_intr(void *arg);
144 
145 /************************************************************************
146  * Function prototypes
147  ************************************************************************/
148 #if __FreeBSD_version >= 1100036
149 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
150 #endif
151 
152 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
154 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
155 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
156 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
157 
158 static void ixgbe_config_dmac(struct adapter *adapter);
159 static void ixgbe_configure_ivars(struct adapter *adapter);
160 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
161                            s8 type);
162 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
163 static bool ixgbe_sfp_probe(if_ctx_t ctx);
164 
165 static void ixgbe_free_pci_resources(if_ctx_t ctx);
166 
167 static int  ixgbe_msix_link(void *arg);
168 static int  ixgbe_msix_que(void *arg);
169 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
170 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
171 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
172 
173 static int  ixgbe_setup_interface(if_ctx_t ctx);
174 static void ixgbe_init_device_features(struct adapter *adapter);
175 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
176 static void ixgbe_add_media_types(if_ctx_t ctx);
177 static void ixgbe_update_stats_counters(struct adapter *adapter);
178 static void ixgbe_config_link(if_ctx_t ctx);
179 static void ixgbe_get_slot_info(struct adapter *);
180 static void ixgbe_check_wol_support(struct adapter *adapter);
181 static void ixgbe_enable_rx_drop(struct adapter *);
182 static void ixgbe_disable_rx_drop(struct adapter *);
183 
184 static void ixgbe_add_hw_stats(struct adapter *adapter);
185 static int  ixgbe_set_flowcntl(struct adapter *, int);
186 static int  ixgbe_set_advertise(struct adapter *, int);
187 static int  ixgbe_get_advertise(struct adapter *);
188 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
189 static void ixgbe_config_gpie(struct adapter *adapter);
190 static void ixgbe_config_delay_values(struct adapter *adapter);
191 
192 /* Sysctl handlers */
193 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
198 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
199 #ifdef IXGBE_DEBUG
200 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
201 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
202 #endif
203 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
209 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
210 
211 /* Deferred interrupt tasklets */
212 static void ixgbe_handle_msf(void *);
213 static void ixgbe_handle_mod(void *);
214 static void ixgbe_handle_phy(void *);
215 
216 /************************************************************************
217  *  FreeBSD Device Interface Entry Points
218  ************************************************************************/
219 static device_method_t ix_methods[] = {
220 	/* Device interface */
221 	DEVMETHOD(device_register, ixgbe_register),
222 	DEVMETHOD(device_probe, iflib_device_probe),
223 	DEVMETHOD(device_attach, iflib_device_attach),
224 	DEVMETHOD(device_detach, iflib_device_detach),
225 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
226 	DEVMETHOD(device_suspend, iflib_device_suspend),
227 	DEVMETHOD(device_resume, iflib_device_resume),
228 #ifdef PCI_IOV
229 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
230 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
231 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
232 #endif /* PCI_IOV */
233 	DEVMETHOD_END
234 };
235 
236 static driver_t ix_driver = {
237 	"ix", ix_methods, sizeof(struct adapter),
238 };
239 
240 devclass_t ix_devclass;
241 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
242 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
243 MODULE_DEPEND(ix, pci, 1, 1, 1);
244 MODULE_DEPEND(ix, ether, 1, 1, 1);
245 MODULE_DEPEND(ix, iflib, 1, 1, 1);
246 
247 static device_method_t ixgbe_if_methods[] = {
248 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
249 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
250 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
251 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
252 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
253 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
254 	DEVMETHOD(ifdi_init, ixgbe_if_init),
255 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
256 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
257 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
258 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
259 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
260 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
261 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
262 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
263 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
264 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
265 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
266 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
267 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
268 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
269 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
270 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
271 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
272 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
273 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
274 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
275 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
276 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
277 	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
278 #ifdef PCI_IOV
279 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
280 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
281 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
282 #endif /* PCI_IOV */
283 	DEVMETHOD_END
284 };
285 
286 /*
287  * TUNEABLE PARAMETERS:
288  */
289 
290 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
291     "IXGBE driver parameters");
292 static driver_t ixgbe_if_driver = {
293   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
294 };
295 
296 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
297 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
298     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
299 
300 /* Flow control setting, default to full */
301 static int ixgbe_flow_control = ixgbe_fc_full;
302 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
303     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
304 
305 /* Advertise Speed, default to 0 (auto) */
306 static int ixgbe_advertise_speed = 0;
307 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
308     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
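
/*
 * Usage note (illustrative, not driver logic): the CTLFLAG_RDTUN sysctls in
 * this file are read as tunables when the module loads, so they are normally
 * set from /boot/loader.conf rather than at runtime, for example:
 *
 *   hw.ix.max_interrupt_rate="31250"
 *   hw.ix.flow_control="0"
 *   hw.ix.advertise_speed="0"
 *
 * The values above are examples only; see each tunable's description string
 * for its meaning.
 */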
309 
310 /*
311  * Smart speed setting, default to on.
312  * This only works as a compile-time option
313  * right now, since it is set during attach;
314  * set this to 'ixgbe_smart_speed_off' to
315  * disable.
316  */
317 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
318 
319 /*
320  * MSI-X should be the default for best performance,
321  * but this allows it to be forced off for testing.
322  */
323 static int ixgbe_enable_msix = 1;
324 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
325     "Enable MSI-X interrupts");
326 
327 /*
328  * Setting this to on allows the use of
329  * unsupported SFP+ modules; note that by
330  * doing so you are on your own :)
331  */
332 static int allow_unsupported_sfp = FALSE;
333 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
334     &allow_unsupported_sfp, 0,
335     "Allow unsupported SFP modules...use at your own risk");
336 
337 /*
338  * Not sure if Flow Director is fully baked,
339  * so we'll default to turning it off.
340  */
341 static int ixgbe_enable_fdir = 0;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
343     "Enable Flow Director");
344 
345 /* Receive-Side Scaling */
346 static int ixgbe_enable_rss = 1;
347 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
348     "Enable Receive-Side Scaling (RSS)");
349 
350 #if 0
351 /* Keep running tab on them for sanity check */
352 static int ixgbe_total_ports;
353 #endif
354 
355 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
356 
357 /*
358  * For Flow Director: the rate at which we sample TX packets for the
359  * filter pool; with the default of 20, every 20th packet is probed.
360  *
361  * This feature can be disabled by setting this to 0.
362  */
363 static int atr_sample_rate = 20;
364 
365 extern struct if_txrx ixgbe_txrx;
366 
367 static struct if_shared_ctx ixgbe_sctx_init = {
368 	.isc_magic = IFLIB_MAGIC,
369 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
370 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
371 	.isc_tx_maxsegsize = PAGE_SIZE,
372 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
373 	.isc_tso_maxsegsize = PAGE_SIZE,
374 	.isc_rx_maxsize = PAGE_SIZE*4,
375 	.isc_rx_nsegments = 1,
376 	.isc_rx_maxsegsize = PAGE_SIZE*4,
377 	.isc_nfl = 1,
378 	.isc_ntxqs = 1,
379 	.isc_nrxqs = 1,
380 
381 	.isc_admin_intrcnt = 1,
382 	.isc_vendor_info = ixgbe_vendor_info_array,
383 	.isc_driver_version = ixgbe_driver_version,
384 	.isc_driver = &ixgbe_if_driver,
385 	.isc_flags = IFLIB_TSO_INIT_IP,
386 
387 	.isc_nrxd_min = {MIN_RXD},
388 	.isc_ntxd_min = {MIN_TXD},
389 	.isc_nrxd_max = {MAX_RXD},
390 	.isc_ntxd_max = {MAX_TXD},
391 	.isc_nrxd_default = {DEFAULT_RXD},
392 	.isc_ntxd_default = {DEFAULT_TXD},
393 };
394 
395 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
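
/*
 * Descriptive note: ixgbe_sctx_init is the static description handed to
 * iflib before attach: one TX and one RX ring per queue set
 * (isc_ntxqs/isc_nrxqs), a single free list per RX queue (isc_nfl),
 * descriptor-count limits and defaults, and the PCI ID table used by
 * iflib_device_probe.
 */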
396 
397 /************************************************************************
398  * ixgbe_if_tx_queues_alloc
399  ************************************************************************/
400 static int
401 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
402                          int ntxqs, int ntxqsets)
403 {
404 	struct adapter     *adapter = iflib_get_softc(ctx);
405 	if_softc_ctx_t     scctx = adapter->shared;
406 	struct ix_tx_queue *que;
407 	int                i, j, error;
408 
409 	MPASS(adapter->num_tx_queues > 0);
410 	MPASS(adapter->num_tx_queues == ntxqsets);
411 	MPASS(ntxqs == 1);
412 
413 	/* Allocate queue structure memory */
414 	adapter->tx_queues =
415 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
416 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
417 	if (!adapter->tx_queues) {
418 		device_printf(iflib_get_dev(ctx),
419 		    "Unable to allocate TX ring memory\n");
420 		return (ENOMEM);
421 	}
422 
423 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
424 		struct tx_ring *txr = &que->txr;
425 
426 		/* In case SR-IOV is enabled, align the index properly */
427 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
428 		    i);
429 
430 		txr->adapter = que->adapter = adapter;
431 
432 		/* Allocate report status array */
433 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
434 		if (txr->tx_rsq == NULL) {
435 			error = ENOMEM;
436 			goto fail;
437 		}
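		/*
		 * Note: tx_rsq records the descriptor indices at which a
		 * Report Status (RS) write-back is requested, so the TX
		 * completion path only inspects those entries; QIDX_INVALID
		 * marks unused slots, as initialized below.
		 */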
438 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
439 			txr->tx_rsq[j] = QIDX_INVALID;
440 		/* get the virtual and physical address of the hardware queues */
441 		txr->tail = IXGBE_TDT(txr->me);
442 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
443 		txr->tx_paddr = paddrs[i];
444 
445 		txr->bytes = 0;
446 		txr->total_packets = 0;
447 
448 		/* Set the rate at which we sample packets */
449 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
450 			txr->atr_sample = atr_sample_rate;
451 
452 	}
453 
454 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
455 	    adapter->num_tx_queues);
456 
457 	return (0);
458 
459 fail:
460 	ixgbe_if_queues_free(ctx);
461 
462 	return (error);
463 } /* ixgbe_if_tx_queues_alloc */
464 
465 /************************************************************************
466  * ixgbe_if_rx_queues_alloc
467  ************************************************************************/
468 static int
469 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
470                          int nrxqs, int nrxqsets)
471 {
472 	struct adapter     *adapter = iflib_get_softc(ctx);
473 	struct ix_rx_queue *que;
474 	int                i;
475 
476 	MPASS(adapter->num_rx_queues > 0);
477 	MPASS(adapter->num_rx_queues == nrxqsets);
478 	MPASS(nrxqs == 1);
479 
480 	/* Allocate queue structure memory */
481 	adapter->rx_queues =
482 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
483 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
484 	if (!adapter->rx_queues) {
485 		device_printf(iflib_get_dev(ctx),
486 		    "Unable to allocate RX ring memory\n");
487 		return (ENOMEM);
488 	}
489 
490 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
491 		struct rx_ring *rxr = &que->rxr;
492 
493 		/* In case SR-IOV is enabled, align the index properly */
494 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
495 		    i);
496 
497 		rxr->adapter = que->adapter = adapter;
498 
499 		/* get the virtual and physical address of the hw queues */
500 		rxr->tail = IXGBE_RDT(rxr->me);
501 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
502 		rxr->rx_paddr = paddrs[i];
503 		rxr->bytes = 0;
504 		rxr->que = que;
505 	}
506 
507 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
508 	    adapter->num_rx_queues);
509 
510 	return (0);
511 } /* ixgbe_if_rx_queues_alloc */
512 
513 /************************************************************************
514  * ixgbe_if_queues_free
515  ************************************************************************/
516 static void
517 ixgbe_if_queues_free(if_ctx_t ctx)
518 {
519 	struct adapter     *adapter = iflib_get_softc(ctx);
520 	struct ix_tx_queue *tx_que = adapter->tx_queues;
521 	struct ix_rx_queue *rx_que = adapter->rx_queues;
522 	int                i;
523 
524 	if (tx_que != NULL) {
525 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
526 			struct tx_ring *txr = &tx_que->txr;
527 			if (txr->tx_rsq == NULL)
528 				break;
529 
530 			free(txr->tx_rsq, M_IXGBE);
531 			txr->tx_rsq = NULL;
532 		}
533 
534 		free(adapter->tx_queues, M_IXGBE);
535 		adapter->tx_queues = NULL;
536 	}
537 	if (rx_que != NULL) {
538 		free(adapter->rx_queues, M_IXGBE);
539 		adapter->rx_queues = NULL;
540 	}
541 } /* ixgbe_if_queues_free */
542 
543 /************************************************************************
544  * ixgbe_initialize_rss_mapping
545  ************************************************************************/
546 static void
547 ixgbe_initialize_rss_mapping(struct adapter *adapter)
548 {
549 	struct ixgbe_hw *hw = &adapter->hw;
550 	u32             reta = 0, mrqc, rss_key[10];
551 	int             queue_id, table_size, index_mult;
552 	int             i, j;
553 	u32             rss_hash_config;
554 
555 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
556 		/* Fetch the configured RSS key */
557 		rss_getkey((uint8_t *)&rss_key);
558 	} else {
559 		/* set up random bits */
560 		arc4rand(&rss_key, sizeof(rss_key), 0);
561 	}
562 
563 	/* Set multiplier for RETA setup and table size based on MAC */
564 	index_mult = 0x1;
565 	table_size = 128;
566 	switch (adapter->hw.mac.type) {
567 	case ixgbe_mac_82598EB:
568 		index_mult = 0x11;
569 		break;
570 	case ixgbe_mac_X550:
571 	case ixgbe_mac_X550EM_x:
572 	case ixgbe_mac_X550EM_a:
573 		table_size = 512;
574 		break;
575 	default:
576 		break;
577 	}
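
	/*
	 * For illustration: in the non-RSS path below queue_id = j * index_mult,
	 * so with index_mult == 0x11 (82598) entry j becomes (j << 4) | j, i.e.
	 * the queue number replicated into both nibbles of the 8-bit
	 * redirection entry, while every other MAC uses the queue index
	 * directly (index_mult == 0x1).
	 */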
578 
579 	/* Set up the redirection table */
580 	for (i = 0, j = 0; i < table_size; i++, j++) {
581 		if (j == adapter->num_rx_queues)
582 			j = 0;
583 
584 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
585 			/*
586 			 * Fetch the RSS bucket id for the given indirection
587 			 * entry. Cap it at the number of configured buckets
588 			 * (which is num_rx_queues.)
589 			 */
590 			queue_id = rss_get_indirection_to_bucket(i);
591 			queue_id = queue_id % adapter->num_rx_queues;
592 		} else
593 			queue_id = (j * index_mult);
594 
595 		/*
596 		 * The low 8 bits are for hash value (n+0);
597 		 * The next 8 bits are for hash value (n+1), etc.
598 		 */
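		/*
		 * Worked example: each 32-bit RETA register holds four 8-bit
		 * entries.  For queue ids q0..q3 chosen in four consecutive
		 * iterations, the shift-and-or sequence below produces
		 *     reta = (q3 << 24) | (q2 << 16) | (q1 << 8) | q0
		 * which is then written to RETA(i >> 2) (or ERETA for entries
		 * 128 and above on the larger tables).
		 */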
599 		reta = reta >> 8;
600 		reta = reta | (((uint32_t)queue_id) << 24);
601 		if ((i & 3) == 3) {
602 			if (i < 128)
603 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
604 			else
605 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
606 				    reta);
607 			reta = 0;
608 		}
609 	}
610 
611 	/* Now fill our hash function seeds */
612 	for (i = 0; i < 10; i++)
613 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
614 
615 	/* Perform hash on these packet types */
616 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
617 		rss_hash_config = rss_gethashconfig();
618 	else {
619 		/*
620 		 * Disable UDP - IP fragments aren't currently being handled
621 		 * and so we end up with a mix of 2-tuple and 4-tuple
622 		 * traffic.
623 		 */
624 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
625 		                | RSS_HASHTYPE_RSS_TCP_IPV4
626 		                | RSS_HASHTYPE_RSS_IPV6
627 		                | RSS_HASHTYPE_RSS_TCP_IPV6
628 		                | RSS_HASHTYPE_RSS_IPV6_EX
629 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
630 	}
631 
632 	mrqc = IXGBE_MRQC_RSSEN;
633 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
634 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
635 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
636 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
637 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
638 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
639 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
640 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
649 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
650 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
651 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
652 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
653 } /* ixgbe_initialize_rss_mapping */
654 
655 /************************************************************************
656  * ixgbe_initialize_receive_units - Setup receive registers and features.
657  ************************************************************************/
658 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
659 
660 static void
661 ixgbe_initialize_receive_units(if_ctx_t ctx)
662 {
663 	struct adapter     *adapter = iflib_get_softc(ctx);
664 	if_softc_ctx_t     scctx = adapter->shared;
665 	struct ixgbe_hw    *hw = &adapter->hw;
666 	struct ifnet       *ifp = iflib_get_ifp(ctx);
667 	struct ix_rx_queue *que;
668 	int                i, j;
669 	u32                bufsz, fctrl, srrctl, rxcsum;
670 	u32                hlreg;
671 
672 	/*
673 	 * Make sure receives are disabled while
674 	 * setting up the descriptor ring
675 	 */
676 	ixgbe_disable_rx(hw);
677 
678 	/* Enable broadcasts */
679 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
680 	fctrl |= IXGBE_FCTRL_BAM;
681 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
682 		fctrl |= IXGBE_FCTRL_DPF;
683 		fctrl |= IXGBE_FCTRL_PMCF;
684 	}
685 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
686 
687 	/* Set for Jumbo Frames? */
688 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
689 	if (ifp->if_mtu > ETHERMTU)
690 		hlreg |= IXGBE_HLREG0_JUMBOEN;
691 	else
692 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
693 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
694 
695 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
696 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
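
	/*
	 * For illustration: SRRCTL's packet buffer size field is in units of
	 * (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) bytes, i.e. 1 KB, so a 2048-byte
	 * RX buffer yields bufsz = 2 and a 4096-byte buffer yields bufsz = 4;
	 * bufsz is OR'd into each queue's SRRCTL register below.
	 */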
697 
698 	/* Setup the Base and Length of the Rx Descriptor Ring */
699 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
700 		struct rx_ring *rxr = &que->rxr;
701 		u64            rdba = rxr->rx_paddr;
702 
703 		j = rxr->me;
704 
705 		/* Setup the Base and Length of the Rx Descriptor Ring */
706 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
707 		    (rdba & 0x00000000ffffffffULL));
708 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
709 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
710 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
711 
712 		/* Set up the SRRCTL register */
713 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
714 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
715 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
716 		srrctl |= bufsz;
717 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
718 
719 		/*
720 		 * Set DROP_EN iff we have no flow control and >1 queue.
721 		 * Note that srrctl was cleared shortly before during reset,
722 		 * so we do not need to clear the bit, but do it just in case
723 		 * this code is moved elsewhere.
724 		 */
725 		if (adapter->num_rx_queues > 1 &&
726 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
727 			srrctl |= IXGBE_SRRCTL_DROP_EN;
728 		} else {
729 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
730 		}
731 
732 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
733 
734 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
735 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
736 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
737 
738 		/* Set the driver rx tail address */
739 		rxr->tail =  IXGBE_RDT(rxr->me);
740 	}
741 
742 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
743 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
744 		            | IXGBE_PSRTYPE_UDPHDR
745 		            | IXGBE_PSRTYPE_IPV4HDR
746 		            | IXGBE_PSRTYPE_IPV6HDR;
747 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
748 	}
749 
750 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
751 
752 	ixgbe_initialize_rss_mapping(adapter);
753 
754 	if (adapter->num_rx_queues > 1) {
755 		/* RSS and RX IPP Checksum are mutually exclusive */
756 		rxcsum |= IXGBE_RXCSUM_PCSD;
757 	}
758 
759 	if (ifp->if_capenable & IFCAP_RXCSUM)
760 		rxcsum |= IXGBE_RXCSUM_PCSD;
761 
762 	/* This is useful for calculating UDP/IP fragment checksums */
763 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
764 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
765 
766 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
767 
768 } /* ixgbe_initialize_receive_units */
769 
770 /************************************************************************
771  * ixgbe_initialize_transmit_units - Enable transmit units.
772  ************************************************************************/
773 static void
774 ixgbe_initialize_transmit_units(if_ctx_t ctx)
775 {
776 	struct adapter     *adapter = iflib_get_softc(ctx);
777 	struct ixgbe_hw    *hw = &adapter->hw;
778 	if_softc_ctx_t     scctx = adapter->shared;
779 	struct ix_tx_queue *que;
780 	int i;
781 
782 	/* Setup the Base and Length of the Tx Descriptor Ring */
783 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
784 	    i++, que++) {
785 		struct tx_ring	   *txr = &que->txr;
786 		u64 tdba = txr->tx_paddr;
787 		u32 txctrl = 0;
788 		int j = txr->me;
789 
790 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
791 		    (tdba & 0x00000000ffffffffULL));
792 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
793 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
794 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
795 
796 		/* Setup the HW Tx Head and Tail descriptor pointers */
797 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
798 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
799 
800 		/* Cache the tail address */
801 		txr->tail = IXGBE_TDT(txr->me);
802 
803 		txr->tx_rs_cidx = txr->tx_rs_pidx;
804 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
805 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
806 			txr->tx_rsq[k] = QIDX_INVALID;
807 
808 		/* Disable Head Writeback */
809 		/*
810 		 * Note: for X550 series devices, these registers are actually
811 		 * prefixed with TPH_ isntead of DCA_, but the addresses and
812 		 * prefixed with TPH_ instead of DCA_, but the addresses and
813 		 */
814 		switch (hw->mac.type) {
815 		case ixgbe_mac_82598EB:
816 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
817 			break;
818 		default:
819 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
820 			break;
821 		}
822 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
823 		switch (hw->mac.type) {
824 		case ixgbe_mac_82598EB:
825 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
826 			break;
827 		default:
828 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
829 			break;
830 		}
831 
832 	}
833 
834 	if (hw->mac.type != ixgbe_mac_82598EB) {
835 		u32 dmatxctl, rttdcs;
836 
837 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
838 		dmatxctl |= IXGBE_DMATXCTL_TE;
839 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
840 		/* Disable arbiter to set MTQC */
841 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
842 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
843 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
844 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
845 		    ixgbe_get_mtqc(adapter->iov_mode));
846 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
847 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
848 	}
849 
850 } /* ixgbe_initialize_transmit_units */
851 
852 /************************************************************************
853  * ixgbe_register
854  ************************************************************************/
855 static void *
856 ixgbe_register(device_t dev)
857 {
858 	return (ixgbe_sctx);
859 } /* ixgbe_register */
860 
861 /************************************************************************
862  * ixgbe_if_attach_pre - Device initialization routine, part 1
863  *
864  *   Called when the driver is being loaded.
865  *   Identifies the type of hardware, initializes the hardware,
866  *   and initializes iflib structures.
867  *
868  *   return 0 on success, positive on failure
869  ************************************************************************/
870 static int
871 ixgbe_if_attach_pre(if_ctx_t ctx)
872 {
873 	struct adapter  *adapter;
874 	device_t        dev;
875 	if_softc_ctx_t  scctx;
876 	struct ixgbe_hw *hw;
877 	int             error = 0;
878 	u32             ctrl_ext;
879 
880 	INIT_DEBUGOUT("ixgbe_attach: begin");
881 
882 	/* Allocate, clear, and link in our adapter structure */
883 	dev = iflib_get_dev(ctx);
884 	adapter = iflib_get_softc(ctx);
885 	adapter->hw.back = adapter;
886 	adapter->ctx = ctx;
887 	adapter->dev = dev;
888 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
889 	adapter->media = iflib_get_media(ctx);
890 	hw = &adapter->hw;
891 
892 	/* Determine hardware revision */
893 	hw->vendor_id = pci_get_vendor(dev);
894 	hw->device_id = pci_get_device(dev);
895 	hw->revision_id = pci_get_revid(dev);
896 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
897 	hw->subsystem_device_id = pci_get_subdevice(dev);
898 
899 	/* Do base PCI setup - map BAR0 */
900 	if (ixgbe_allocate_pci_resources(ctx)) {
901 		device_printf(dev, "Allocation of PCI resources failed\n");
902 		return (ENXIO);
903 	}
904 
905 	/* let hardware know driver is loaded */
906 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
907 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
908 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
909 
910 	/*
911 	 * Initialize the shared code
912 	 */
913 	if (ixgbe_init_shared_code(hw) != 0) {
914 		device_printf(dev, "Unable to initialize the shared code\n");
915 		error = ENXIO;
916 		goto err_pci;
917 	}
918 
919 	if (hw->mbx.ops.init_params)
920 		hw->mbx.ops.init_params(hw);
921 
922 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
923 
924 	if (hw->mac.type != ixgbe_mac_82598EB)
925 		hw->phy.smart_speed = ixgbe_smart_speed;
926 
927 	ixgbe_init_device_features(adapter);
928 
929 	/* Enable WoL (if supported) */
930 	ixgbe_check_wol_support(adapter);
931 
932 	/* Verify adapter fan is still functional (if applicable) */
933 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
934 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
935 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
936 	}
937 
938 	/* Ensure SW/FW semaphore is free */
939 	ixgbe_init_swfw_semaphore(hw);
940 
941 	/* Set an initial default flow control value */
942 	hw->fc.requested_mode = ixgbe_flow_control;
943 
944 	hw->phy.reset_if_overtemp = TRUE;
945 	error = ixgbe_reset_hw(hw);
946 	hw->phy.reset_if_overtemp = FALSE;
947 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
948 		/*
949 		 * No optics in this port, set up
950 		 * so the timer routine will probe
951 		 * for later insertion.
952 		 */
953 		adapter->sfp_probe = TRUE;
954 		error = 0;
955 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
956 		device_printf(dev, "Unsupported SFP+ module detected!\n");
957 		error = EIO;
958 		goto err_pci;
959 	} else if (error) {
960 		device_printf(dev, "Hardware initialization failed\n");
961 		error = EIO;
962 		goto err_pci;
963 	}
964 
965 	/* Make sure we have a good EEPROM before we read from it */
966 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
967 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
968 		error = EIO;
969 		goto err_pci;
970 	}
971 
972 	error = ixgbe_start_hw(hw);
973 	switch (error) {
974 	case IXGBE_ERR_EEPROM_VERSION:
975 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
976 		break;
977 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
978 		device_printf(dev, "Unsupported SFP+ Module\n");
979 		error = EIO;
980 		goto err_pci;
981 	case IXGBE_ERR_SFP_NOT_PRESENT:
982 		device_printf(dev, "No SFP+ Module found\n");
983 		/* falls thru */
984 	default:
985 		break;
986 	}
987 
988 	/* Most of the iflib initialization... */
989 
990 	iflib_set_mac(ctx, hw->mac.addr);
991 	switch (adapter->hw.mac.type) {
992 	case ixgbe_mac_X550:
993 	case ixgbe_mac_X550EM_x:
994 	case ixgbe_mac_X550EM_a:
995 		scctx->isc_rss_table_size = 512;
996 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
997 		break;
998 	default:
999 		scctx->isc_rss_table_size = 128;
1000 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1001 	}
1002 
1003 	/* Allow legacy interrupts */
1004 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1005 
1006 	scctx->isc_txqsizes[0] =
1007 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1008 	    sizeof(u32), DBA_ALIGN);
1009 	scctx->isc_rxqsizes[0] =
1010 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1011 	    DBA_ALIGN);
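
	/*
	 * For example (assuming the usual 16-byte advanced descriptors): a
	 * ring of 1024 TX descriptors needs 1024 * 16 bytes plus a trailing
	 * u32, and the result is rounded up to DBA_ALIGN so iflib sizes the
	 * DMA allocation on a descriptor-ring-friendly boundary.
	 */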
1012 
1013 	/* XXX */
1014 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1015 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1016 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1017 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1018 	} else {
1019 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1020 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1021 	}
1022 
1023 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1024 
1025 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1026 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1027 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1028 
1029 	scctx->isc_txrx = &ixgbe_txrx;
1030 
1031 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1032 
1033 	return (0);
1034 
1035 err_pci:
1036 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1037 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1038 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1039 	ixgbe_free_pci_resources(ctx);
1040 
1041 	return (error);
1042 } /* ixgbe_if_attach_pre */
1043 
1044  /*********************************************************************
1045  * ixgbe_if_attach_post - Device initialization routine, part 2
1046  *
1047  *   Called during driver load, but after interrupts and
1048  *   resources have been allocated and configured.
1049  *   Sets up some data structures not relevant to iflib.
1050  *
1051  *   return 0 on success, positive on failure
1052  *********************************************************************/
1053 static int
1054 ixgbe_if_attach_post(if_ctx_t ctx)
1055 {
1056 	device_t dev;
1057 	struct adapter  *adapter;
1058 	struct ixgbe_hw *hw;
1059 	int             error = 0;
1060 
1061 	dev = iflib_get_dev(ctx);
1062 	adapter = iflib_get_softc(ctx);
1063 	hw = &adapter->hw;
1064 
1065 
1066 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1067 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1068 		device_printf(dev, "Device does not support legacy interrupts\n");
1069 		error = ENXIO;
1070 		goto err;
1071 	}
1072 
1073 	/* Allocate multicast array memory. */
1074 	adapter->mta = malloc(sizeof(*adapter->mta) *
1075 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1076 	if (adapter->mta == NULL) {
1077 		device_printf(dev, "Can not allocate multicast setup array\n");
1078 		error = ENOMEM;
1079 		goto err;
1080 	}
1081 
1082 	/* hw.ix defaults init */
1083 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1084 
1085 	/* Enable the optics for 82599 SFP+ fiber */
1086 	ixgbe_enable_tx_laser(hw);
1087 
1088 	/* Enable power to the phy. */
1089 	ixgbe_set_phy_power(hw, TRUE);
1090 
1091 	ixgbe_initialize_iov(adapter);
1092 
1093 	error = ixgbe_setup_interface(ctx);
1094 	if (error) {
1095 		device_printf(dev, "Interface setup failed: %d\n", error);
1096 		goto err;
1097 	}
1098 
1099 	ixgbe_if_update_admin_status(ctx);
1100 
1101 	/* Initialize statistics */
1102 	ixgbe_update_stats_counters(adapter);
1103 	ixgbe_add_hw_stats(adapter);
1104 
1105 	/* Check PCIE slot type/speed/width */
1106 	ixgbe_get_slot_info(adapter);
1107 
1108 	/*
1109 	 * Do time init and sysctl init here, but
1110 	 * only on the first port of a bypass adapter.
1111 	 */
1112 	ixgbe_bypass_init(adapter);
1113 
1114 	/* Set an initial dmac value */
1115 	adapter->dmac = 0;
1116 	/* Set initial advertised speeds (if applicable) */
1117 	adapter->advertise = ixgbe_get_advertise(adapter);
1118 
1119 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1120 		ixgbe_define_iov_schemas(dev, &error);
1121 
1122 	/* Add sysctls */
1123 	ixgbe_add_device_sysctls(ctx);
1124 
1125 	return (0);
1126 err:
1127 	return (error);
1128 } /* ixgbe_if_attach_post */
1129 
1130 /************************************************************************
1131  * ixgbe_check_wol_support
1132  *
1133  *   Checks whether the adapter's ports are capable of
1134  *   Wake On LAN by reading the adapter's NVM.
1135  *
1136  *   Sets each port's hw->wol_enabled value depending
1137  *   on the value read here.
1138  ************************************************************************/
1139 static void
1140 ixgbe_check_wol_support(struct adapter *adapter)
1141 {
1142 	struct ixgbe_hw *hw = &adapter->hw;
1143 	u16             dev_caps = 0;
1144 
1145 	/* Find out WoL support for port */
1146 	adapter->wol_support = hw->wol_enabled = 0;
1147 	ixgbe_get_device_caps(hw, &dev_caps);
1148 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1149 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1150 	     hw->bus.func == 0))
1151 		adapter->wol_support = hw->wol_enabled = 1;
1152 
1153 	/* Save initial wake up filter configuration */
1154 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1155 
1156 	return;
1157 } /* ixgbe_check_wol_support */
1158 
1159 /************************************************************************
1160  * ixgbe_setup_interface
1161  *
1162  *   Setup networking device structure and register an interface.
1163  ************************************************************************/
1164 static int
1165 ixgbe_setup_interface(if_ctx_t ctx)
1166 {
1167 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1168 	struct adapter *adapter = iflib_get_softc(ctx);
1169 
1170 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1171 
1172 	if_setbaudrate(ifp, IF_Gbps(10));
1173 
1174 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1175 
1176 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1177 
1178 	ixgbe_add_media_types(ctx);
1179 
1180 	/* Autoselect media by default */
1181 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1182 
1183 	return (0);
1184 } /* ixgbe_setup_interface */
1185 
1186 /************************************************************************
1187  * ixgbe_if_get_counter
1188  ************************************************************************/
1189 static uint64_t
1190 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1191 {
1192 	struct adapter *adapter = iflib_get_softc(ctx);
1193 	if_t           ifp = iflib_get_ifp(ctx);
1194 
1195 	switch (cnt) {
1196 	case IFCOUNTER_IPACKETS:
1197 		return (adapter->ipackets);
1198 	case IFCOUNTER_OPACKETS:
1199 		return (adapter->opackets);
1200 	case IFCOUNTER_IBYTES:
1201 		return (adapter->ibytes);
1202 	case IFCOUNTER_OBYTES:
1203 		return (adapter->obytes);
1204 	case IFCOUNTER_IMCASTS:
1205 		return (adapter->imcasts);
1206 	case IFCOUNTER_OMCASTS:
1207 		return (adapter->omcasts);
1208 	case IFCOUNTER_COLLISIONS:
1209 		return (0);
1210 	case IFCOUNTER_IQDROPS:
1211 		return (adapter->iqdrops);
1212 	case IFCOUNTER_OQDROPS:
1213 		return (0);
1214 	case IFCOUNTER_IERRORS:
1215 		return (adapter->ierrors);
1216 	default:
1217 		return (if_get_counter_default(ifp, cnt));
1218 	}
1219 } /* ixgbe_if_get_counter */
1220 
1221 /************************************************************************
1222  * ixgbe_if_i2c_req
1223  ************************************************************************/
1224 static int
1225 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1226 {
1227 	struct adapter		*adapter = iflib_get_softc(ctx);
1228 	struct ixgbe_hw 	*hw = &adapter->hw;
1229 	int 			i;
1230 
1231 
1232 	if (hw->phy.ops.read_i2c_byte == NULL)
1233 		return (ENXIO);
1234 	for (i = 0; i < req->len; i++)
1235 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1236 		    req->dev_addr, &req->data[i]);
1237 	return (0);
1238 } /* ixgbe_if_i2c_req */
1239 
1240 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1241  * @ctx: iflib context
1242  * @event: event code to check
1243  *
1244  * Defaults to returning true for unknown events.
1245  *
1246  * @returns true if iflib needs to reinit the interface
1247  */
1248 static bool
1249 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1250 {
1251 	switch (event) {
1252 	case IFLIB_RESTART_VLAN_CONFIG:
1253 		return (false);
1254 	default:
1255 		return (true);
1256 	}
1257 }
1258 
1259 /************************************************************************
1260  * ixgbe_add_media_types
1261  ************************************************************************/
1262 static void
1263 ixgbe_add_media_types(if_ctx_t ctx)
1264 {
1265 	struct adapter  *adapter = iflib_get_softc(ctx);
1266 	struct ixgbe_hw *hw = &adapter->hw;
1267 	device_t        dev = iflib_get_dev(ctx);
1268 	u64             layer;
1269 
1270 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1271 
1272 	/* Media types with matching FreeBSD media defines */
1273 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1274 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1275 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1276 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1277 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1278 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1279 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1280 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1281 
1282 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1283 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1284 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1285 		    NULL);
1286 
1287 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1288 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1289 		if (hw->phy.multispeed_fiber)
1290 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1291 			    NULL);
1292 	}
1293 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1294 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1295 		if (hw->phy.multispeed_fiber)
1296 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1297 			    NULL);
1298 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1299 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1300 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1301 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1302 
1303 #ifdef IFM_ETH_XTYPE
1304 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1305 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1306 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1307 		ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1308 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1309 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1310 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1311 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1312 #else
1313 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1314 		device_printf(dev, "Media supported: 10GbaseKR\n");
1315 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1316 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1317 	}
1318 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1319 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1320 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1321 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1322 	}
1323 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1324 		device_printf(dev, "Media supported: 1000baseKX\n");
1325 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1326 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1327 	}
1328 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1329 		device_printf(dev, "Media supported: 2500baseKX\n");
1330 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1331 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1332 	}
1333 #endif
1334 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1335 		device_printf(dev, "Media supported: 1000baseBX\n");
1336 
1337 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1338 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1339 		    0, NULL);
1340 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1341 	}
1342 
1343 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1344 } /* ixgbe_add_media_types */
1345 
1346 /************************************************************************
1347  * ixgbe_is_sfp
1348  ************************************************************************/
1349 static inline bool
1350 ixgbe_is_sfp(struct ixgbe_hw *hw)
1351 {
1352 	switch (hw->mac.type) {
1353 	case ixgbe_mac_82598EB:
1354 		if (hw->phy.type == ixgbe_phy_nl)
1355 			return (TRUE);
1356 		return (FALSE);
1357 	case ixgbe_mac_82599EB:
1358 		switch (hw->mac.ops.get_media_type(hw)) {
1359 		case ixgbe_media_type_fiber:
1360 		case ixgbe_media_type_fiber_qsfp:
1361 			return (TRUE);
1362 		default:
1363 			return (FALSE);
1364 		}
1365 	case ixgbe_mac_X550EM_x:
1366 	case ixgbe_mac_X550EM_a:
1367 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1368 			return (TRUE);
1369 		return (FALSE);
1370 	default:
1371 		return (FALSE);
1372 	}
1373 } /* ixgbe_is_sfp */
1374 
1375 /************************************************************************
1376  * ixgbe_config_link
1377  ************************************************************************/
1378 static void
1379 ixgbe_config_link(if_ctx_t ctx)
1380 {
1381 	struct adapter  *adapter = iflib_get_softc(ctx);
1382 	struct ixgbe_hw *hw = &adapter->hw;
1383 	u32             autoneg, err = 0;
1384 	bool            sfp, negotiate;
1385 
1386 	sfp = ixgbe_is_sfp(hw);
1387 
1388 	if (sfp) {
1389 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1390 		iflib_admin_intr_deferred(ctx);
1391 	} else {
1392 		if (hw->mac.ops.check_link)
1393 			err = ixgbe_check_link(hw, &adapter->link_speed,
1394 			    &adapter->link_up, FALSE);
1395 		if (err)
1396 			return;
1397 		autoneg = hw->phy.autoneg_advertised;
1398 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1399 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1400 			    &negotiate);
1401 		if (err)
1402 			return;
1403 		if (hw->mac.ops.setup_link)
1404 			err = hw->mac.ops.setup_link(hw, autoneg,
1405 			    adapter->link_up);
1406 	}
1407 } /* ixgbe_config_link */
1408 
1409 /************************************************************************
1410  * ixgbe_update_stats_counters - Update board statistics counters.
1411  ************************************************************************/
1412 static void
1413 ixgbe_update_stats_counters(struct adapter *adapter)
1414 {
1415 	struct ixgbe_hw       *hw = &adapter->hw;
1416 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1417 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1418 	u32                   lxoffrxc;
1419 	u64                   total_missed_rx = 0;
1420 
1421 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1422 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1423 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1424 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1425 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1426 
1427 	for (int i = 0; i < 16; i++) {
1428 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1429 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1430 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1431 	}
1432 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1433 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1434 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1435 
1436 	/* Hardware workaround, gprc counts missed packets */
1437 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1438 	stats->gprc -= missed_rx;
1439 
1440 	if (hw->mac.type != ixgbe_mac_82598EB) {
1441 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1442 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1443 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1444 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1445 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1446 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1447 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1448 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1449 		stats->lxoffrxc += lxoffrxc;
1450 	} else {
1451 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1452 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1453 		stats->lxoffrxc += lxoffrxc;
1454 		/* 82598 only has a counter in the high register */
1455 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1456 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1457 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1458 	}
1459 
1460 	/*
1461 	 * For watchdog management we need to know if we have been paused
1462 	 * during the last interval, so capture that here.
1463 	 */
1464 	if (lxoffrxc)
1465 		adapter->shared->isc_pause_frames = 1;
1466 
1467 	/*
1468 	 * Workaround: mprc hardware is incorrectly counting
1469 	 * broadcasts, so for now we subtract those.
1470 	 */
1471 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1472 	stats->bprc += bprc;
1473 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1474 	if (hw->mac.type == ixgbe_mac_82598EB)
1475 		stats->mprc -= bprc;
1476 
1477 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1478 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1479 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1480 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1481 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1482 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1483 
1484 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1485 	stats->lxontxc += lxon;
1486 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1487 	stats->lxofftxc += lxoff;
1488 	total = lxon + lxoff;
1489 
1490 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1491 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1492 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1493 	stats->gptc -= total;
1494 	stats->mptc -= total;
1495 	stats->ptc64 -= total;
1496 	stats->gotc -= total * ETHER_MIN_LEN;
1497 
1498 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1499 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1500 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1501 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1502 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1503 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1504 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1505 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1506 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1507 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1508 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1509 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1510 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1511 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1512 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1513 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1514 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1515 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1516 	/* Only read FCoE counters on MACs newer than 82598 */
1517 	if (hw->mac.type != ixgbe_mac_82598EB) {
1518 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1519 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1520 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1521 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1522 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1523 	}
1524 
1525 	/* Fill out the OS statistics structure */
1526 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1527 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1528 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1529 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1530 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1531 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1532 	IXGBE_SET_COLLISIONS(adapter, 0);
1533 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1534 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1535 } /* ixgbe_update_stats_counters */
1536 
1537 /************************************************************************
1538  * ixgbe_add_hw_stats
1539  *
1540  *   Add sysctl variables, one per statistic, to the system.
1541  ************************************************************************/
1542 static void
1543 ixgbe_add_hw_stats(struct adapter *adapter)
1544 {
1545 	device_t               dev = iflib_get_dev(adapter->ctx);
1546 	struct ix_rx_queue     *rx_que;
1547 	struct ix_tx_queue     *tx_que;
1548 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1549 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1550 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1551 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1552 	struct sysctl_oid      *stat_node, *queue_node;
1553 	struct sysctl_oid_list *stat_list, *queue_list;
1554 	int                    i;
1555 
1556 #define QUEUE_NAME_LEN 32
1557 	char                   namebuf[QUEUE_NAME_LEN];
1558 
1559 	/* Driver Statistics */
1560 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1561 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1562 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1563 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1564 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1565 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1566 
1567 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1568 		struct tx_ring *txr = &tx_que->txr;
1569 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1570 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1571 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1572 		queue_list = SYSCTL_CHILDREN(queue_node);
1573 
1574 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1575 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1576 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1577 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1578 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1579 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1580 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1581 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1582 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1583 		    CTLFLAG_RD, &txr->total_packets,
1584 		    "Queue Packets Transmitted");
1585 	}
1586 
1587 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1588 		struct rx_ring *rxr = &rx_que->rxr;
1589 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1590 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1591 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1592 		queue_list = SYSCTL_CHILDREN(queue_node);
1593 
1594 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1595 		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1596 		    &adapter->rx_queues[i], 0,
1597 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1598 		    "Interrupt Rate");
1599 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1600 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1601 		    "irqs on this queue");
1602 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1603 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1604 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1605 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1606 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1607 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1608 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1609 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1610 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1611 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1612 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1613 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1614 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1615 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1616 	}
1617 
1618 	/* MAC stats get their own sub node */
1619 
1620 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1621 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1622 	stat_list = SYSCTL_CHILDREN(stat_node);
1623 
1624 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1625 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1626 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1627 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1628 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1629 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1630 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1631 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1632 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1633 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1634 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1635 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1636 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1637 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1638 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1639 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1640 
1641 	/* Flow Control stats */
1642 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1643 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1644 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1645 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1646 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1647 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1648 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1649 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1650 
1651 	/* Packet Reception Stats */
1652 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1653 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1654 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1655 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1656 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1657 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1658 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1659 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1660 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1661 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1662 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1663 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1664 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1665 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1666 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1667 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1668 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1669 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1670 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1671 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1672 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1673 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1674 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1675 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1676 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1677 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1678 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1679 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1680 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1681 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1682 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1683 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1684 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1685 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1686 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1687 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1688 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1689 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1690 
1691 	/* Packet Transmission Stats */
1692 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1693 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1694 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1695 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1696 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1697 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1698 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1699 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1700 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1701 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1702 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1703 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1704 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1705 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1706 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1707 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1708 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1709 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1710 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1711 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1712 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1713 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1714 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1715 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1716 } /* ixgbe_add_hw_stats */
1717 
1718 /************************************************************************
1719  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1720  *
1721  *   Retrieves the TDH value from the hardware
1722  ************************************************************************/
1723 static int
1724 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1725 {
1726 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1727 	int            error;
1728 	unsigned int   val;
1729 
1730 	if (!txr)
1731 		return (0);
1732 
1733 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1734 	error = sysctl_handle_int(oidp, &val, 0, req);
1735 	if (error || !req->newptr)
1736 		return error;
1737 
1738 	return (0);
1739 } /* ixgbe_sysctl_tdh_handler */
1740 
1741 /************************************************************************
1742  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1743  *
1744  *   Retrieves the TDT value from the hardware
1745  ************************************************************************/
1746 static int
1747 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1748 {
1749 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1750 	int            error;
1751 	unsigned int   val;
1752 
1753 	if (!txr)
1754 		return (0);
1755 
1756 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1757 	error = sysctl_handle_int(oidp, &val, 0, req);
1758 	if (error || !req->newptr)
1759 		return error;
1760 
1761 	return (0);
1762 } /* ixgbe_sysctl_tdt_handler */
1763 
1764 /************************************************************************
1765  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1766  *
1767  *   Retrieves the RDH value from the hardware
1768  ************************************************************************/
1769 static int
1770 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1771 {
1772 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1773 	int            error;
1774 	unsigned int   val;
1775 
1776 	if (!rxr)
1777 		return (0);
1778 
1779 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1780 	error = sysctl_handle_int(oidp, &val, 0, req);
1781 	if (error || !req->newptr)
1782 		return error;
1783 
1784 	return (0);
1785 } /* ixgbe_sysctl_rdh_handler */
1786 
1787 /************************************************************************
1788  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1789  *
1790  *   Retrieves the RDT value from the hardware
1791  ************************************************************************/
1792 static int
1793 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1794 {
1795 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1796 	int            error;
1797 	unsigned int   val;
1798 
1799 	if (!rxr)
1800 		return (0);
1801 
1802 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1803 	error = sysctl_handle_int(oidp, &val, 0, req);
1804 	if (error || !req->newptr)
1805 		return error;
1806 
1807 	return (0);
1808 } /* ixgbe_sysctl_rdt_handler */
1809 
1810 /************************************************************************
1811  * ixgbe_if_vlan_register
1812  *
1813  *   Run via the vlan config EVENT.  It enables use of the
1814  *   HW filter table since we can get the vlan id, and it
1815  *   only creates the entry in the soft version of the
1816  *   VFTA; init will repopulate the real table.
1817  ************************************************************************/
1818 static void
1819 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1820 {
1821 	struct adapter *adapter = iflib_get_softc(ctx);
1822 	u16            index, bit;
1823 
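	/*
	 * The shadow VFTA is an array of 32-bit words: bits [11:5] of the
	 * vlan tag pick the word, bits [4:0] pick the bit within it.
	 */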
1824 	index = (vtag >> 5) & 0x7F;
1825 	bit = vtag & 0x1F;
1826 	adapter->shadow_vfta[index] |= (1 << bit);
1827 	++adapter->num_vlans;
1828 	ixgbe_setup_vlan_hw_support(ctx);
1829 } /* ixgbe_if_vlan_register */
1830 
1831 /************************************************************************
1832  * ixgbe_if_vlan_unregister
1833  *
1834  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1835  ************************************************************************/
1836 static void
1837 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1838 {
1839 	struct adapter *adapter = iflib_get_softc(ctx);
1840 	u16            index, bit;
1841 
1842 	index = (vtag >> 5) & 0x7F;
1843 	bit = vtag & 0x1F;
1844 	adapter->shadow_vfta[index] &= ~(1 << bit);
1845 	--adapter->num_vlans;
1846 	/* Re-init to load the changes */
1847 	ixgbe_setup_vlan_hw_support(ctx);
1848 } /* ixgbe_if_vlan_unregister */
1849 
1850 /************************************************************************
1851  * ixgbe_setup_vlan_hw_support
1852  ************************************************************************/
1853 static void
1854 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1855 {
1856 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1857 	struct adapter  *adapter = iflib_get_softc(ctx);
1858 	struct ixgbe_hw *hw = &adapter->hw;
1859 	struct rx_ring  *rxr;
1860 	int             i;
1861 	u32             ctrl;
1862 
1863 
1864 	/*
1865 	 * We get here through init_locked, meaning
1866 	 * a soft reset; that has already cleared
1867 	 * the VFTA and other state, so if no VLANs
1868 	 * have been registered, do nothing.
1869 	 */
1870 	if (adapter->num_vlans == 0)
1871 		return;
1872 
1873 	/* Setup the queues for vlans */
1874 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1875 		for (i = 0; i < adapter->num_rx_queues; i++) {
1876 			rxr = &adapter->rx_queues[i].rxr;
1877 			/* On 82599 and later the VLAN strip enable is per-queue in RXDCTL */
1878 			if (hw->mac.type != ixgbe_mac_82598EB) {
1879 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1880 				ctrl |= IXGBE_RXDCTL_VME;
1881 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1882 			}
1883 			rxr->vtag_strip = TRUE;
1884 		}
1885 	}
1886 
1887 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1888 		return;
1889 	/*
1890 	 * A soft reset zeroes out the VFTA, so
1891 	 * we need to repopulate it now.
1892 	 */
1893 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1894 		if (adapter->shadow_vfta[i] != 0)
1895 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1896 			    adapter->shadow_vfta[i]);
1897 
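	/*
	 * VFE enables filtering against the (re)populated VFTA, and clearing
	 * CFIEN keeps the CFI bit out of the filter decision; on 82598 the
	 * strip enable (VME) is global here instead of per-queue in RXDCTL.
	 */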
1898 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1899 	/* Enable the Filter Table if enabled */
1900 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1901 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1902 		ctrl |= IXGBE_VLNCTRL_VFE;
1903 	}
1904 	if (hw->mac.type == ixgbe_mac_82598EB)
1905 		ctrl |= IXGBE_VLNCTRL_VME;
1906 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1907 } /* ixgbe_setup_vlan_hw_support */
1908 
1909 /************************************************************************
1910  * ixgbe_get_slot_info
1911  *
1912  *   Get the width and transaction speed of
1913  *   the slot this adapter is plugged into.
1914  ************************************************************************/
1915 static void
1916 ixgbe_get_slot_info(struct adapter *adapter)
1917 {
1918 	device_t        dev = iflib_get_dev(adapter->ctx);
1919 	struct ixgbe_hw *hw = &adapter->hw;
1920 	int             bus_info_valid = TRUE;
1921 	u32             offset;
1922 	u16             link;
1923 
1924 	/* Some devices are behind an internal bridge */
1925 	switch (hw->device_id) {
1926 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1927 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1928 		goto get_parent_info;
1929 	default:
1930 		break;
1931 	}
1932 
1933 	ixgbe_get_bus_info(hw);
1934 
1935 	/*
1936 	 * Some devices don't use PCI-E, so there is no need
1937 	 * to display "Unknown" for their bus speed and width.
1938 	 */
1939 	switch (hw->mac.type) {
1940 	case ixgbe_mac_X550EM_x:
1941 	case ixgbe_mac_X550EM_a:
1942 		return;
1943 	default:
1944 		goto display;
1945 	}
1946 
1947 get_parent_info:
1948 	/*
1949 	 * For the Quad port adapter we need to parse back
1950 	 * up the PCI tree to find the speed of the expansion
1951 	 * slot into which this adapter is plugged. A bit more work.
1952 	 */
1953 	dev = device_get_parent(device_get_parent(dev));
1954 #ifdef IXGBE_DEBUG
1955 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1956 	    pci_get_slot(dev), pci_get_function(dev));
1957 #endif
1958 	dev = device_get_parent(device_get_parent(dev));
1959 #ifdef IXGBE_DEBUG
1960 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1961 	    pci_get_slot(dev), pci_get_function(dev));
1962 #endif
1963 	/* Now get the PCI Express Capabilities offset */
1964 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1965 		/*
1966 		 * Hmm...can't get PCI-Express capabilities.
1967 		 * Falling back to default method.
1968 		 */
1969 		bus_info_valid = FALSE;
1970 		ixgbe_get_bus_info(hw);
1971 		goto display;
1972 	}
1973 	/* ...and read the Link Status Register */
1974 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1975 	ixgbe_set_pci_config_data_generic(hw, link);
1976 
1977 display:
1978 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1979 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1980 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1981 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1982 	     "Unknown"),
1983 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1984 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1985 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1986 	     "Unknown"));
1987 
1988 	if (bus_info_valid) {
1989 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1990 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1991 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1992 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1993 			device_printf(dev, "For optimal performance an x8 PCIE or x4 PCIE Gen2 slot is required.\n");
1994 		}
1995 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1996 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1997 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1998 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1999 			device_printf(dev, "For optimal performance an x8 PCIE Gen3 slot is required.\n");
2000 		}
2001 	} else
2002 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
2003 
2004 	return;
2005 } /* ixgbe_get_slot_info */
2006 
2007 /************************************************************************
2008  * ixgbe_if_msix_intr_assign
2009  *
2010  *   Setup MSI-X Interrupt resources and handlers
2011  ************************************************************************/
2012 static int
2013 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2014 {
2015 	struct adapter     *adapter = iflib_get_softc(ctx);
2016 	struct ix_rx_queue *rx_que = adapter->rx_queues;
2017 	struct ix_tx_queue *tx_que;
2018 	int                error, rid, vector = 0;
2019 	int                cpu_id = 0;
2020 	char               buf[16];
2021 
2022 	/* Queue vectors are assigned first; the admin/link vector comes last */
2023 	rid = vector + 1;
2024 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2025 		rid = vector + 1;
2026 
2027 		snprintf(buf, sizeof(buf), "rxq%d", i);
2028 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2029 		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2030 
2031 		if (error) {
2032 			device_printf(iflib_get_dev(ctx),
2033 			    "Failed to allocate queue interrupt %d, err: %d\n", i, error);
2034 			adapter->num_rx_queues = i + 1;
2035 			goto fail;
2036 		}
2037 
2038 		rx_que->msix = vector;
2039 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2040 			/*
2041 			 * The queue ID is used as the RSS layer bucket ID.
2042 			 * We look up the queue ID -> RSS CPU ID and select
2043 			 * that.
2044 			 */
2045 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2046 		} else {
2047 			/*
2048 			 * Bind the MSI-X vector, and thus the
2049 			 * rings to the corresponding cpu.
2050 			 *
2051 			 * This just happens to match the default RSS
2052 			 * round-robin bucket -> queue -> CPU allocation.
2053 			 */
2054 			if (adapter->num_rx_queues > 1)
2055 				cpu_id = i;
2056 		}
2057 
2058 	}
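	/*
	 * TX queues do not get dedicated vectors; each TX softirq is hung
	 * off the interrupt of the RX queue it is paired with below.
	 */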
2059 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2060 		snprintf(buf, sizeof(buf), "txq%d", i);
2061 		tx_que = &adapter->tx_queues[i];
2062 		tx_que->msix = i % adapter->num_rx_queues;
2063 		iflib_softirq_alloc_generic(ctx,
2064 		    &adapter->rx_queues[tx_que->msix].que_irq,
2065 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2066 	}
2067 	rid = vector + 1;
2068 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2069 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2070 	if (error) {
2071 		device_printf(iflib_get_dev(ctx),
2072 		    "Failed to register admin handler\n");
2073 		return (error);
2074 	}
2075 
2076 	adapter->vector = vector;
2077 
2078 	return (0);
2079 fail:
2080 	iflib_irq_free(ctx, &adapter->irq);
2081 	rx_que = adapter->rx_queues;
2082 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2083 		iflib_irq_free(ctx, &rx_que->que_irq);
2084 
2085 	return (error);
2086 } /* ixgbe_if_msix_intr_assign */
2087 
2088 /*********************************************************************
2089  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2090  **********************************************************************/
2091 static int
2092 ixgbe_msix_que(void *arg)
2093 {
2094 	struct ix_rx_queue *que = arg;
2095 	struct adapter     *adapter = que->adapter;
2096 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2097 
2098 	/* Protect against spurious interrupts */
2099 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2100 		return (FILTER_HANDLED);
2101 
2102 	ixgbe_disable_queue(adapter, que->msix);
2103 	++que->irqs;
2104 
2105 	return (FILTER_SCHEDULE_THREAD);
2106 } /* ixgbe_msix_que */
2107 
2108 /************************************************************************
2109  * ixgbe_if_media_status - Media Ioctl callback
2110  *
2111  *   Called whenever the user queries the status of
2112  *   the interface using ifconfig.
2113  ************************************************************************/
2114 static void
2115 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2116 {
2117 	struct adapter  *adapter = iflib_get_softc(ctx);
2118 	struct ixgbe_hw *hw = &adapter->hw;
2119 	int             layer;
2120 
2121 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2122 
2123 	ifmr->ifm_status = IFM_AVALID;
2124 	ifmr->ifm_active = IFM_ETHER;
2125 
2126 	if (!adapter->link_active)
2127 		return;
2128 
2129 	ifmr->ifm_status |= IFM_ACTIVE;
2130 	layer = adapter->phy_layer;
2131 
2132 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2133 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2134 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2135 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2136 		switch (adapter->link_speed) {
2137 		case IXGBE_LINK_SPEED_10GB_FULL:
2138 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2139 			break;
2140 		case IXGBE_LINK_SPEED_1GB_FULL:
2141 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2142 			break;
2143 		case IXGBE_LINK_SPEED_100_FULL:
2144 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2145 			break;
2146 		case IXGBE_LINK_SPEED_10_FULL:
2147 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2148 			break;
2149 		}
2150 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2151 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2152 		switch (adapter->link_speed) {
2153 		case IXGBE_LINK_SPEED_10GB_FULL:
2154 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2155 			break;
2156 		}
2157 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2158 		switch (adapter->link_speed) {
2159 		case IXGBE_LINK_SPEED_10GB_FULL:
2160 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2161 			break;
2162 		case IXGBE_LINK_SPEED_1GB_FULL:
2163 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2164 			break;
2165 		}
2166 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2167 		switch (adapter->link_speed) {
2168 		case IXGBE_LINK_SPEED_10GB_FULL:
2169 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2170 			break;
2171 		case IXGBE_LINK_SPEED_1GB_FULL:
2172 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2173 			break;
2174 		}
2175 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2176 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2177 		switch (adapter->link_speed) {
2178 		case IXGBE_LINK_SPEED_10GB_FULL:
2179 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2180 			break;
2181 		case IXGBE_LINK_SPEED_1GB_FULL:
2182 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2183 			break;
2184 		}
2185 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2186 		switch (adapter->link_speed) {
2187 		case IXGBE_LINK_SPEED_10GB_FULL:
2188 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2189 			break;
2190 		}
2191 	/*
2192 	 * XXX: These need to use the proper media types once
2193 	 * they're added.
2194 	 */
2195 #ifndef IFM_ETH_XTYPE
2196 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2197 		switch (adapter->link_speed) {
2198 		case IXGBE_LINK_SPEED_10GB_FULL:
2199 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2200 			break;
2201 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2202 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2203 			break;
2204 		case IXGBE_LINK_SPEED_1GB_FULL:
2205 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2206 			break;
2207 		}
2208 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2209 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2210 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2211 		switch (adapter->link_speed) {
2212 		case IXGBE_LINK_SPEED_10GB_FULL:
2213 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2214 			break;
2215 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2216 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2217 			break;
2218 		case IXGBE_LINK_SPEED_1GB_FULL:
2219 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2220 			break;
2221 		}
2222 #else
2223 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2224 		switch (adapter->link_speed) {
2225 		case IXGBE_LINK_SPEED_10GB_FULL:
2226 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2227 			break;
2228 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2229 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2230 			break;
2231 		case IXGBE_LINK_SPEED_1GB_FULL:
2232 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2233 			break;
2234 		}
2235 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2236 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2237 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2238 		switch (adapter->link_speed) {
2239 		case IXGBE_LINK_SPEED_10GB_FULL:
2240 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2241 			break;
2242 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2243 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2244 			break;
2245 		case IXGBE_LINK_SPEED_1GB_FULL:
2246 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2247 			break;
2248 		}
2249 #endif
2250 
2251 	/* If nothing is recognized... */
2252 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2253 		ifmr->ifm_active |= IFM_UNKNOWN;
2254 
2255 	/* Display current flow control setting used on link */
2256 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2257 	    hw->fc.current_mode == ixgbe_fc_full)
2258 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2259 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2260 	    hw->fc.current_mode == ixgbe_fc_full)
2261 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2262 } /* ixgbe_if_media_status */
2263 
2264 /************************************************************************
2265  * ixgbe_if_media_change - Media Ioctl callback
2266  *
2267  *   Called when the user changes speed/duplex using
2268  *   the media/mediaopt option with ifconfig.
2269  ************************************************************************/
2270 static int
2271 ixgbe_if_media_change(if_ctx_t ctx)
2272 {
2273 	struct adapter   *adapter = iflib_get_softc(ctx);
2274 	struct ifmedia   *ifm = iflib_get_media(ctx);
2275 	struct ixgbe_hw  *hw = &adapter->hw;
2276 	ixgbe_link_speed speed = 0;
2277 
2278 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2279 
2280 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2281 		return (EINVAL);
2282 
2283 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2284 		return (EPERM);
2285 
2286 	/*
2287 	 * We don't actually need to check against the supported
2288 	 * media types of the adapter; ifmedia will take care of
2289 	 * that for us.
2290 	 */
2291 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2292 	case IFM_AUTO:
2293 	case IFM_10G_T:
2294 		speed |= IXGBE_LINK_SPEED_100_FULL;
2295 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2296 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2297 		break;
2298 	case IFM_10G_LRM:
2299 	case IFM_10G_LR:
2300 #ifndef IFM_ETH_XTYPE
2301 	case IFM_10G_SR: /* KR, too */
2302 	case IFM_10G_CX4: /* KX4 */
2303 #else
2304 	case IFM_10G_KR:
2305 	case IFM_10G_KX4:
2306 #endif
2307 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2308 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2309 		break;
2310 #ifndef IFM_ETH_XTYPE
2311 	case IFM_1000_CX: /* KX */
2312 #else
2313 	case IFM_1000_KX:
2314 #endif
2315 	case IFM_1000_LX:
2316 	case IFM_1000_SX:
2317 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2318 		break;
2319 	case IFM_1000_T:
2320 		speed |= IXGBE_LINK_SPEED_100_FULL;
2321 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2322 		break;
2323 	case IFM_10G_TWINAX:
2324 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2325 		break;
2326 	case IFM_100_TX:
2327 		speed |= IXGBE_LINK_SPEED_100_FULL;
2328 		break;
2329 	case IFM_10_T:
2330 		speed |= IXGBE_LINK_SPEED_10_FULL;
2331 		break;
2332 	default:
2333 		goto invalid;
2334 	}
2335 
2336 	hw->mac.autotry_restart = TRUE;
2337 	hw->mac.ops.setup_link(hw, speed, TRUE);
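	/*
	 * Record the request using the same bit encoding the advertise_speed
	 * sysctl uses: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M (so, for
	 * example, a value of 6 means 1G + 10G are advertised).
	 */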
2338 	adapter->advertise =
2339 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2340 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2341 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2342 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2343 
2344 	return (0);
2345 
2346 invalid:
2347 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2348 
2349 	return (EINVAL);
2350 } /* ixgbe_if_media_change */
2351 
2352 /************************************************************************
2353  * ixgbe_if_promisc_set
2354  ************************************************************************/
2355 static int
2356 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2357 {
2358 	struct adapter *adapter = iflib_get_softc(ctx);
2359 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2360 	u32            rctl;
2361 	int            mcnt = 0;
2362 
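	/*
	 * Start from the current FCTRL value with unicast promiscuous
	 * cleared; multicast promiscuous stays on only if the multicast
	 * filter would overflow, or if ALLMULTI/PROMISC is requested below.
	 */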
2363 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2364 	rctl &= (~IXGBE_FCTRL_UPE);
2365 	if (ifp->if_flags & IFF_ALLMULTI)
2366 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2367 	else {
2368 		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
2369 	}
2370 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2371 		rctl &= (~IXGBE_FCTRL_MPE);
2372 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2373 
2374 	if (ifp->if_flags & IFF_PROMISC) {
2375 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2376 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2377 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2378 		rctl |= IXGBE_FCTRL_MPE;
2379 		rctl &= ~IXGBE_FCTRL_UPE;
2380 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2381 	}
2382 	return (0);
2383 } /* ixgbe_if_promisc_set */
2384 
2385 /************************************************************************
2386  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2387  ************************************************************************/
2388 static int
2389 ixgbe_msix_link(void *arg)
2390 {
2391 	struct adapter  *adapter = arg;
2392 	struct ixgbe_hw *hw = &adapter->hw;
2393 	u32             eicr, eicr_mask;
2394 	s32             retval;
2395 
2396 	++adapter->link_irq;
2397 
2398 	/* Pause other interrupts */
2399 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2400 
2401 	/* First get the cause */
2402 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2403 	/* Be sure the queue bits are not cleared */
2404 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2405 	/* Clear interrupt with write */
2406 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2407 
2408 	/* Link status change */
2409 	if (eicr & IXGBE_EICR_LSC) {
2410 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2411 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2412 	}
2413 
2414 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2415 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2416 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2417 			/* This is probably overkill :) */
2418 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2419 				return (FILTER_HANDLED);
2420 			/* Disable the interrupt */
2421 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2422 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2423 		} else
2424 			if (eicr & IXGBE_EICR_ECC) {
2425 				device_printf(iflib_get_dev(adapter->ctx),
2426 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2427 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2428 			}
2429 
2430 		/* Check for over temp condition */
2431 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2432 			switch (adapter->hw.mac.type) {
2433 			case ixgbe_mac_X550EM_a:
2434 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2435 					break;
2436 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2437 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2438 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2439 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2440 				retval = hw->phy.ops.check_overtemp(hw);
2441 				if (retval != IXGBE_ERR_OVERTEMP)
2442 					break;
2443 				device_printf(iflib_get_dev(adapter->ctx),
2444 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2445 				device_printf(iflib_get_dev(adapter->ctx),
2446 				    "System shutdown required!\n");
2447 				break;
2448 			default:
2449 				if (!(eicr & IXGBE_EICR_TS))
2450 					break;
2451 				retval = hw->phy.ops.check_overtemp(hw);
2452 				if (retval != IXGBE_ERR_OVERTEMP)
2453 					break;
2454 				device_printf(iflib_get_dev(adapter->ctx),
2455 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2456 				device_printf(iflib_get_dev(adapter->ctx),
2457 				    "System shutdown required!\n");
2458 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2459 				break;
2460 			}
2461 		}
2462 
2463 		/* Check for VF message */
2464 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2465 		    (eicr & IXGBE_EICR_MAILBOX))
2466 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2467 	}
2468 
2469 	if (ixgbe_is_sfp(hw)) {
2470 		/* Pluggable optics-related interrupt */
2471 		if (hw->mac.type >= ixgbe_mac_X540)
2472 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2473 		else
2474 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2475 
2476 		if (eicr & eicr_mask) {
2477 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2478 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2479 		}
2480 
2481 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2482 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2483 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2484 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2485 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2486 		}
2487 	}
2488 
2489 	/* Check for fan failure */
2490 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2491 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2492 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2493 	}
2494 
2495 	/* External PHY interrupt */
2496 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2497 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2498 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2499 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2500 	}
2501 
2502 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2503 } /* ixgbe_msix_link */
2504 
2505 /************************************************************************
2506  * ixgbe_sysctl_interrupt_rate_handler
2507  ************************************************************************/
2508 static int
2509 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2510 {
2511 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2512 	int                error;
2513 	unsigned int       reg, usec, rate;
2514 
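	/*
	 * EITR bits [11:3] hold the interrupt interval, which this driver
	 * programs in 2 usec units: interrupts/sec = 500000 / interval, and
	 * the write path below stores (4000000 / rate), i.e. the interval
	 * already shifted into place.
	 */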
2515 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2516 	usec = ((reg & 0x0FF8) >> 3);
2517 	if (usec > 0)
2518 		rate = 500000 / usec;
2519 	else
2520 		rate = 0;
2521 	error = sysctl_handle_int(oidp, &rate, 0, req);
2522 	if (error || !req->newptr)
2523 		return error;
2524 	reg &= ~0xfff; /* default, no limitation */
2525 	ixgbe_max_interrupt_rate = 0;
2526 	if (rate > 0 && rate < 500000) {
2527 		if (rate < 1000)
2528 			rate = 1000;
2529 		ixgbe_max_interrupt_rate = rate;
2530 		reg |= ((4000000/rate) & 0xff8);
2531 	}
2532 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2533 
2534 	return (0);
2535 } /* ixgbe_sysctl_interrupt_rate_handler */
2536 
2537 /************************************************************************
2538  * ixgbe_add_device_sysctls
2539  ************************************************************************/
2540 static void
2541 ixgbe_add_device_sysctls(if_ctx_t ctx)
2542 {
2543 	struct adapter         *adapter = iflib_get_softc(ctx);
2544 	device_t               dev = iflib_get_dev(ctx);
2545 	struct ixgbe_hw        *hw = &adapter->hw;
2546 	struct sysctl_oid_list *child;
2547 	struct sysctl_ctx_list *ctx_list;
2548 
2549 	ctx_list = device_get_sysctl_ctx(dev);
2550 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2551 
2552 	/* Sysctls for all devices */
2553 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2554 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2555 	    adapter, 0, ixgbe_sysctl_flowcntl, "I",
2556 	    IXGBE_SYSCTL_DESC_SET_FC);
2557 
2558 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2559 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2560 	    adapter, 0, ixgbe_sysctl_advertise, "I",
2561 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2562 
2563 #ifdef IXGBE_DEBUG
2564 	/* testing sysctls (for all devices) */
2565 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2566 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2567 	    adapter, 0, ixgbe_sysctl_power_state,
2568 	    "I", "PCI Power State");
2569 
2570 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2571 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2572 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2573 #endif
2574 	/* for X550 series devices */
2575 	if (hw->mac.type >= ixgbe_mac_X550)
2576 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2577 		    CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2578 		    adapter, 0, ixgbe_sysctl_dmac,
2579 		    "I", "DMA Coalesce");
2580 
2581 	/* for WoL-capable devices */
2582 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2583 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2584 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2585 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2586 
2587 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2588 		    CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2589 		    adapter, 0, ixgbe_sysctl_wufc,
2590 		    "I", "Enable/Disable Wake Up Filters");
2591 	}
2592 
2593 	/* for X552/X557-AT devices */
2594 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2595 		struct sysctl_oid *phy_node;
2596 		struct sysctl_oid_list *phy_list;
2597 
2598 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2599 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2600 		phy_list = SYSCTL_CHILDREN(phy_node);
2601 
2602 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2603 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2604 		    adapter, 0, ixgbe_sysctl_phy_temp,
2605 		    "I", "Current External PHY Temperature (Celsius)");
2606 
2607 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2608 		    "overtemp_occurred",
2609 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2610 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2611 		    "External PHY High Temperature Event Occurred");
2612 	}
2613 
2614 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2615 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2616 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2617 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2618 	}
2619 } /* ixgbe_add_device_sysctls */
2620 
2621 /************************************************************************
2622  * ixgbe_allocate_pci_resources
2623  ************************************************************************/
2624 static int
2625 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2626 {
2627 	struct adapter *adapter = iflib_get_softc(ctx);
2628 	device_t        dev = iflib_get_dev(ctx);
2629 	int             rid;
2630 
2631 	rid = PCIR_BAR(0);
2632 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2633 	    RF_ACTIVE);
2634 
2635 	if (!(adapter->pci_mem)) {
2636 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2637 		return (ENXIO);
2638 	}
2639 
2640 	/* Save bus_space values for READ/WRITE_REG macros */
2641 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2642 	adapter->osdep.mem_bus_space_handle =
2643 	    rman_get_bushandle(adapter->pci_mem);
2644 	/* Set hw values for shared code */
2645 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2646 
2647 	return (0);
2648 } /* ixgbe_allocate_pci_resources */
2649 
2650 /************************************************************************
2651  * ixgbe_if_detach - Device removal routine
2652  *
2653  *   Called when the driver is being removed.
2654  *   Stops the adapter and deallocates all the resources
2655  *   that were allocated for driver operation.
2656  *
2657  *   return 0 on success, positive on failure
2658  ************************************************************************/
2659 static int
2660 ixgbe_if_detach(if_ctx_t ctx)
2661 {
2662 	struct adapter *adapter = iflib_get_softc(ctx);
2663 	device_t       dev = iflib_get_dev(ctx);
2664 	u32            ctrl_ext;
2665 
2666 	INIT_DEBUGOUT("ixgbe_detach: begin");
2667 
2668 	if (ixgbe_pci_iov_detach(dev) != 0) {
2669 		device_printf(dev, "SR-IOV in use; detach first.\n");
2670 		return (EBUSY);
2671 	}
2672 
2673 	ixgbe_setup_low_power_mode(ctx);
2674 
2675 	/* let hardware know driver is unloading */
2676 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2677 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2678 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2679 
2680 	ixgbe_free_pci_resources(ctx);
2681 	free(adapter->mta, M_IXGBE);
2682 
2683 	return (0);
2684 } /* ixgbe_if_detach */
2685 
2686 /************************************************************************
2687  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2688  *
2689  *   Prepare the adapter/port for LPLU and/or WoL
2690  ************************************************************************/
2691 static int
2692 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2693 {
2694 	struct adapter  *adapter = iflib_get_softc(ctx);
2695 	struct ixgbe_hw *hw = &adapter->hw;
2696 	device_t        dev = iflib_get_dev(ctx);
2697 	s32             error = 0;
2698 
2699 	if (!hw->wol_enabled)
2700 		ixgbe_set_phy_power(hw, FALSE);
2701 
2702 	/* Limit power management flow to X550EM baseT */
2703 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2704 	    hw->phy.ops.enter_lplu) {
2705 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2706 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2707 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2708 
2709 		/*
2710 		 * Clear Wake Up Status register to prevent any previous wakeup
2711 		 * events from waking us up immediately after we suspend.
2712 		 */
2713 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2714 
2715 		/*
2716 		 * Program the Wakeup Filter Control register with user filter
2717 		 * settings
2718 		 */
2719 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2720 
2721 		/* Enable wakeups and power management in Wakeup Control */
2722 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2723 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2724 
2725 		/* X550EM baseT adapters need a special LPLU flow */
2726 		hw->phy.reset_disable = TRUE;
2727 		ixgbe_if_stop(ctx);
2728 		error = hw->phy.ops.enter_lplu(hw);
2729 		if (error)
2730 			device_printf(dev, "Error entering LPLU: %d\n", error);
2731 		hw->phy.reset_disable = FALSE;
2732 	} else {
2733 		/* Just stop for other adapters */
2734 		ixgbe_if_stop(ctx);
2735 	}
2736 
2737 	return error;
2738 } /* ixgbe_setup_low_power_mode */
2739 
2740 /************************************************************************
2741  * ixgbe_if_shutdown - Shutdown entry point
2742  ************************************************************************/
2743 static int
2744 ixgbe_if_shutdown(if_ctx_t ctx)
2745 {
2746 	int error = 0;
2747 
2748 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2749 
2750 	error = ixgbe_setup_low_power_mode(ctx);
2751 
2752 	return (error);
2753 } /* ixgbe_if_shutdown */
2754 
2755 /************************************************************************
2756  * ixgbe_if_suspend
2757  *
2758  *   From D0 to D3
2759  ************************************************************************/
2760 static int
2761 ixgbe_if_suspend(if_ctx_t ctx)
2762 {
2763 	int error = 0;
2764 
2765 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2766 
2767 	error = ixgbe_setup_low_power_mode(ctx);
2768 
2769 	return (error);
2770 } /* ixgbe_if_suspend */
2771 
2772 /************************************************************************
2773  * ixgbe_if_resume
2774  *
2775  *   From D3 to D0
2776  ************************************************************************/
2777 static int
2778 ixgbe_if_resume(if_ctx_t ctx)
2779 {
2780 	struct adapter  *adapter = iflib_get_softc(ctx);
2781 	device_t        dev = iflib_get_dev(ctx);
2782 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2783 	struct ixgbe_hw *hw = &adapter->hw;
2784 	u32             wus;
2785 
2786 	INIT_DEBUGOUT("ixgbe_resume: begin");
2787 
2788 	/* Read & clear WUS register */
2789 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2790 	if (wus)
2791 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2792 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2793 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2794 	/* And clear WUFC until next low-power transition */
2795 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2796 
2797 	/*
2798 	 * Required after D3->D0 transition;
2799 	 * will re-advertise all previous advertised speeds
2800 	 */
2801 	if (ifp->if_flags & IFF_UP)
2802 		ixgbe_if_init(ctx);
2803 
2804 	return (0);
2805 } /* ixgbe_if_resume */
2806 
2807 /************************************************************************
2808  * ixgbe_if_mtu_set - Ioctl mtu entry point
2809  *
2810  *   Return 0 on success, EINVAL on failure
2811  ************************************************************************/
2812 static int
2813 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2814 {
2815 	struct adapter *adapter = iflib_get_softc(ctx);
2816 	int error = 0;
2817 
2818 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2819 
2820 	if (mtu > IXGBE_MAX_MTU) {
2821 		error = EINVAL;
2822 	} else {
2823 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2824 	}
2825 
2826 	return error;
2827 } /* ixgbe_if_mtu_set */
2828 
2829 /************************************************************************
2830  * ixgbe_if_crcstrip_set
2831  ************************************************************************/
2832 static void
2833 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2834 {
2835 	struct adapter *sc = iflib_get_softc(ctx);
2836 	struct ixgbe_hw *hw = &sc->hw;
2837 	/* crc stripping is set in two places:
2838 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2839 	 * IXGBE_RDRXCTL (set by the original driver in
2840 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2841 	 *	We disable the setting when netmap is compiled in).
2842 	 * We update the values here, but also in ixgbe.c because
2843 	 * init_locked sometimes is called outside our control.
2844 	 */
2845 	uint32_t hl, rxc;
2846 
2847 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2848 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2849 #ifdef NETMAP
2850 	if (netmap_verbose)
2851 		D("%s read  HLREG 0x%x rxc 0x%x",
2852 			onoff ? "enter" : "exit", hl, rxc);
2853 #endif
2854 	/* hw requirements ... */
2855 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2856 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2857 	if (onoff && !crcstrip) {
2858 		/* keep the crc. Fast rx */
2859 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2860 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2861 	} else {
2862 		/* reset default mode */
2863 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2864 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2865 	}
2866 #ifdef NETMAP
2867 	if (netmap_verbose)
2868 		D("%s write HLREG 0x%x rxc 0x%x",
2869 			onoff ? "enter" : "exit", hl, rxc);
2870 #endif
2871 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2872 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2873 } /* ixgbe_if_crcstrip_set */
2874 
2875 /*********************************************************************
2876  * ixgbe_if_init - Init entry point
2877  *
2878  *   Used in two ways: It is used by the stack as an init
2879  *   entry point in network interface structure. It is also
2880  *   used by the driver as a hw/sw initialization routine to
2881  *   get to a consistent state.
2882  *
2883  *   No return value (the function is void).
2884  **********************************************************************/
2885 void
2886 ixgbe_if_init(if_ctx_t ctx)
2887 {
2888 	struct adapter     *adapter = iflib_get_softc(ctx);
2889 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2890 	device_t           dev = iflib_get_dev(ctx);
2891 	struct ixgbe_hw *hw = &adapter->hw;
2892 	struct ix_rx_queue *rx_que;
2893 	struct ix_tx_queue *tx_que;
2894 	u32             txdctl, mhadd;
2895 	u32             rxdctl, rxctrl;
2896 	u32             ctrl_ext;
2897 
2898 	int             i, j, err;
2899 
2900 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2901 
2902 	/* Queue indices may change with IOV mode */
2903 	ixgbe_align_all_queue_indices(adapter);
2904 
2905 	/* reprogram the RAR[0] in case user changed it. */
2906 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2907 
2908 	/* Get the latest mac address, User can use a LAA */
2909 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2910 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2911 	hw->addr_ctrl.rar_used_count = 1;
2912 
2913 	ixgbe_init_hw(hw);
2914 
2915 	ixgbe_initialize_iov(adapter);
2916 
2917 	ixgbe_initialize_transmit_units(ctx);
2918 
2919 	/* Setup Multicast table */
2920 	ixgbe_if_multi_set(ctx);
2921 
2922 	/* Determine the correct mbuf pool, based on frame size */
2923 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2924 
2925 	/* Configure RX settings */
2926 	ixgbe_initialize_receive_units(ctx);
2927 
2928 	/*
2929 	 * Initialize variable holding task enqueue requests
2930 	 * from MSI-X interrupts
2931 	 */
2932 	adapter->task_requests = 0;
2933 
2934 	/* Enable SDP & MSI-X interrupts based on adapter */
2935 	ixgbe_config_gpie(adapter);
2936 
2937 	/* Set MTU size */
2938 	if (ifp->if_mtu > ETHERMTU) {
2939 		/* aka IXGBE_MAXFRS on 82599 and newer */
2940 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2941 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2942 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2943 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2944 	}
2945 
2946 	/* Now enable all the queues */
2947 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2948 		struct tx_ring *txr = &tx_que->txr;
2949 
2950 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2951 		txdctl |= IXGBE_TXDCTL_ENABLE;
2952 		/* Set WTHRESH to 8, burst writeback */
2953 		txdctl |= (8 << 16);
2954 		/*
2955 		 * When the internal queue falls below PTHRESH (32),
2956 		 * start prefetching as long as there are at least
2957 		 * HTHRESH (1) buffers ready. The values are taken
2958 		 * from the Intel linux driver 3.8.21.
2959 		 * Prefetching enables tx line rate even with 1 queue.
2960 		 */
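		/*
		 * TXDCTL packs PTHRESH in bits [6:0], HTHRESH in bits [14:8]
		 * and WTHRESH in bits [22:16]; WTHRESH is shifted by 16
		 * above and PTHRESH/HTHRESH by 0 and 8 below.
		 */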
2961 		txdctl |= (32 << 0) | (1 << 8);
2962 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2963 	}
2964 
2965 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2966 		struct rx_ring *rxr = &rx_que->rxr;
2967 
2968 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2969 		if (hw->mac.type == ixgbe_mac_82598EB) {
2970 			/*
2971 			 * 0x080420 sets PTHRESH = 0x20 (32),
2972 			 * HTHRESH = 4,
2973 			 * WTHRESH = 8
2974 			 */
2975 			rxdctl &= ~0x3FFFFF;
2976 			rxdctl |= 0x080420;
2977 		}
2978 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2979 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
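		/* Wait up to ~10 ms for the hardware to latch the enable bit. */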
2980 		for (j = 0; j < 10; j++) {
2981 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2982 			    IXGBE_RXDCTL_ENABLE)
2983 				break;
2984 			else
2985 				msec_delay(1);
2986 		}
2987 		wmb();
2988 	}
2989 
2990 	/* Enable Receive engine */
2991 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2992 	if (hw->mac.type == ixgbe_mac_82598EB)
2993 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2994 	rxctrl |= IXGBE_RXCTRL_RXEN;
2995 	ixgbe_enable_rx_dma(hw, rxctrl);
2996 
2997 	/* Set up MSI/MSI-X routing */
2998 	if (ixgbe_enable_msix)  {
2999 		ixgbe_configure_ivars(adapter);
3000 		/* Set up auto-mask */
3001 		if (hw->mac.type == ixgbe_mac_82598EB)
3002 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3003 		else {
3004 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3005 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3006 		}
3007 	} else {  /* Simple settings for Legacy/MSI */
3008 		ixgbe_set_ivar(adapter, 0, 0, 0);
3009 		ixgbe_set_ivar(adapter, 0, 0, 1);
3010 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3011 	}
3012 
3013 	ixgbe_init_fdir(adapter);
3014 
3015 	/*
3016 	 * Check on any SFP devices that
3017 	 * need to be kick-started
3018 	 */
3019 	if (hw->phy.type == ixgbe_phy_none) {
3020 		err = hw->phy.ops.identify(hw);
3021 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3022 			device_printf(dev,
3023 			    "Unsupported SFP+ module type was detected.\n");
3024 			return;
3025 		}
3026 	}
3027 
3028 	/* Set moderation on the Link interrupt */
3029 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3030 
3031 	/* Enable power to the phy. */
3032 	ixgbe_set_phy_power(hw, TRUE);
3033 
3034 	/* Config/Enable Link */
3035 	ixgbe_config_link(ctx);
3036 
3037 	/* Hardware Packet Buffer & Flow Control setup */
3038 	ixgbe_config_delay_values(adapter);
3039 
3040 	/* Initialize the FC settings */
3041 	ixgbe_start_hw(hw);
3042 
3043 	/* Set up VLAN support and filter */
3044 	ixgbe_setup_vlan_hw_support(ctx);
3045 
3046 	/* Setup DMA Coalescing */
3047 	ixgbe_config_dmac(adapter);
3048 
3049 	/* And now turn on interrupts */
3050 	ixgbe_if_enable_intr(ctx);
3051 
3052 	/* Enable the use of the MBX by the VF's */
3053 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3054 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3055 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3056 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3057 	}
3058 
3059 } /* ixgbe_if_init */
3060 
3061 /************************************************************************
3062  * ixgbe_set_ivar
3063  *
3064  *   Setup the correct IVAR register for a particular MSI-X interrupt
3065  *     (yes this is all very magic and confusing :)
3066  *    - entry is the register array entry
3067  *    - vector is the MSI-X vector for this queue
3068  *    - type is RX/TX/MISC
3069  ************************************************************************/
3070 static void
3071 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3072 {
3073 	struct ixgbe_hw *hw = &adapter->hw;
3074 	u32 ivar, index;
3075 
3076 	vector |= IXGBE_IVAR_ALLOC_VAL;
3077 
3078 	switch (hw->mac.type) {
3079 	case ixgbe_mac_82598EB:
3080 		if (type == -1)
3081 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3082 		else
3083 			entry += (type * 64);
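		/*
		 * Four 8-bit entries are packed into each 32-bit IVAR
		 * register: bits [1:0] of the entry select the byte and the
		 * upper bits select the register.
		 */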
3084 		index = (entry >> 2) & 0x1F;
3085 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3086 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3087 		ivar |= (vector << (8 * (entry & 0x3)));
3088 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3089 		break;
3090 	case ixgbe_mac_82599EB:
3091 	case ixgbe_mac_X540:
3092 	case ixgbe_mac_X550:
3093 	case ixgbe_mac_X550EM_x:
3094 	case ixgbe_mac_X550EM_a:
3095 		if (type == -1) { /* MISC IVAR */
3096 			index = (entry & 1) * 8;
3097 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3098 			ivar &= ~(0xFF << index);
3099 			ivar |= (vector << index);
3100 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3101 		} else {          /* RX/TX IVARS */
3102 			index = (16 * (entry & 1)) + (8 * type);
3103 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3104 			ivar &= ~(0xFF << index);
3105 			ivar |= (vector << index);
3106 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3107 		}
3108 	default:
3109 		break;
3110 	}
3111 } /* ixgbe_set_ivar */
3112 
3113 /************************************************************************
3114  * ixgbe_configure_ivars
3115  ************************************************************************/
3116 static void
3117 ixgbe_configure_ivars(struct adapter *adapter)
3118 {
3119 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3120 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3121 	u32                newitr;
3122 
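	/*
	 * The 0x0FF8 mask keeps the value within the EITR interval field
	 * (bits 11:3); for example, a requested rate of 8000 interrupts/s
	 * gives 4000000 / 8000 = 500, masked down to 0x1F0 (496).
	 */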
3123 	if (ixgbe_max_interrupt_rate > 0)
3124 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3125 	else {
3126 		/*
3127 		 * Disable DMA coalescing if interrupt moderation is
3128 		 * disabled.
3129 		 */
3130 		adapter->dmac = 0;
3131 		newitr = 0;
3132 	}
3133 
3134 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3135 		struct rx_ring *rxr = &rx_que->rxr;
3136 
3137 		/* First the RX queue entry */
3138 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3139 
3140 		/* Set an Initial EITR value */
3141 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3142 	}
3143 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3144 		struct tx_ring *txr = &tx_que->txr;
3145 
3146 		/* ... and the TX */
3147 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3148 	}
3149 	/* For the Link interrupt */
3150 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3151 } /* ixgbe_configure_ivars */
3152 
3153 /************************************************************************
3154  * ixgbe_config_gpie
3155  ************************************************************************/
3156 static void
3157 ixgbe_config_gpie(struct adapter *adapter)
3158 {
3159 	struct ixgbe_hw *hw = &adapter->hw;
3160 	u32             gpie;
3161 
3162 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3163 
3164 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3165 		/* Enable Enhanced MSI-X mode */
3166 		gpie |= IXGBE_GPIE_MSIX_MODE
3167 		     |  IXGBE_GPIE_EIAME
3168 		     |  IXGBE_GPIE_PBA_SUPPORT
3169 		     |  IXGBE_GPIE_OCD;
3170 	}
3171 
3172 	/* Fan Failure Interrupt */
3173 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3174 		gpie |= IXGBE_SDP1_GPIEN;
3175 
3176 	/* Thermal Sensor Interrupt */
3177 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3178 		gpie |= IXGBE_SDP0_GPIEN_X540;
3179 
3180 	/* Link detection */
3181 	switch (hw->mac.type) {
3182 	case ixgbe_mac_82599EB:
3183 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3184 		break;
3185 	case ixgbe_mac_X550EM_x:
3186 	case ixgbe_mac_X550EM_a:
3187 		gpie |= IXGBE_SDP0_GPIEN_X540;
3188 		break;
3189 	default:
3190 		break;
3191 	}
3192 
3193 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3194 
3195 } /* ixgbe_config_gpie */
3196 
3197 /************************************************************************
3198  * ixgbe_config_delay_values
3199  *
3200  *   Requires adapter->max_frame_size to be set.
3201  ************************************************************************/
3202 static void
3203 ixgbe_config_delay_values(struct adapter *adapter)
3204 {
3205 	struct ixgbe_hw *hw = &adapter->hw;
3206 	u32             rxpb, frame, size, tmp;
3207 
3208 	frame = adapter->max_frame_size;
3209 
3210 	/* Calculate High Water */
3211 	switch (hw->mac.type) {
3212 	case ixgbe_mac_X540:
3213 	case ixgbe_mac_X550:
3214 	case ixgbe_mac_X550EM_x:
3215 	case ixgbe_mac_X550EM_a:
3216 		tmp = IXGBE_DV_X540(frame, frame);
3217 		break;
3218 	default:
3219 		tmp = IXGBE_DV(frame, frame);
3220 		break;
3221 	}
3222 	size = IXGBE_BT2KB(tmp);
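	/* Convert the RX packet buffer size to KB to match the BT2KB() value above. */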
3223 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3224 	hw->fc.high_water[0] = rxpb - size;
3225 
3226 	/* Now calculate Low Water */
3227 	switch (hw->mac.type) {
3228 	case ixgbe_mac_X540:
3229 	case ixgbe_mac_X550:
3230 	case ixgbe_mac_X550EM_x:
3231 	case ixgbe_mac_X550EM_a:
3232 		tmp = IXGBE_LOW_DV_X540(frame);
3233 		break;
3234 	default:
3235 		tmp = IXGBE_LOW_DV(frame);
3236 		break;
3237 	}
3238 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3239 
3240 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3241 	hw->fc.send_xon = TRUE;
3242 } /* ixgbe_config_delay_values */
3243 
3244 /************************************************************************
3245  * ixgbe_if_multi_set - Multicast Update
3246  *
3247  *   Called whenever multicast address list is updated.
3248  ************************************************************************/
3249 static u_int
3250 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count)
3251 {
3252 	struct adapter *adapter = arg;
3253 	struct ixgbe_mc_addr *mta = adapter->mta;
3254 
3255 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3256 		return (0);
3257 	bcopy(LLADDR(sdl), mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3258 	mta[count].vmdq = adapter->pool;
3259 
3260 	return (1);
3261 } /* ixgbe_mc_filter_apply */
3262 
3263 static void
3264 ixgbe_if_multi_set(if_ctx_t ctx)
3265 {
3266 	struct adapter       *adapter = iflib_get_softc(ctx);
3267 	struct ixgbe_mc_addr *mta;
3268 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3269 	u8                   *update_ptr;
3270 	u32                  fctrl;
3271 	u_int		     mcnt;
3272 
3273 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3274 
3275 	mta = adapter->mta;
3276 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3277 
3278 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3279 	    adapter);
3280 
3281 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3282 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3283 	if (ifp->if_flags & IFF_PROMISC)
3284 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3285 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3286 	    ifp->if_flags & IFF_ALLMULTI) {
3287 		fctrl |= IXGBE_FCTRL_MPE;
3288 		fctrl &= ~IXGBE_FCTRL_UPE;
3289 	} else
3290 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3291 
3292 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3293 
3294 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3295 		update_ptr = (u8 *)mta;
3296 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3297 		    ixgbe_mc_array_itr, TRUE);
3298 	}
3299 
3300 } /* ixgbe_if_multi_set */
3301 
3302 /************************************************************************
3303  * ixgbe_mc_array_itr
3304  *
3305  *   An iterator function needed by the multicast shared code.
3306  *   It feeds the shared code routine the addresses in the
3307  *   array built by ixgbe_if_multi_set() one by one.
3308  ************************************************************************/
3309 static u8 *
3310 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3311 {
3312 	struct ixgbe_mc_addr *mta;
3313 
3314 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3315 	*vmdq = mta->vmdq;
3316 
3317 	*update_ptr = (u8*)(mta + 1);
3318 
3319 	return (mta->addr);
3320 } /* ixgbe_mc_array_itr */
3321 
3322 /************************************************************************
3323  * ixgbe_if_timer - Timer routine
3324  *
3325  *   Checks for pluggable optics and the cached link state,
3326  *   then defers the remaining work to the admin task.
3327  ************************************************************************/
3328 static void
3329 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3330 {
3331 	struct adapter *adapter = iflib_get_softc(ctx);
3332 
3333 	if (qid != 0)
3334 		return;
3335 
3336 	/* Check for pluggable optics */
3337 	if (adapter->sfp_probe)
3338 		if (!ixgbe_sfp_probe(ctx))
3339 			return; /* Nothing to do */
3340 
3341 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3342 	    &adapter->link_up, 0);
3343 
3344 	/* Fire off the adminq task */
3345 	iflib_admin_intr_deferred(ctx);
3346 
3347 } /* ixgbe_if_timer */
3348 
3349 /************************************************************************
3350  * ixgbe_sfp_probe
3351  *
3352  *   Determine whether a port has had optics inserted.
3353  ************************************************************************/
3354 static bool
3355 ixgbe_sfp_probe(if_ctx_t ctx)
3356 {
3357 	struct adapter  *adapter = iflib_get_softc(ctx);
3358 	struct ixgbe_hw *hw = &adapter->hw;
3359 	device_t        dev = iflib_get_dev(ctx);
3360 	bool            result = FALSE;
3361 
3362 	if ((hw->phy.type == ixgbe_phy_nl) &&
3363 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3364 		s32 ret = hw->phy.ops.identify_sfp(hw);
3365 		if (ret)
3366 			goto out;
3367 		ret = hw->phy.ops.reset(hw);
3368 		adapter->sfp_probe = FALSE;
3369 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3370 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3371 			device_printf(dev,
3372 			    "Reload driver with supported module.\n");
3373 			goto out;
3374 		} else
3375 			device_printf(dev, "SFP+ module detected!\n");
3376 		/* We now have supported optics */
3377 		result = TRUE;
3378 	}
3379 out:
3380 
3381 	return (result);
3382 } /* ixgbe_sfp_probe */
3383 
3384 /************************************************************************
3385  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3386  ************************************************************************/
3387 static void
3388 ixgbe_handle_mod(void *context)
3389 {
3390 	if_ctx_t        ctx = context;
3391 	struct adapter  *adapter = iflib_get_softc(ctx);
3392 	struct ixgbe_hw *hw = &adapter->hw;
3393 	device_t        dev = iflib_get_dev(ctx);
3394 	u32             err, cage_full = 0;
3395 
3396 	if (adapter->hw.need_crosstalk_fix) {
3397 		switch (hw->mac.type) {
3398 		case ixgbe_mac_82599EB:
3399 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3400 			    IXGBE_ESDP_SDP2;
3401 			break;
3402 		case ixgbe_mac_X550EM_x:
3403 		case ixgbe_mac_X550EM_a:
3404 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3405 			    IXGBE_ESDP_SDP0;
3406 			break;
3407 		default:
3408 			break;
3409 		}
3410 
3411 		if (!cage_full)
3412 			goto handle_mod_out;
3413 	}
3414 
3415 	err = hw->phy.ops.identify_sfp(hw);
3416 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3417 		device_printf(dev,
3418 		    "Unsupported SFP+ module type was detected.\n");
3419 		goto handle_mod_out;
3420 	}
3421 
3422 	if (hw->mac.type == ixgbe_mac_82598EB)
3423 		err = hw->phy.ops.reset(hw);
3424 	else
3425 		err = hw->mac.ops.setup_sfp(hw);
3426 
3427 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3428 		device_printf(dev,
3429 		    "Setup failure - unsupported SFP+ module type.\n");
3430 		goto handle_mod_out;
3431 	}
3432 	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3433 	return;
3434 
3435 handle_mod_out:
3436 	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3437 } /* ixgbe_handle_mod */
3438 
3439 
3440 /************************************************************************
3441  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3442  ************************************************************************/
3443 static void
3444 ixgbe_handle_msf(void *context)
3445 {
3446 	if_ctx_t        ctx = context;
3447 	struct adapter  *adapter = iflib_get_softc(ctx);
3448 	struct ixgbe_hw *hw = &adapter->hw;
3449 	u32             autoneg;
3450 	bool            negotiate;
3451 
3452 	/* ixgbe_get_supported_physical_layer() will call hw->phy.ops.identify_sfp() */
3453 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3454 
3455 	autoneg = hw->phy.autoneg_advertised;
3456 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3457 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3458 	if (hw->mac.ops.setup_link)
3459 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3460 
3461 	/* Adjust media types shown in ifconfig */
3462 	ifmedia_removeall(adapter->media);
3463 	ixgbe_add_media_types(adapter->ctx);
3464 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3465 } /* ixgbe_handle_msf */
3466 
3467 /************************************************************************
3468  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3469  ************************************************************************/
3470 static void
3471 ixgbe_handle_phy(void *context)
3472 {
3473 	if_ctx_t        ctx = context;
3474 	struct adapter  *adapter = iflib_get_softc(ctx);
3475 	struct ixgbe_hw *hw = &adapter->hw;
3476 	int             error;
3477 
3478 	error = hw->phy.ops.handle_lasi(hw);
3479 	if (error == IXGBE_ERR_OVERTEMP)
3480 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3481 	else if (error)
3482 		device_printf(adapter->dev,
3483 		    "Error handling LASI interrupt: %d\n", error);
3484 } /* ixgbe_handle_phy */
3485 
3486 /************************************************************************
3487  * ixgbe_if_stop - Stop the hardware
3488  *
3489  *   Disables all traffic on the adapter by issuing a
3490  *   global reset on the MAC.
3491  ************************************************************************/
3492 static void
3493 ixgbe_if_stop(if_ctx_t ctx)
3494 {
3495 	struct adapter  *adapter = iflib_get_softc(ctx);
3496 	struct ixgbe_hw *hw = &adapter->hw;
3497 
3498 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3499 
3500 	ixgbe_reset_hw(hw);
3501 	hw->adapter_stopped = FALSE;
3502 	ixgbe_stop_adapter(hw);
3503 	if (hw->mac.type == ixgbe_mac_82599EB)
3504 		ixgbe_stop_mac_link_on_d3_82599(hw);
3505 	/* Turn off the laser - noop with no optics */
3506 	ixgbe_disable_tx_laser(hw);
3507 
3508 	/* Update the stack */
3509 	adapter->link_up = FALSE;
3510 	ixgbe_if_update_admin_status(ctx);
3511 
3512 	/* reprogram the RAR[0] in case user changed it. */
3513 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3514 
3515 	return;
3516 } /* ixgbe_if_stop */
3517 
3518 /************************************************************************
3519  * ixgbe_if_update_admin_status - Update OS on link state
3520  *
3521  * Note: Only updates the OS on the cached link state.
3522  *       The real check of the hardware only happens with
3523  *       a link interrupt.
3524  ************************************************************************/
3525 static void
3526 ixgbe_if_update_admin_status(if_ctx_t ctx)
3527 {
3528 	struct adapter *adapter = iflib_get_softc(ctx);
3529 	device_t       dev = iflib_get_dev(ctx);
3530 
3531 	if (adapter->link_up) {
3532 		if (adapter->link_active == FALSE) {
3533 			if (bootverbose)
3534 				device_printf(dev, "Link is up %d Gbps %s \n",
3535 				    ((adapter->link_speed == 128) ? 10 : 1),
3536 				    "Full Duplex");
3537 			adapter->link_active = TRUE;
3538 			/* Update any Flow Control changes */
3539 			ixgbe_fc_enable(&adapter->hw);
3540 			/* Update DMA coalescing config */
3541 			ixgbe_config_dmac(adapter);
3542 			/* should actually be negotiated value */
3543 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3544 
3545 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3546 				ixgbe_ping_all_vfs(adapter);
3547 		}
3548 	} else { /* Link down */
3549 		if (adapter->link_active == TRUE) {
3550 			if (bootverbose)
3551 				device_printf(dev, "Link is Down\n");
3552 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3553 			adapter->link_active = FALSE;
3554 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3555 				ixgbe_ping_all_vfs(adapter);
3556 		}
3557 	}
3558 
3559 	/* Handle task requests from msix_link() */
3560 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3561 		ixgbe_handle_mod(ctx);
3562 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3563 		ixgbe_handle_msf(ctx);
3564 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3565 		ixgbe_handle_mbx(ctx);
3566 	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3567 		ixgbe_reinit_fdir(ctx);
3568 	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3569 		ixgbe_handle_phy(ctx);
3570 	adapter->task_requests = 0;
3571 
3572 	ixgbe_update_stats_counters(adapter);
3573 } /* ixgbe_if_update_admin_status */
3574 
3575 /************************************************************************
3576  * ixgbe_config_dmac - Configure DMA Coalescing
3577  ************************************************************************/
3578 static void
3579 ixgbe_config_dmac(struct adapter *adapter)
3580 {
3581 	struct ixgbe_hw          *hw = &adapter->hw;
3582 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3583 
3584 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3585 		return;
3586 
3587 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3588 	    dcfg->link_speed ^ adapter->link_speed) {
3589 		dcfg->watchdog_timer = adapter->dmac;
3590 		dcfg->fcoe_en = FALSE;
3591 		dcfg->link_speed = adapter->link_speed;
3592 		dcfg->num_tcs = 1;
3593 
3594 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3595 		    dcfg->watchdog_timer, dcfg->link_speed);
3596 
3597 		hw->mac.ops.dmac_config(hw);
3598 	}
3599 } /* ixgbe_config_dmac */
3600 
3601 /************************************************************************
3602  * ixgbe_if_enable_intr
3603  ************************************************************************/
3604 void
3605 ixgbe_if_enable_intr(if_ctx_t ctx)
3606 {
3607 	struct adapter     *adapter = iflib_get_softc(ctx);
3608 	struct ixgbe_hw    *hw = &adapter->hw;
3609 	struct ix_rx_queue *que = adapter->rx_queues;
3610 	u32                mask, fwsm;
3611 
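	/*
	 * Start from everything except the per-queue RTX bits; queue
	 * interrupts are enabled per MSI-X vector at the end of this
	 * function.
	 */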
3612 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3613 
3614 	switch (adapter->hw.mac.type) {
3615 	case ixgbe_mac_82599EB:
3616 		mask |= IXGBE_EIMS_ECC;
3617 		/* Temperature sensor on some adapters */
3618 		mask |= IXGBE_EIMS_GPI_SDP0;
3619 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3620 		mask |= IXGBE_EIMS_GPI_SDP1;
3621 		mask |= IXGBE_EIMS_GPI_SDP2;
3622 		break;
3623 	case ixgbe_mac_X540:
3624 		/* Detect if Thermal Sensor is enabled */
3625 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3626 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3627 			mask |= IXGBE_EIMS_TS;
3628 		mask |= IXGBE_EIMS_ECC;
3629 		break;
3630 	case ixgbe_mac_X550:
3631 		/* MAC thermal sensor is automatically enabled */
3632 		mask |= IXGBE_EIMS_TS;
3633 		mask |= IXGBE_EIMS_ECC;
3634 		break;
3635 	case ixgbe_mac_X550EM_x:
3636 	case ixgbe_mac_X550EM_a:
3637 		/* Some devices use SDP0 for important information */
3638 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3639 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3640 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3641 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3642 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3643 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3644 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3645 		mask |= IXGBE_EIMS_ECC;
3646 		break;
3647 	default:
3648 		break;
3649 	}
3650 
3651 	/* Enable Fan Failure detection */
3652 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3653 		mask |= IXGBE_EIMS_GPI_SDP1;
3654 	/* Enable SR-IOV */
3655 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3656 		mask |= IXGBE_EIMS_MAILBOX;
3657 	/* Enable Flow Director */
3658 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3659 		mask |= IXGBE_EIMS_FLOW_DIR;
3660 
3661 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3662 
3663 	/* With MSI-X we use auto clear */
3664 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3665 		mask = IXGBE_EIMS_ENABLE_MASK;
3666 		/* Don't autoclear Link */
3667 		mask &= ~IXGBE_EIMS_OTHER;
3668 		mask &= ~IXGBE_EIMS_LSC;
3669 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3670 			mask &= ~IXGBE_EIMS_MAILBOX;
3671 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3672 	}
3673 
3674 	/*
3675 	 * Now enable all queues; this is done separately to
3676 	 * allow for handling the extended (beyond 32) MSI-X
3677 	 * vectors that can be used by 82599.
3678 	 */
3679 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3680 		ixgbe_enable_queue(adapter, que->msix);
3681 
3682 	IXGBE_WRITE_FLUSH(hw);
3683 
3684 } /* ixgbe_if_enable_intr */
3685 
3686 /************************************************************************
3687  * ixgbe_if_disable_intr
3688  ************************************************************************/
3689 static void
3690 ixgbe_if_disable_intr(if_ctx_t ctx)
3691 {
3692 	struct adapter *adapter = iflib_get_softc(ctx);
3693 
3694 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3695 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3696 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3697 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3698 	} else {
3699 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3700 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3701 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3702 	}
3703 	IXGBE_WRITE_FLUSH(&adapter->hw);
3704 
3705 } /* ixgbe_if_disable_intr */
3706 
3707 /************************************************************************
3708  * ixgbe_link_intr_enable
3709  ************************************************************************/
3710 static void
3711 ixgbe_link_intr_enable(if_ctx_t ctx)
3712 {
3713 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3714 
3715 	/* Re-enable other interrupts */
3716 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3717 } /* ixgbe_link_intr_enable */
3718 
3719 /************************************************************************
3720  * ixgbe_if_rx_queue_intr_enable
3721  ************************************************************************/
3722 static int
3723 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3724 {
3725 	struct adapter     *adapter = iflib_get_softc(ctx);
3726 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3727 
3728 	ixgbe_enable_queue(adapter, que->msix);
3729 
3730 	return (0);
3731 } /* ixgbe_if_rx_queue_intr_enable */
3732 
3733 /************************************************************************
3734  * ixgbe_enable_queue
3735  ************************************************************************/
3736 static void
3737 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3738 {
3739 	struct ixgbe_hw *hw = &adapter->hw;
3740 	u64             queue = 1ULL << vector;
3741 	u32             mask;
3742 
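	/*
	 * Vectors 32 and above live in the second EIMS_EX register, hence
	 * the 64-bit queue mask is split below.
	 */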
3743 	if (hw->mac.type == ixgbe_mac_82598EB) {
3744 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3745 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3746 	} else {
3747 		mask = (queue & 0xFFFFFFFF);
3748 		if (mask)
3749 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3750 		mask = (queue >> 32);
3751 		if (mask)
3752 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3753 	}
3754 } /* ixgbe_enable_queue */
3755 
3756 /************************************************************************
3757  * ixgbe_disable_queue
3758  ************************************************************************/
3759 static void
3760 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3761 {
3762 	struct ixgbe_hw *hw = &adapter->hw;
3763 	u64             queue = 1ULL << vector;
3764 	u32             mask;
3765 
3766 	if (hw->mac.type == ixgbe_mac_82598EB) {
3767 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3768 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3769 	} else {
3770 		mask = (queue & 0xFFFFFFFF);
3771 		if (mask)
3772 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3773 		mask = (queue >> 32);
3774 		if (mask)
3775 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3776 	}
3777 } /* ixgbe_disable_queue */
3778 
3779 /************************************************************************
3780  * ixgbe_intr - Legacy Interrupt Service Routine
3781  ************************************************************************/
3782 int
3783 ixgbe_intr(void *arg)
3784 {
3785 	struct adapter     *adapter = arg;
3786 	struct ix_rx_queue *que = adapter->rx_queues;
3787 	struct ixgbe_hw    *hw = &adapter->hw;
3788 	if_ctx_t           ctx = adapter->ctx;
3789 	u32                eicr, eicr_mask;
3790 
3791 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3792 
3793 	++que->irqs;
3794 	if (eicr == 0) {
3795 		ixgbe_if_enable_intr(ctx);
3796 		return (FILTER_HANDLED);
3797 	}
3798 
3799 	/* Check for fan failure */
3800 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3801 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3802 		device_printf(adapter->dev,
3803 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3804 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3805 	}
3806 
3807 	/* Link status change */
3808 	if (eicr & IXGBE_EICR_LSC) {
3809 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3810 		iflib_admin_intr_deferred(ctx);
3811 	}
3812 
3813 	if (ixgbe_is_sfp(hw)) {
3814 		/* Pluggable optics-related interrupt */
3815 		if (hw->mac.type >= ixgbe_mac_X540)
3816 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3817 		else
3818 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3819 
3820 		if (eicr & eicr_mask) {
3821 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3822 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3823 		}
3824 
3825 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3826 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3827 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3828 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3829 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3830 		}
3831 	}
3832 
3833 	/* External PHY interrupt */
3834 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3835 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3836 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3837 
3838 	return (FILTER_SCHEDULE_THREAD);
3839 } /* ixgbe_intr */
3840 
3841 /************************************************************************
3842  * ixgbe_free_pci_resources
3843  ************************************************************************/
3844 static void
3845 ixgbe_free_pci_resources(if_ctx_t ctx)
3846 {
3847 	struct adapter *adapter = iflib_get_softc(ctx);
3848 	struct         ix_rx_queue *que = adapter->rx_queues;
3849 	device_t       dev = iflib_get_dev(ctx);
3850 
3851 	/* Release all MSI-X queue resources */
3852 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3853 		iflib_irq_free(ctx, &adapter->irq);
3854 
3855 	if (que != NULL) {
3856 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3857 			iflib_irq_free(ctx, &que->que_irq);
3858 		}
3859 	}
3860 
3861 	if (adapter->pci_mem != NULL)
3862 		bus_release_resource(dev, SYS_RES_MEMORY,
3863 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3864 } /* ixgbe_free_pci_resources */
3865 
3866 /************************************************************************
3867  * ixgbe_sysctl_flowcntl
3868  *
3869  *   SYSCTL wrapper around setting Flow Control
3870  ************************************************************************/
3871 static int
3872 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3873 {
3874 	struct adapter *adapter;
3875 	int            error, fc;
3876 
3877 	adapter = (struct adapter *)arg1;
3878 	fc = adapter->hw.fc.current_mode;
3879 
3880 	error = sysctl_handle_int(oidp, &fc, 0, req);
3881 	if ((error) || (req->newptr == NULL))
3882 		return (error);
3883 
3884 	/* Don't bother if it's not changed */
3885 	if (fc == adapter->hw.fc.current_mode)
3886 		return (0);
3887 
3888 	return ixgbe_set_flowcntl(adapter, fc);
3889 } /* ixgbe_sysctl_flowcntl */
3890 
3891 /************************************************************************
3892  * ixgbe_set_flowcntl - Set flow control
3893  *
3894  *   Flow control values:
3895  *     0 - off
3896  *     1 - rx pause
3897  *     2 - tx pause
3898  *     3 - full
3899  ************************************************************************/
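/*
 * Example (illustrative only; assumes the sysctl node is registered
 * elsewhere in this driver as dev.ix.<unit>.fc):
 *
 *     # sysctl dev.ix.0.fc=3
 *
 * requests full (rx + tx pause) flow control per the table above.
 */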
3900 static int
3901 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3902 {
3903 	switch (fc) {
3904 	case ixgbe_fc_rx_pause:
3905 	case ixgbe_fc_tx_pause:
3906 	case ixgbe_fc_full:
3907 		adapter->hw.fc.requested_mode = fc;
3908 		if (adapter->num_rx_queues > 1)
3909 			ixgbe_disable_rx_drop(adapter);
3910 		break;
3911 	case ixgbe_fc_none:
3912 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3913 		if (adapter->num_rx_queues > 1)
3914 			ixgbe_enable_rx_drop(adapter);
3915 		break;
3916 	default:
3917 		return (EINVAL);
3918 	}
3919 
3920 	/* Don't autoneg if forcing a value */
3921 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3922 	ixgbe_fc_enable(&adapter->hw);
3923 
3924 	return (0);
3925 } /* ixgbe_set_flowcntl */
3926 
3927 /************************************************************************
3928  * ixgbe_enable_rx_drop
3929  *
3930  *   Enable the hardware to drop packets when the buffer is
3931  *   full. This is useful with multiqueue, so that no single
3932  *   queue being full stalls the entire RX engine. We only
3933  *   enable this when Multiqueue is enabled AND Flow Control
3934  *   is disabled.
3935  ************************************************************************/
3936 static void
3937 ixgbe_enable_rx_drop(struct adapter *adapter)
3938 {
3939 	struct ixgbe_hw *hw = &adapter->hw;
3940 	struct rx_ring  *rxr;
3941 	u32             srrctl;
3942 
3943 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3944 		rxr = &adapter->rx_queues[i].rxr;
3945 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3946 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3947 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3948 	}
3949 
3950 	/* enable drop for each vf */
3951 	for (int i = 0; i < adapter->num_vfs; i++) {
3952 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3953 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3954 		                IXGBE_QDE_ENABLE));
3955 	}
3956 } /* ixgbe_enable_rx_drop */
3957 
3958 /************************************************************************
3959  * ixgbe_disable_rx_drop
3960  ************************************************************************/
3961 static void
3962 ixgbe_disable_rx_drop(struct adapter *adapter)
3963 {
3964 	struct ixgbe_hw *hw = &adapter->hw;
3965 	struct rx_ring  *rxr;
3966 	u32             srrctl;
3967 
3968 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3969 		rxr = &adapter->rx_queues[i].rxr;
3970 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3971 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3972 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3973 	}
3974 
3975 	/* disable drop for each vf */
3976 	for (int i = 0; i < adapter->num_vfs; i++) {
3977 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3978 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3979 	}
3980 } /* ixgbe_disable_rx_drop */
3981 
3982 /************************************************************************
3983  * ixgbe_sysctl_advertise
3984  *
3985  *   SYSCTL wrapper around setting advertised speed
3986  ************************************************************************/
3987 static int
3988 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3989 {
3990 	struct adapter *adapter;
3991 	int            error, advertise;
3992 
3993 	adapter = (struct adapter *)arg1;
3994 	advertise = adapter->advertise;
3995 
3996 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3997 	if ((error) || (req->newptr == NULL))
3998 		return (error);
3999 
4000 	return ixgbe_set_advertise(adapter, advertise);
4001 } /* ixgbe_sysctl_advertise */
4002 
4003 /************************************************************************
4004  * ixgbe_set_advertise - Control advertised link speed
4005  *
4006  *   Flags:
4007  *     0x1 - advertise 100 Mb
4008  *     0x2 - advertise 1G
4009  *     0x4 - advertise 10G
4010  *     0x8 - advertise 10 Mb (yes, Mb)
4011  ************************************************************************/
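/*
 * Example (illustrative only; assumes the sysctl node is registered
 * elsewhere in this driver as dev.ix.<unit>.advertise_speed):
 *
 *     # sysctl dev.ix.0.advertise_speed=6
 *
 * advertises 1G and 10G only, since 0x2 | 0x4 == 0x6.
 */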
4012 static int
4013 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4014 {
4015 	device_t         dev = iflib_get_dev(adapter->ctx);
4016 	struct ixgbe_hw  *hw;
4017 	ixgbe_link_speed speed = 0;
4018 	ixgbe_link_speed link_caps = 0;
4019 	s32              err = IXGBE_NOT_IMPLEMENTED;
4020 	bool             negotiate = FALSE;
4021 
4022 	/* Checks to validate new value */
4023 	if (adapter->advertise == advertise) /* no change */
4024 		return (0);
4025 
4026 	hw = &adapter->hw;
4027 
4028 	/* No speed changes for backplane media */
4029 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4030 		return (ENODEV);
4031 
4032 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4033 	      (hw->phy.multispeed_fiber))) {
4034 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4035 		return (EINVAL);
4036 	}
4037 
4038 	if (advertise < 0x1 || advertise > 0xF) {
4039 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4040 		return (EINVAL);
4041 	}
4042 
4043 	if (hw->mac.ops.get_link_capabilities) {
4044 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4045 		    &negotiate);
4046 		if (err != IXGBE_SUCCESS) {
4047 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4048 			return (ENODEV);
4049 		}
4050 	}
4051 
4052 	/* Set new value and report new advertised mode */
4053 	if (advertise & 0x1) {
4054 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4055 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4056 			return (EINVAL);
4057 		}
4058 		speed |= IXGBE_LINK_SPEED_100_FULL;
4059 	}
4060 	if (advertise & 0x2) {
4061 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4062 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4063 			return (EINVAL);
4064 		}
4065 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4066 	}
4067 	if (advertise & 0x4) {
4068 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4069 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4070 			return (EINVAL);
4071 		}
4072 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4073 	}
4074 	if (advertise & 0x8) {
4075 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4076 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4077 			return (EINVAL);
4078 		}
4079 		speed |= IXGBE_LINK_SPEED_10_FULL;
4080 	}
4081 
4082 	hw->mac.autotry_restart = TRUE;
4083 	hw->mac.ops.setup_link(hw, speed, TRUE);
4084 	adapter->advertise = advertise;
4085 
4086 	return (0);
4087 } /* ixgbe_set_advertise */
4088 
4089 /************************************************************************
4090  * ixgbe_get_advertise - Get current advertised speed settings
4091  *
4092  *   Formatted for sysctl usage.
4093  *   Flags:
4094  *     0x1 - advertise 100 Mb
4095  *     0x2 - advertise 1G
4096  *     0x4 - advertise 10G
4097  *     0x8 - advertise 10 Mb (yes, Mb)
4098  ************************************************************************/
4099 static int
4100 ixgbe_get_advertise(struct adapter *adapter)
4101 {
4102 	struct ixgbe_hw  *hw = &adapter->hw;
4103 	int              speed;
4104 	ixgbe_link_speed link_caps = 0;
4105 	s32              err;
4106 	bool             negotiate = FALSE;
4107 
4108 	/*
4109 	 * Advertised speed means nothing unless it's copper or
4110 	 * multi-speed fiber
4111 	 */
4112 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4113 	    !(hw->phy.multispeed_fiber))
4114 		return (0);
4115 
4116 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4117 	if (err != IXGBE_SUCCESS)
4118 		return (0);
4119 
4120 	speed =
4121 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4122 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4123 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4124 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4125 
4126 	return speed;
4127 } /* ixgbe_get_advertise */
4128 
4129 /************************************************************************
4130  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4131  *
4132  *   Control values:
4133  *     0/1 - off / on (use default value of 1000)
4134  *
4135  *     Legal timer values are:
4136  *     50,100,250,500,1000,2000,5000,10000
4137  *
4138  *     Turning off interrupt moderation will also turn this off.
4139  ************************************************************************/
4140 static int
4141 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4142 {
4143 	struct adapter *adapter = (struct adapter *)arg1;
4144 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4145 	int            error;
4146 	u16            newval;
4147 
4148 	newval = adapter->dmac;
4149 	error = sysctl_handle_16(oidp, &newval, 0, req);
4150 	if ((error) || (req->newptr == NULL))
4151 		return (error);
4152 
4153 	switch (newval) {
4154 	case 0:
4155 		/* Disabled */
4156 		adapter->dmac = 0;
4157 		break;
4158 	case 1:
4159 		/* Enable and use default */
4160 		adapter->dmac = 1000;
4161 		break;
4162 	case 50:
4163 	case 100:
4164 	case 250:
4165 	case 500:
4166 	case 1000:
4167 	case 2000:
4168 	case 5000:
4169 	case 10000:
4170 		/* Legal values - allow */
4171 		adapter->dmac = newval;
4172 		break;
4173 	default:
4174 		/* Do nothing, illegal value */
4175 		return (EINVAL);
4176 	}
4177 
4178 	/* Re-initialize hardware if it's already running */
4179 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4180 		ifp->if_init(ifp);
4181 
4182 	return (0);
4183 } /* ixgbe_sysctl_dmac */
4184 
4185 #ifdef IXGBE_DEBUG
4186 /************************************************************************
4187  * ixgbe_sysctl_power_state
4188  *
4189  *   Sysctl to test power states
4190  *   Values:
4191  *     0      - set device to D0
4192  *     3      - set device to D3
4193  *     (none) - get current device power state
4194  ************************************************************************/
4195 static int
4196 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4197 {
4198 	struct adapter *adapter = (struct adapter *)arg1;
4199 	device_t       dev = adapter->dev;
4200 	int            curr_ps, new_ps, error = 0;
4201 
4202 	curr_ps = new_ps = pci_get_powerstate(dev);
4203 
4204 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4205 	if ((error) || (req->newptr == NULL))
4206 		return (error);
4207 
4208 	if (new_ps == curr_ps)
4209 		return (0);
4210 
4211 	if (new_ps == 3 && curr_ps == 0)
4212 		error = DEVICE_SUSPEND(dev);
4213 	else if (new_ps == 0 && curr_ps == 3)
4214 		error = DEVICE_RESUME(dev);
4215 	else
4216 		return (EINVAL);
4217 
4218 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4219 
4220 	return (error);
4221 } /* ixgbe_sysctl_power_state */
4222 #endif
4223 
4224 /************************************************************************
4225  * ixgbe_sysctl_wol_enable
4226  *
4227  *   Sysctl to enable/disable the WoL capability,
4228  *   if supported by the adapter.
4229  *
4230  *   Values:
4231  *     0 - disabled
4232  *     1 - enabled
4233  ************************************************************************/
4234 static int
4235 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4236 {
4237 	struct adapter  *adapter = (struct adapter *)arg1;
4238 	struct ixgbe_hw *hw = &adapter->hw;
4239 	int             new_wol_enabled;
4240 	int             error = 0;
4241 
4242 	new_wol_enabled = hw->wol_enabled;
4243 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4244 	if ((error) || (req->newptr == NULL))
4245 		return (error);
4246 	new_wol_enabled = !!(new_wol_enabled);
4247 	if (new_wol_enabled == hw->wol_enabled)
4248 		return (0);
4249 
4250 	if (new_wol_enabled > 0 && !adapter->wol_support)
4251 		return (ENODEV);
4252 	else
4253 		hw->wol_enabled = new_wol_enabled;
4254 
4255 	return (0);
4256 } /* ixgbe_sysctl_wol_enable */
4257 
4258 /************************************************************************
4259  * ixgbe_sysctl_wufc - Wake Up Filter Control
4260  *
4261  *   Sysctl to enable/disable the types of packets that the
4262  *   adapter will wake up on upon receipt.
4263  *   Flags:
4264  *     0x1  - Link Status Change
4265  *     0x2  - Magic Packet
4266  *     0x4  - Direct Exact
4267  *     0x8  - Directed Multicast
4268  *     0x10 - Broadcast
4269  *     0x20 - ARP/IPv4 Request Packet
4270  *     0x40 - Direct IPv4 Packet
4271  *     0x80 - Direct IPv6 Packet
4272  *
4273  *   Settings not listed above will cause the sysctl to return an error.
4274  ************************************************************************/
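/*
 * Example: a value of 0x12 (0x2 | 0x10) enables wake on magic packets
 * and broadcast frames, per the flag table above.
 */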
4275 static int
4276 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4277 {
4278 	struct adapter *adapter = (struct adapter *)arg1;
4279 	int            error = 0;
4280 	u32            new_wufc;
4281 
4282 	new_wufc = adapter->wufc;
4283 
4284 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4285 	if ((error) || (req->newptr == NULL))
4286 		return (error);
4287 	if (new_wufc == adapter->wufc)
4288 		return (0);
4289 
4290 	if (new_wufc & 0xffffff00)
4291 		return (EINVAL);
4292 
4293 	new_wufc &= 0xff;
4294 	new_wufc |= (0xffffff00 & adapter->wufc);
4295 	adapter->wufc = new_wufc;
4296 
4297 	return (0);
4298 } /* ixgbe_sysctl_wufc */
4299 
4300 #ifdef IXGBE_DEBUG
4301 /************************************************************************
4302  * ixgbe_sysctl_print_rss_config
4303  ************************************************************************/
4304 static int
4305 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4306 {
4307 	struct adapter  *adapter = (struct adapter *)arg1;
4308 	struct ixgbe_hw *hw = &adapter->hw;
4309 	device_t        dev = adapter->dev;
4310 	struct sbuf     *buf;
4311 	int             error = 0, reta_size;
4312 	u32             reg;
4313 
4314 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4315 	if (!buf) {
4316 		device_printf(dev, "Could not allocate sbuf for output.\n");
4317 		return (ENOMEM);
4318 	}
4319 
4320 	/* TODO: use sbufs to make a string to print out */
4321 	/* Set multiplier for RETA setup and table size based on MAC */
4322 	switch (adapter->hw.mac.type) {
4323 	case ixgbe_mac_X550:
4324 	case ixgbe_mac_X550EM_x:
4325 	case ixgbe_mac_X550EM_a:
4326 		reta_size = 128;
4327 		break;
4328 	default:
4329 		reta_size = 32;
4330 		break;
4331 	}
4332 
4333 	/* Print out the redirection table */
4334 	sbuf_cat(buf, "\n");
4335 	for (int i = 0; i < reta_size; i++) {
4336 		if (i < 32) {
4337 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4338 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4339 		} else {
4340 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4341 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4342 		}
4343 	}
4344 
4345 	/* TODO: print more config */
4346 
4347 	error = sbuf_finish(buf);
4348 	if (error)
4349 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4350 
4351 	sbuf_delete(buf);
4352 
4353 	return (0);
4354 } /* ixgbe_sysctl_print_rss_config */
4355 #endif /* IXGBE_DEBUG */
4356 
4357 /************************************************************************
4358  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4359  *
4360  *   For X552/X557-AT devices using an external PHY
4361  ************************************************************************/
4362 static int
4363 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4364 {
4365 	struct adapter  *adapter = (struct adapter *)arg1;
4366 	struct ixgbe_hw *hw = &adapter->hw;
4367 	u16             reg;
4368 
4369 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4370 		device_printf(iflib_get_dev(adapter->ctx),
4371 		    "Device has no supported external thermal sensor.\n");
4372 		return (ENODEV);
4373 	}
4374 
4375 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4376 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4377 		device_printf(iflib_get_dev(adapter->ctx),
4378 		    "Error reading from PHY's current temperature register\n");
4379 		return (EAGAIN);
4380 	}
4381 
4382 	/* Shift temp for output */
4383 	reg = reg >> 8;
4384 
4385 	return (sysctl_handle_16(oidp, NULL, reg, req));
4386 } /* ixgbe_sysctl_phy_temp */
4387 
4388 /************************************************************************
4389  * ixgbe_sysctl_phy_overtemp_occurred
4390  *
4391  *   Reports (directly from the PHY) whether the current PHY
4392  *   temperature is over the overtemp threshold.
4393  ************************************************************************/
4394 static int
4395 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4396 {
4397 	struct adapter  *adapter = (struct adapter *)arg1;
4398 	struct ixgbe_hw *hw = &adapter->hw;
4399 	u16             reg;
4400 
4401 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4402 		device_printf(iflib_get_dev(adapter->ctx),
4403 		    "Device has no supported external thermal sensor.\n");
4404 		return (ENODEV);
4405 	}
4406 
4407 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4408 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4409 		device_printf(iflib_get_dev(adapter->ctx),
4410 		    "Error reading from PHY's temperature status register\n");
4411 		return (EAGAIN);
4412 	}
4413 
4414 	/* Get occurrence bit */
4415 	reg = !!(reg & 0x4000);
4416 
4417 	return (sysctl_handle_16(oidp, 0, reg, req));
4418 } /* ixgbe_sysctl_phy_overtemp_occurred */
4419 
4420 /************************************************************************
4421  * ixgbe_sysctl_eee_state
4422  *
4423  *   Sysctl to set EEE power saving feature
4424  *   Values:
4425  *     0      - disable EEE
4426  *     1      - enable EEE
4427  *     (none) - get current device EEE state
4428  ************************************************************************/
4429 static int
4430 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4431 {
4432 	struct adapter *adapter = (struct adapter *)arg1;
4433 	device_t       dev = adapter->dev;
4434 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4435 	int            curr_eee, new_eee, error = 0;
4436 	s32            retval;
4437 
4438 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4439 
4440 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4441 	if ((error) || (req->newptr == NULL))
4442 		return (error);
4443 
4444 	/* Nothing to do */
4445 	if (new_eee == curr_eee)
4446 		return (0);
4447 
4448 	/* Not supported */
4449 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4450 		return (EINVAL);
4451 
4452 	/* Bounds checking */
4453 	if ((new_eee < 0) || (new_eee > 1))
4454 		return (EINVAL);
4455 
4456 	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4457 	if (retval) {
4458 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4459 		return (EINVAL);
4460 	}
4461 
4462 	/* Restart auto-neg */
4463 	ifp->if_init(ifp);
4464 
4465 	device_printf(dev, "New EEE state: %d\n", new_eee);
4466 
4467 	/* Cache new value */
4468 	if (new_eee)
4469 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4470 	else
4471 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4472 
4473 	return (error);
4474 } /* ixgbe_sysctl_eee_state */
4475 
4476 /************************************************************************
4477  * ixgbe_init_device_features
4478  ************************************************************************/
4479 static void
4480 ixgbe_init_device_features(struct adapter *adapter)
4481 {
4482 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4483 	                  | IXGBE_FEATURE_RSS
4484 	                  | IXGBE_FEATURE_MSI
4485 	                  | IXGBE_FEATURE_MSIX
4486 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4487 
4488 	/* Set capabilities first... */
4489 	switch (adapter->hw.mac.type) {
4490 	case ixgbe_mac_82598EB:
4491 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4492 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4493 		break;
4494 	case ixgbe_mac_X540:
4495 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4496 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4497 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4498 		    (adapter->hw.bus.func == 0))
4499 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4500 		break;
4501 	case ixgbe_mac_X550:
4502 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4503 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4504 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4505 		break;
4506 	case ixgbe_mac_X550EM_x:
4507 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4508 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4509 		break;
4510 	case ixgbe_mac_X550EM_a:
4511 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4512 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4513 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4514 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4515 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4516 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4517 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4518 		}
4519 		break;
4520 	case ixgbe_mac_82599EB:
4521 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4522 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4523 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4524 		    (adapter->hw.bus.func == 0))
4525 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4526 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4527 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4528 		break;
4529 	default:
4530 		break;
4531 	}
4532 
4533 	/* Enabled by default... */
4534 	/* Fan failure detection */
4535 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4536 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4537 	/* Netmap */
4538 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4539 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4540 	/* EEE */
4541 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4542 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4543 	/* Thermal Sensor */
4544 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4545 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4546 
4547 	/* Enabled via global sysctl... */
4548 	/* Flow Director */
4549 	if (ixgbe_enable_fdir) {
4550 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4551 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4552 		else
4553 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4554 	}
4555 	/*
4556 	 * Message Signal Interrupts - Extended (MSI-X)
4557 	 * Normal MSI is only enabled if MSI-X calls fail.
4558 	 */
4559 	if (!ixgbe_enable_msix)
4560 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4561 	/* Receive-Side Scaling (RSS) */
4562 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4563 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4564 
4565 	/* Disable features with unmet dependencies... */
4566 	/* No MSI-X */
4567 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4568 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4569 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4570 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4571 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4572 	}
4573 } /* ixgbe_init_device_features */
4574 
4575 /************************************************************************
4576  * ixgbe_check_fan_failure
4577  ************************************************************************/
4578 static void
4579 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4580 {
4581 	u32 mask;
4582 
4583 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4584 	    IXGBE_ESDP_SDP1;
4585 
4586 	if (reg & mask)
4587 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4588 } /* ixgbe_check_fan_failure */
4589