xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 028616d0dd69a3da7a30cb94d35f040bf2ced6b9)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select devices to load on
57  *   Last field stores an index into ixgbe_strings
58  *   Last entry must be all 0s
59  *
60  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 static bool ixgbe_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
143 int ixgbe_intr(void *arg);
144 
145 /************************************************************************
146  * Function prototypes
147  ************************************************************************/
148 #if __FreeBSD_version >= 1100036
149 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
150 #endif
151 
152 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
154 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
155 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
156 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
157 
158 static void ixgbe_config_dmac(struct adapter *adapter);
159 static void ixgbe_configure_ivars(struct adapter *adapter);
160 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
161                            s8 type);
162 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
163 static bool ixgbe_sfp_probe(if_ctx_t ctx);
164 
165 static void ixgbe_free_pci_resources(if_ctx_t ctx);
166 
167 static int  ixgbe_msix_link(void *arg);
168 static int  ixgbe_msix_que(void *arg);
169 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
170 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
171 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
172 
173 static int  ixgbe_setup_interface(if_ctx_t ctx);
174 static void ixgbe_init_device_features(struct adapter *adapter);
175 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
176 static void ixgbe_add_media_types(if_ctx_t ctx);
177 static void ixgbe_update_stats_counters(struct adapter *adapter);
178 static void ixgbe_config_link(if_ctx_t ctx);
179 static void ixgbe_get_slot_info(struct adapter *);
180 static void ixgbe_check_wol_support(struct adapter *adapter);
181 static void ixgbe_enable_rx_drop(struct adapter *);
182 static void ixgbe_disable_rx_drop(struct adapter *);
183 
184 static void ixgbe_add_hw_stats(struct adapter *adapter);
185 static int  ixgbe_set_flowcntl(struct adapter *, int);
186 static int  ixgbe_set_advertise(struct adapter *, int);
187 static int  ixgbe_get_advertise(struct adapter *);
188 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
189 static void ixgbe_config_gpie(struct adapter *adapter);
190 static void ixgbe_config_delay_values(struct adapter *adapter);
191 
192 /* Sysctl handlers */
193 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
198 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
199 #ifdef IXGBE_DEBUG
200 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
201 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
202 #endif
203 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
209 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
210 
211 /* Deferred interrupt tasklets */
212 static void ixgbe_handle_msf(void *);
213 static void ixgbe_handle_mod(void *);
214 static void ixgbe_handle_phy(void *);
215 
216 /************************************************************************
217  *  FreeBSD Device Interface Entry Points
218  ************************************************************************/
219 static device_method_t ix_methods[] = {
220 	/* Device interface */
221 	DEVMETHOD(device_register, ixgbe_register),
222 	DEVMETHOD(device_probe, iflib_device_probe),
223 	DEVMETHOD(device_attach, iflib_device_attach),
224 	DEVMETHOD(device_detach, iflib_device_detach),
225 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
226 	DEVMETHOD(device_suspend, iflib_device_suspend),
227 	DEVMETHOD(device_resume, iflib_device_resume),
228 #ifdef PCI_IOV
229 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
230 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
231 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
232 #endif /* PCI_IOV */
233 	DEVMETHOD_END
234 };
235 
236 static driver_t ix_driver = {
237 	"ix", ix_methods, sizeof(struct adapter),
238 };
239 
240 devclass_t ix_devclass;
241 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
242 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
243 MODULE_DEPEND(ix, pci, 1, 1, 1);
244 MODULE_DEPEND(ix, ether, 1, 1, 1);
245 MODULE_DEPEND(ix, iflib, 1, 1, 1);
246 
247 static device_method_t ixgbe_if_methods[] = {
248 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
249 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
250 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
251 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
252 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
253 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
254 	DEVMETHOD(ifdi_init, ixgbe_if_init),
255 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
256 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
257 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
258 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
259 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
260 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
261 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
262 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
263 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
264 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
265 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
266 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
267 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
268 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
269 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
270 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
271 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
272 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
273 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
274 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
275 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
276 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
277 	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
278 #ifdef PCI_IOV
279 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
280 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
281 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
282 #endif /* PCI_IOV */
283 	DEVMETHOD_END
284 };
285 
286 /*
287  * TUNEABLE PARAMETERS:
288  */
289 
290 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
291     "IXGBE driver parameters");
292 static driver_t ixgbe_if_driver = {
293   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
294 };
295 
296 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
297 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
298     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
299 
300 /* Flow control setting, default to full */
301 static int ixgbe_flow_control = ixgbe_fc_full;
302 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
303     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
304 
305 /* Advertise Speed, default to 0 (auto) */
306 static int ixgbe_advertise_speed = 0;
307 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
308     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
309 
310 /*
311  * Smart speed setting, default to on
312  * this only works as a compile option
313  * right now as its during attach, set
314  * this to 'ixgbe_smart_speed_off' to
315  * disable.
316  */
317 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
318 
319 /*
320  * MSI-X should be the default for best performance,
321  * but this allows it to be forced off for testing.
322  */
323 static int ixgbe_enable_msix = 1;
324 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
325     "Enable MSI-X interrupts");
326 
327 /*
328  * Defining this on will allow the use
329  * of unsupported SFP+ modules, note that
330  * doing so you are on your own :)
331  */
332 static int allow_unsupported_sfp = FALSE;
333 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
334     &allow_unsupported_sfp, 0,
335     "Allow unsupported SFP modules...use at your own risk");
336 
337 /*
338  * Not sure if Flow Director is fully baked,
339  * so we'll default to turning it off.
340  */
341 static int ixgbe_enable_fdir = 0;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
343     "Enable Flow Director");
344 
345 /* Receive-Side Scaling */
346 static int ixgbe_enable_rss = 1;
347 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
348     "Enable Receive-Side Scaling (RSS)");
349 
350 #if 0
351 /* Keep running tab on them for sanity check */
352 static int ixgbe_total_ports;
353 #endif
354 
355 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
356 
357 /*
358  * For Flow Director: this is the number of TX packets we sample
359  * for the filter pool, this means every 20th packet will be probed.
360  *
361  * This feature can be disabled by setting this to 0.
362  */
363 static int atr_sample_rate = 20;
364 
365 extern struct if_txrx ixgbe_txrx;
366 
367 static struct if_shared_ctx ixgbe_sctx_init = {
368 	.isc_magic = IFLIB_MAGIC,
369 	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
370 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
371 	.isc_tx_maxsegsize = PAGE_SIZE,
372 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
373 	.isc_tso_maxsegsize = PAGE_SIZE,
374 	.isc_rx_maxsize = PAGE_SIZE*4,
375 	.isc_rx_nsegments = 1,
376 	.isc_rx_maxsegsize = PAGE_SIZE*4,
377 	.isc_nfl = 1,
378 	.isc_ntxqs = 1,
379 	.isc_nrxqs = 1,
380 
381 	.isc_admin_intrcnt = 1,
382 	.isc_vendor_info = ixgbe_vendor_info_array,
383 	.isc_driver_version = ixgbe_driver_version,
384 	.isc_driver = &ixgbe_if_driver,
385 	.isc_flags = IFLIB_TSO_INIT_IP,
386 
387 	.isc_nrxd_min = {MIN_RXD},
388 	.isc_ntxd_min = {MIN_TXD},
389 	.isc_nrxd_max = {MAX_RXD},
390 	.isc_ntxd_max = {MAX_TXD},
391 	.isc_nrxd_default = {DEFAULT_RXD},
392 	.isc_ntxd_default = {DEFAULT_TXD},
393 };
394 
395 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
396 
397 /************************************************************************
398  * ixgbe_if_tx_queues_alloc
399  ************************************************************************/
400 static int
401 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
402                          int ntxqs, int ntxqsets)
403 {
404 	struct adapter     *adapter = iflib_get_softc(ctx);
405 	if_softc_ctx_t     scctx = adapter->shared;
406 	struct ix_tx_queue *que;
407 	int                i, j, error;
408 
409 	MPASS(adapter->num_tx_queues > 0);
410 	MPASS(adapter->num_tx_queues == ntxqsets);
411 	MPASS(ntxqs == 1);
412 
413 	/* Allocate queue structure memory */
414 	adapter->tx_queues =
415 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
416 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
417 	if (!adapter->tx_queues) {
418 		device_printf(iflib_get_dev(ctx),
419 		    "Unable to allocate TX ring memory\n");
420 		return (ENOMEM);
421 	}
422 
423 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
424 		struct tx_ring *txr = &que->txr;
425 
426 		/* In case SR-IOV is enabled, align the index properly */
427 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
428 		    i);
429 
430 		txr->adapter = que->adapter = adapter;
431 
432 		/* Allocate report status array */
433 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
434 		if (txr->tx_rsq == NULL) {
435 			error = ENOMEM;
436 			goto fail;
437 		}
438 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
439 			txr->tx_rsq[j] = QIDX_INVALID;
440 		/* get the virtual and physical address of the hardware queues */
441 		txr->tail = IXGBE_TDT(txr->me);
442 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
443 		txr->tx_paddr = paddrs[i];
444 
445 		txr->bytes = 0;
446 		txr->total_packets = 0;
447 
448 		/* Set the rate at which we sample packets */
449 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
450 			txr->atr_sample = atr_sample_rate;
451 
452 	}
453 
454 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
455 	    adapter->num_tx_queues);
456 
457 	return (0);
458 
459 fail:
460 	ixgbe_if_queues_free(ctx);
461 
462 	return (error);
463 } /* ixgbe_if_tx_queues_alloc */
464 
465 /************************************************************************
466  * ixgbe_if_rx_queues_alloc
467  ************************************************************************/
468 static int
469 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
470                          int nrxqs, int nrxqsets)
471 {
472 	struct adapter     *adapter = iflib_get_softc(ctx);
473 	struct ix_rx_queue *que;
474 	int                i;
475 
476 	MPASS(adapter->num_rx_queues > 0);
477 	MPASS(adapter->num_rx_queues == nrxqsets);
478 	MPASS(nrxqs == 1);
479 
480 	/* Allocate queue structure memory */
481 	adapter->rx_queues =
482 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
483 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
484 	if (!adapter->rx_queues) {
485 		device_printf(iflib_get_dev(ctx),
486 		    "Unable to allocate TX ring memory\n");
487 		return (ENOMEM);
488 	}
489 
490 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
491 		struct rx_ring *rxr = &que->rxr;
492 
493 		/* In case SR-IOV is enabled, align the index properly */
494 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
495 		    i);
496 
497 		rxr->adapter = que->adapter = adapter;
498 
499 		/* get the virtual and physical address of the hw queues */
500 		rxr->tail = IXGBE_RDT(rxr->me);
501 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
502 		rxr->rx_paddr = paddrs[i];
503 		rxr->bytes = 0;
504 		rxr->que = que;
505 	}
506 
507 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
508 	    adapter->num_rx_queues);
509 
510 	return (0);
511 } /* ixgbe_if_rx_queues_alloc */
512 
513 /************************************************************************
514  * ixgbe_if_queues_free
515  ************************************************************************/
516 static void
517 ixgbe_if_queues_free(if_ctx_t ctx)
518 {
519 	struct adapter     *adapter = iflib_get_softc(ctx);
520 	struct ix_tx_queue *tx_que = adapter->tx_queues;
521 	struct ix_rx_queue *rx_que = adapter->rx_queues;
522 	int                i;
523 
524 	if (tx_que != NULL) {
525 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
526 			struct tx_ring *txr = &tx_que->txr;
527 			if (txr->tx_rsq == NULL)
528 				break;
529 
530 			free(txr->tx_rsq, M_IXGBE);
531 			txr->tx_rsq = NULL;
532 		}
533 
534 		free(adapter->tx_queues, M_IXGBE);
535 		adapter->tx_queues = NULL;
536 	}
537 	if (rx_que != NULL) {
538 		free(adapter->rx_queues, M_IXGBE);
539 		adapter->rx_queues = NULL;
540 	}
541 } /* ixgbe_if_queues_free */
542 
543 /************************************************************************
544  * ixgbe_initialize_rss_mapping
545  ************************************************************************/
546 static void
547 ixgbe_initialize_rss_mapping(struct adapter *adapter)
548 {
549 	struct ixgbe_hw *hw = &adapter->hw;
550 	u32             reta = 0, mrqc, rss_key[10];
551 	int             queue_id, table_size, index_mult;
552 	int             i, j;
553 	u32             rss_hash_config;
554 
555 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
556 		/* Fetch the configured RSS key */
557 		rss_getkey((uint8_t *)&rss_key);
558 	} else {
559 		/* set up random bits */
560 		arc4rand(&rss_key, sizeof(rss_key), 0);
561 	}
562 
563 	/* Set multiplier for RETA setup and table size based on MAC */
564 	index_mult = 0x1;
565 	table_size = 128;
566 	switch (adapter->hw.mac.type) {
567 	case ixgbe_mac_82598EB:
568 		index_mult = 0x11;
569 		break;
570 	case ixgbe_mac_X550:
571 	case ixgbe_mac_X550EM_x:
572 	case ixgbe_mac_X550EM_a:
573 		table_size = 512;
574 		break;
575 	default:
576 		break;
577 	}
578 
579 	/* Set up the redirection table */
580 	for (i = 0, j = 0; i < table_size; i++, j++) {
581 		if (j == adapter->num_rx_queues)
582 			j = 0;
583 
584 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
585 			/*
586 			 * Fetch the RSS bucket id for the given indirection
587 			 * entry. Cap it at the number of configured buckets
588 			 * (which is num_rx_queues.)
589 			 */
590 			queue_id = rss_get_indirection_to_bucket(i);
591 			queue_id = queue_id % adapter->num_rx_queues;
592 		} else
593 			queue_id = (j * index_mult);
594 
595 		/*
596 		 * The low 8 bits are for hash value (n+0);
597 		 * The next 8 bits are for hash value (n+1), etc.
598 		 */
599 		reta = reta >> 8;
600 		reta = reta | (((uint32_t)queue_id) << 24);
601 		if ((i & 3) == 3) {
602 			if (i < 128)
603 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
604 			else
605 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
606 				    reta);
607 			reta = 0;
608 		}
609 	}
610 
611 	/* Now fill our hash function seeds */
612 	for (i = 0; i < 10; i++)
613 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
614 
615 	/* Perform hash on these packet types */
616 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
617 		rss_hash_config = rss_gethashconfig();
618 	else {
619 		/*
620 		 * Disable UDP - IP fragments aren't currently being handled
621 		 * and so we end up with a mix of 2-tuple and 4-tuple
622 		 * traffic.
623 		 */
624 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
625 		                | RSS_HASHTYPE_RSS_TCP_IPV4
626 		                | RSS_HASHTYPE_RSS_IPV6
627 		                | RSS_HASHTYPE_RSS_TCP_IPV6
628 		                | RSS_HASHTYPE_RSS_IPV6_EX
629 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
630 	}
631 
632 	mrqc = IXGBE_MRQC_RSSEN;
633 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
634 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
635 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
636 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
637 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
638 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
639 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
640 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
649 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
650 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
651 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
652 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
653 } /* ixgbe_initialize_rss_mapping */
654 
655 /************************************************************************
656  * ixgbe_initialize_receive_units - Setup receive registers and features.
657  ************************************************************************/
658 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
659 
660 static void
661 ixgbe_initialize_receive_units(if_ctx_t ctx)
662 {
663 	struct adapter     *adapter = iflib_get_softc(ctx);
664 	if_softc_ctx_t     scctx = adapter->shared;
665 	struct ixgbe_hw    *hw = &adapter->hw;
666 	struct ifnet       *ifp = iflib_get_ifp(ctx);
667 	struct ix_rx_queue *que;
668 	int                i, j;
669 	u32                bufsz, fctrl, srrctl, rxcsum;
670 	u32                hlreg;
671 
672 	/*
673 	 * Make sure receives are disabled while
674 	 * setting up the descriptor ring
675 	 */
676 	ixgbe_disable_rx(hw);
677 
678 	/* Enable broadcasts */
679 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
680 	fctrl |= IXGBE_FCTRL_BAM;
681 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
682 		fctrl |= IXGBE_FCTRL_DPF;
683 		fctrl |= IXGBE_FCTRL_PMCF;
684 	}
685 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
686 
687 	/* Set for Jumbo Frames? */
688 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
689 	if (ifp->if_mtu > ETHERMTU)
690 		hlreg |= IXGBE_HLREG0_JUMBOEN;
691 	else
692 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
693 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
694 
695 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
696 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
697 
698 	/* Setup the Base and Length of the Rx Descriptor Ring */
699 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
700 		struct rx_ring *rxr = &que->rxr;
701 		u64            rdba = rxr->rx_paddr;
702 
703 		j = rxr->me;
704 
705 		/* Setup the Base and Length of the Rx Descriptor Ring */
706 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
707 		    (rdba & 0x00000000ffffffffULL));
708 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
709 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
710 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
711 
712 		/* Set up the SRRCTL register */
713 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
714 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
715 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
716 		srrctl |= bufsz;
717 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
718 
719 		/*
720 		 * Set DROP_EN iff we have no flow control and >1 queue.
721 		 * Note that srrctl was cleared shortly before during reset,
722 		 * so we do not need to clear the bit, but do it just in case
723 		 * this code is moved elsewhere.
724 		 */
725 		if (adapter->num_rx_queues > 1 &&
726 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
727 			srrctl |= IXGBE_SRRCTL_DROP_EN;
728 		} else {
729 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
730 		}
731 
732 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
733 
734 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
735 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
736 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
737 
738 		/* Set the driver rx tail address */
739 		rxr->tail =  IXGBE_RDT(rxr->me);
740 	}
741 
742 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
743 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
744 		            | IXGBE_PSRTYPE_UDPHDR
745 		            | IXGBE_PSRTYPE_IPV4HDR
746 		            | IXGBE_PSRTYPE_IPV6HDR;
747 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
748 	}
749 
750 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
751 
752 	ixgbe_initialize_rss_mapping(adapter);
753 
754 	if (adapter->num_rx_queues > 1) {
755 		/* RSS and RX IPP Checksum are mutually exclusive */
756 		rxcsum |= IXGBE_RXCSUM_PCSD;
757 	}
758 
759 	if (ifp->if_capenable & IFCAP_RXCSUM)
760 		rxcsum |= IXGBE_RXCSUM_PCSD;
761 
762 	/* This is useful for calculating UDP/IP fragment checksums */
763 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
764 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
765 
766 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
767 
768 } /* ixgbe_initialize_receive_units */
769 
770 /************************************************************************
771  * ixgbe_initialize_transmit_units - Enable transmit units.
772  ************************************************************************/
773 static void
774 ixgbe_initialize_transmit_units(if_ctx_t ctx)
775 {
776 	struct adapter     *adapter = iflib_get_softc(ctx);
777 	struct ixgbe_hw    *hw = &adapter->hw;
778 	if_softc_ctx_t     scctx = adapter->shared;
779 	struct ix_tx_queue *que;
780 	int i;
781 
782 	/* Setup the Base and Length of the Tx Descriptor Ring */
783 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
784 	    i++, que++) {
785 		struct tx_ring	   *txr = &que->txr;
786 		u64 tdba = txr->tx_paddr;
787 		u32 txctrl = 0;
788 		int j = txr->me;
789 
790 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
791 		    (tdba & 0x00000000ffffffffULL));
792 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
793 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
794 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
795 
796 		/* Setup the HW Tx Head and Tail descriptor pointers */
797 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
798 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
799 
800 		/* Cache the tail address */
801 		txr->tail = IXGBE_TDT(txr->me);
802 
803 		txr->tx_rs_cidx = txr->tx_rs_pidx;
804 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
805 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
806 			txr->tx_rsq[k] = QIDX_INVALID;
807 
808 		/* Disable Head Writeback */
809 		/*
810 		 * Note: for X550 series devices, these registers are actually
811 		 * prefixed with TPH_ isntead of DCA_, but the addresses and
812 		 * fields remain the same.
813 		 */
814 		switch (hw->mac.type) {
815 		case ixgbe_mac_82598EB:
816 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
817 			break;
818 		default:
819 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
820 			break;
821 		}
822 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
823 		switch (hw->mac.type) {
824 		case ixgbe_mac_82598EB:
825 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
826 			break;
827 		default:
828 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
829 			break;
830 		}
831 
832 	}
833 
834 	if (hw->mac.type != ixgbe_mac_82598EB) {
835 		u32 dmatxctl, rttdcs;
836 
837 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
838 		dmatxctl |= IXGBE_DMATXCTL_TE;
839 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
840 		/* Disable arbiter to set MTQC */
841 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
842 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
843 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
844 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
845 		    ixgbe_get_mtqc(adapter->iov_mode));
846 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
847 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
848 	}
849 
850 } /* ixgbe_initialize_transmit_units */
851 
852 /************************************************************************
853  * ixgbe_register
854  ************************************************************************/
855 static void *
856 ixgbe_register(device_t dev)
857 {
858 	return (ixgbe_sctx);
859 } /* ixgbe_register */
860 
861 /************************************************************************
862  * ixgbe_if_attach_pre - Device initialization routine, part 1
863  *
864  *   Called when the driver is being loaded.
865  *   Identifies the type of hardware, initializes the hardware,
866  *   and initializes iflib structures.
867  *
868  *   return 0 on success, positive on failure
869  ************************************************************************/
870 static int
871 ixgbe_if_attach_pre(if_ctx_t ctx)
872 {
873 	struct adapter  *adapter;
874 	device_t        dev;
875 	if_softc_ctx_t  scctx;
876 	struct ixgbe_hw *hw;
877 	int             error = 0;
878 	u32             ctrl_ext;
879 
880 	INIT_DEBUGOUT("ixgbe_attach: begin");
881 
882 	/* Allocate, clear, and link in our adapter structure */
883 	dev = iflib_get_dev(ctx);
884 	adapter = iflib_get_softc(ctx);
885 	adapter->hw.back = adapter;
886 	adapter->ctx = ctx;
887 	adapter->dev = dev;
888 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
889 	adapter->media = iflib_get_media(ctx);
890 	hw = &adapter->hw;
891 
892 	/* Determine hardware revision */
893 	hw->vendor_id = pci_get_vendor(dev);
894 	hw->device_id = pci_get_device(dev);
895 	hw->revision_id = pci_get_revid(dev);
896 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
897 	hw->subsystem_device_id = pci_get_subdevice(dev);
898 
899 	/* Do base PCI setup - map BAR0 */
900 	if (ixgbe_allocate_pci_resources(ctx)) {
901 		device_printf(dev, "Allocation of PCI resources failed\n");
902 		return (ENXIO);
903 	}
904 
905 	/* let hardware know driver is loaded */
906 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
907 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
908 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
909 
910 	/*
911 	 * Initialize the shared code
912 	 */
913 	if (ixgbe_init_shared_code(hw) != 0) {
914 		device_printf(dev, "Unable to initialize the shared code\n");
915 		error = ENXIO;
916 		goto err_pci;
917 	}
918 
919 	if (hw->mbx.ops.init_params)
920 		hw->mbx.ops.init_params(hw);
921 
922 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
923 
924 	if (hw->mac.type != ixgbe_mac_82598EB)
925 		hw->phy.smart_speed = ixgbe_smart_speed;
926 
927 	ixgbe_init_device_features(adapter);
928 
929 	/* Enable WoL (if supported) */
930 	ixgbe_check_wol_support(adapter);
931 
932 	/* Verify adapter fan is still functional (if applicable) */
933 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
934 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
935 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
936 	}
937 
938 	/* Ensure SW/FW semaphore is free */
939 	ixgbe_init_swfw_semaphore(hw);
940 
941 	/* Set an initial default flow control value */
942 	hw->fc.requested_mode = ixgbe_flow_control;
943 
944 	hw->phy.reset_if_overtemp = TRUE;
945 	error = ixgbe_reset_hw(hw);
946 	hw->phy.reset_if_overtemp = FALSE;
947 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
948 		/*
949 		 * No optics in this port, set up
950 		 * so the timer routine will probe
951 		 * for later insertion.
952 		 */
953 		adapter->sfp_probe = TRUE;
954 		error = 0;
955 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
956 		device_printf(dev, "Unsupported SFP+ module detected!\n");
957 		error = EIO;
958 		goto err_pci;
959 	} else if (error) {
960 		device_printf(dev, "Hardware initialization failed\n");
961 		error = EIO;
962 		goto err_pci;
963 	}
964 
965 	/* Make sure we have a good EEPROM before we read from it */
966 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
967 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
968 		error = EIO;
969 		goto err_pci;
970 	}
971 
972 	error = ixgbe_start_hw(hw);
973 	switch (error) {
974 	case IXGBE_ERR_EEPROM_VERSION:
975 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
976 		break;
977 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
978 		device_printf(dev, "Unsupported SFP+ Module\n");
979 		error = EIO;
980 		goto err_pci;
981 	case IXGBE_ERR_SFP_NOT_PRESENT:
982 		device_printf(dev, "No SFP+ Module found\n");
983 		/* falls thru */
984 	default:
985 		break;
986 	}
987 
988 	/* Most of the iflib initialization... */
989 
990 	iflib_set_mac(ctx, hw->mac.addr);
991 	switch (adapter->hw.mac.type) {
992 	case ixgbe_mac_X550:
993 	case ixgbe_mac_X550EM_x:
994 	case ixgbe_mac_X550EM_a:
995 		scctx->isc_rss_table_size = 512;
996 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
997 		break;
998 	default:
999 		scctx->isc_rss_table_size = 128;
1000 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1001 	}
1002 
1003 	/* Allow legacy interrupts */
1004 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1005 
1006 	scctx->isc_txqsizes[0] =
1007 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1008 	    sizeof(u32), DBA_ALIGN),
1009 	scctx->isc_rxqsizes[0] =
1010 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1011 	    DBA_ALIGN);
1012 
1013 	/* XXX */
1014 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1015 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1016 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1017 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1018 	} else {
1019 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1020 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1021 	}
1022 
1023 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1024 
1025 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1026 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1027 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1028 
1029 	scctx->isc_txrx = &ixgbe_txrx;
1030 
1031 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1032 
1033 	return (0);
1034 
1035 err_pci:
1036 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1037 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1038 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1039 	ixgbe_free_pci_resources(ctx);
1040 
1041 	return (error);
1042 } /* ixgbe_if_attach_pre */
1043 
1044  /*********************************************************************
1045  * ixgbe_if_attach_post - Device initialization routine, part 2
1046  *
1047  *   Called during driver load, but after interrupts and
1048  *   resources have been allocated and configured.
1049  *   Sets up some data structures not relevant to iflib.
1050  *
1051  *   return 0 on success, positive on failure
1052  *********************************************************************/
1053 static int
1054 ixgbe_if_attach_post(if_ctx_t ctx)
1055 {
1056 	device_t dev;
1057 	struct adapter  *adapter;
1058 	struct ixgbe_hw *hw;
1059 	int             error = 0;
1060 
1061 	dev = iflib_get_dev(ctx);
1062 	adapter = iflib_get_softc(ctx);
1063 	hw = &adapter->hw;
1064 
1065 
1066 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1067 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1068 		device_printf(dev, "Device does not support legacy interrupts");
1069 		error = ENXIO;
1070 		goto err;
1071 	}
1072 
1073 	/* Allocate multicast array memory. */
1074 	adapter->mta = malloc(sizeof(*adapter->mta) *
1075 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1076 	if (adapter->mta == NULL) {
1077 		device_printf(dev, "Can not allocate multicast setup array\n");
1078 		error = ENOMEM;
1079 		goto err;
1080 	}
1081 
1082 	/* hw.ix defaults init */
1083 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1084 
1085 	/* Enable the optics for 82599 SFP+ fiber */
1086 	ixgbe_enable_tx_laser(hw);
1087 
1088 	/* Enable power to the phy. */
1089 	ixgbe_set_phy_power(hw, TRUE);
1090 
1091 	ixgbe_initialize_iov(adapter);
1092 
1093 	error = ixgbe_setup_interface(ctx);
1094 	if (error) {
1095 		device_printf(dev, "Interface setup failed: %d\n", error);
1096 		goto err;
1097 	}
1098 
1099 	ixgbe_if_update_admin_status(ctx);
1100 
1101 	/* Initialize statistics */
1102 	ixgbe_update_stats_counters(adapter);
1103 	ixgbe_add_hw_stats(adapter);
1104 
1105 	/* Check PCIE slot type/speed/width */
1106 	ixgbe_get_slot_info(adapter);
1107 
1108 	/*
1109 	 * Do time init and sysctl init here, but
1110 	 * only on the first port of a bypass adapter.
1111 	 */
1112 	ixgbe_bypass_init(adapter);
1113 
1114 	/* Set an initial dmac value */
1115 	adapter->dmac = 0;
1116 	/* Set initial advertised speeds (if applicable) */
1117 	adapter->advertise = ixgbe_get_advertise(adapter);
1118 
1119 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1120 		ixgbe_define_iov_schemas(dev, &error);
1121 
1122 	/* Add sysctls */
1123 	ixgbe_add_device_sysctls(ctx);
1124 
1125 	return (0);
1126 err:
1127 	return (error);
1128 } /* ixgbe_if_attach_post */
1129 
1130 /************************************************************************
1131  * ixgbe_check_wol_support
1132  *
1133  *   Checks whether the adapter's ports are capable of
1134  *   Wake On LAN by reading the adapter's NVM.
1135  *
1136  *   Sets each port's hw->wol_enabled value depending
1137  *   on the value read here.
1138  ************************************************************************/
1139 static void
1140 ixgbe_check_wol_support(struct adapter *adapter)
1141 {
1142 	struct ixgbe_hw *hw = &adapter->hw;
1143 	u16             dev_caps = 0;
1144 
1145 	/* Find out WoL support for port */
1146 	adapter->wol_support = hw->wol_enabled = 0;
1147 	ixgbe_get_device_caps(hw, &dev_caps);
1148 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1149 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1150 	     hw->bus.func == 0))
1151 		adapter->wol_support = hw->wol_enabled = 1;
1152 
1153 	/* Save initial wake up filter configuration */
1154 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1155 
1156 	return;
1157 } /* ixgbe_check_wol_support */
1158 
1159 /************************************************************************
1160  * ixgbe_setup_interface
1161  *
1162  *   Setup networking device structure and register an interface.
1163  ************************************************************************/
1164 static int
1165 ixgbe_setup_interface(if_ctx_t ctx)
1166 {
1167 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1168 	struct adapter *adapter = iflib_get_softc(ctx);
1169 
1170 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1171 
1172 	if_setbaudrate(ifp, IF_Gbps(10));
1173 
1174 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1175 
1176 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1177 
1178 	ixgbe_add_media_types(ctx);
1179 
1180 	/* Autoselect media by default */
1181 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1182 
1183 	return (0);
1184 } /* ixgbe_setup_interface */
1185 
1186 /************************************************************************
1187  * ixgbe_if_get_counter
1188  ************************************************************************/
1189 static uint64_t
1190 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1191 {
1192 	struct adapter *adapter = iflib_get_softc(ctx);
1193 	if_t           ifp = iflib_get_ifp(ctx);
1194 
1195 	switch (cnt) {
1196 	case IFCOUNTER_IPACKETS:
1197 		return (adapter->ipackets);
1198 	case IFCOUNTER_OPACKETS:
1199 		return (adapter->opackets);
1200 	case IFCOUNTER_IBYTES:
1201 		return (adapter->ibytes);
1202 	case IFCOUNTER_OBYTES:
1203 		return (adapter->obytes);
1204 	case IFCOUNTER_IMCASTS:
1205 		return (adapter->imcasts);
1206 	case IFCOUNTER_OMCASTS:
1207 		return (adapter->omcasts);
1208 	case IFCOUNTER_COLLISIONS:
1209 		return (0);
1210 	case IFCOUNTER_IQDROPS:
1211 		return (adapter->iqdrops);
1212 	case IFCOUNTER_OQDROPS:
1213 		return (0);
1214 	case IFCOUNTER_IERRORS:
1215 		return (adapter->ierrors);
1216 	default:
1217 		return (if_get_counter_default(ifp, cnt));
1218 	}
1219 } /* ixgbe_if_get_counter */
1220 
1221 /************************************************************************
1222  * ixgbe_if_i2c_req
1223  ************************************************************************/
1224 static int
1225 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1226 {
1227 	struct adapter		*adapter = iflib_get_softc(ctx);
1228 	struct ixgbe_hw 	*hw = &adapter->hw;
1229 	int 			i;
1230 
1231 
1232 	if (hw->phy.ops.read_i2c_byte == NULL)
1233 		return (ENXIO);
1234 	for (i = 0; i < req->len; i++)
1235 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1236 		    req->dev_addr, &req->data[i]);
1237 	return (0);
1238 } /* ixgbe_if_i2c_req */
1239 
1240 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1241  * @ctx: iflib context
1242  * @event: event code to check
1243  *
1244  * Defaults to returning true for unknown events.
1245  *
1246  * @returns true if iflib needs to reinit the interface
1247  */
1248 static bool
1249 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1250 {
1251 	switch (event) {
1252 	case IFLIB_RESTART_VLAN_CONFIG:
1253 		return (false);
1254 	default:
1255 		return (true);
1256 	}
1257 }
1258 
1259 /************************************************************************
1260  * ixgbe_add_media_types
1261  ************************************************************************/
1262 static void
1263 ixgbe_add_media_types(if_ctx_t ctx)
1264 {
1265 	struct adapter  *adapter = iflib_get_softc(ctx);
1266 	struct ixgbe_hw *hw = &adapter->hw;
1267 	device_t        dev = iflib_get_dev(ctx);
1268 	u64             layer;
1269 
1270 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1271 
1272 	/* Media types with matching FreeBSD media defines */
1273 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1274 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1275 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1276 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1277 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1278 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1279 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1280 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1281 
1282 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1283 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1284 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1285 		    NULL);
1286 
1287 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1288 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1289 		if (hw->phy.multispeed_fiber)
1290 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1291 			    NULL);
1292 	}
1293 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1294 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1295 		if (hw->phy.multispeed_fiber)
1296 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1297 			    NULL);
1298 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1299 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1300 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1301 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1302 
1303 #ifdef IFM_ETH_XTYPE
1304 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1305 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1306 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1307 		ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1308 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1309 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1310 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1311 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1312 #else
1313 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1314 		device_printf(dev, "Media supported: 10GbaseKR\n");
1315 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1316 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1317 	}
1318 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1319 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1320 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1321 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1322 	}
1323 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1324 		device_printf(dev, "Media supported: 1000baseKX\n");
1325 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1326 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1327 	}
1328 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1329 		device_printf(dev, "Media supported: 2500baseKX\n");
1330 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1331 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1332 	}
1333 #endif
1334 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1335 		device_printf(dev, "Media supported: 1000baseBX\n");
1336 
1337 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1338 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1339 		    0, NULL);
1340 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1341 	}
1342 
1343 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1344 } /* ixgbe_add_media_types */
1345 
1346 /************************************************************************
1347  * ixgbe_is_sfp
1348  ************************************************************************/
1349 static inline bool
1350 ixgbe_is_sfp(struct ixgbe_hw *hw)
1351 {
1352 	switch (hw->mac.type) {
1353 	case ixgbe_mac_82598EB:
1354 		if (hw->phy.type == ixgbe_phy_nl)
1355 			return (TRUE);
1356 		return (FALSE);
1357 	case ixgbe_mac_82599EB:
1358 		switch (hw->mac.ops.get_media_type(hw)) {
1359 		case ixgbe_media_type_fiber:
1360 		case ixgbe_media_type_fiber_qsfp:
1361 			return (TRUE);
1362 		default:
1363 			return (FALSE);
1364 		}
1365 	case ixgbe_mac_X550EM_x:
1366 	case ixgbe_mac_X550EM_a:
1367 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1368 			return (TRUE);
1369 		return (FALSE);
1370 	default:
1371 		return (FALSE);
1372 	}
1373 } /* ixgbe_is_sfp */
1374 
1375 /************************************************************************
1376  * ixgbe_config_link
1377  ************************************************************************/
1378 static void
1379 ixgbe_config_link(if_ctx_t ctx)
1380 {
1381 	struct adapter  *adapter = iflib_get_softc(ctx);
1382 	struct ixgbe_hw *hw = &adapter->hw;
1383 	u32             autoneg, err = 0;
1384 	bool            sfp, negotiate;
1385 
1386 	sfp = ixgbe_is_sfp(hw);
1387 
1388 	if (sfp) {
1389 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1390 		iflib_admin_intr_deferred(ctx);
1391 	} else {
1392 		if (hw->mac.ops.check_link)
1393 			err = ixgbe_check_link(hw, &adapter->link_speed,
1394 			    &adapter->link_up, FALSE);
1395 		if (err)
1396 			return;
1397 		autoneg = hw->phy.autoneg_advertised;
1398 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1399 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1400 			    &negotiate);
1401 		if (err)
1402 			return;
1403 		if (hw->mac.ops.setup_link)
1404 			err = hw->mac.ops.setup_link(hw, autoneg,
1405 			    adapter->link_up);
1406 	}
1407 } /* ixgbe_config_link */
1408 
1409 /************************************************************************
1410  * ixgbe_update_stats_counters - Update board statistics counters.
1411  ************************************************************************/
1412 static void
1413 ixgbe_update_stats_counters(struct adapter *adapter)
1414 {
1415 	struct ixgbe_hw       *hw = &adapter->hw;
1416 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1417 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1418 	u32                   lxoffrxc;
1419 	u64                   total_missed_rx = 0;
1420 
1421 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1422 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1423 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1424 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1425 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1426 
1427 	for (int i = 0; i < 16; i++) {
1428 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1429 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1430 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1431 	}
1432 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1433 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1434 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1435 
1436 	/* Hardware workaround, gprc counts missed packets */
1437 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1438 	stats->gprc -= missed_rx;
1439 
1440 	if (hw->mac.type != ixgbe_mac_82598EB) {
1441 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1442 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1443 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1444 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1445 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1446 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1447 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1448 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1449 		stats->lxoffrxc += lxoffrxc;
1450 	} else {
1451 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1452 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1453 		stats->lxoffrxc += lxoffrxc;
1454 		/* 82598 only has a counter in the high register */
1455 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1456 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1457 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1458 	}
1459 
1460 	/*
1461 	 * For watchdog management we need to know if we have been paused
1462 	 * during the last interval, so capture that here.
1463 	*/
1464 	if (lxoffrxc)
1465 		adapter->shared->isc_pause_frames = 1;
1466 
1467 	/*
1468 	 * Workaround: mprc hardware is incorrectly counting
1469 	 * broadcasts, so for now we subtract those.
1470 	 */
1471 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1472 	stats->bprc += bprc;
1473 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1474 	if (hw->mac.type == ixgbe_mac_82598EB)
1475 		stats->mprc -= bprc;
1476 
1477 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1478 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1479 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1480 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1481 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1482 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1483 
1484 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1485 	stats->lxontxc += lxon;
1486 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1487 	stats->lxofftxc += lxoff;
1488 	total = lxon + lxoff;
1489 
1490 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1491 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1492 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1493 	stats->gptc -= total;
1494 	stats->mptc -= total;
1495 	stats->ptc64 -= total;
1496 	stats->gotc -= total * ETHER_MIN_LEN;
1497 
1498 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1499 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1500 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1501 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1502 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1503 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1504 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1505 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1506 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1507 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1508 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1509 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1510 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1511 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1512 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1513 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1514 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1515 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1516 	/* Only read FCOE on 82599 */
1517 	if (hw->mac.type != ixgbe_mac_82598EB) {
1518 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1519 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1520 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1521 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1522 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1523 	}
1524 
1525 	/* Fill out the OS statistics structure */
1526 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1527 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1528 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1529 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1530 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1531 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1532 	IXGBE_SET_COLLISIONS(adapter, 0);
1533 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1534 
1535 	/*
1536 	 * Aggregate following types of errors as RX errors:
1537 	 * - CRC error count,
1538 	 * - illegal byte error count,
1539 	 * - checksum error count,
1540 	 * - missed packets count,
1541 	 * - length error count,
1542 	 * - undersized packets count,
1543 	 * - fragmented packets count,
1544 	 * - oversized packets count,
1545 	 * - jabber count.
1546 	 */
1547 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->illerrc + stats->xec +
1548 	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1549 	    stats->rjc);
1550 } /* ixgbe_update_stats_counters */
1551 
1552 /************************************************************************
1553  * ixgbe_add_hw_stats
1554  *
1555  *   Add sysctl variables, one per statistic, to the system.
1556  ************************************************************************/
1557 static void
1558 ixgbe_add_hw_stats(struct adapter *adapter)
1559 {
1560 	device_t               dev = iflib_get_dev(adapter->ctx);
1561 	struct ix_rx_queue     *rx_que;
1562 	struct ix_tx_queue     *tx_que;
1563 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1564 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1565 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1566 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1567 	struct sysctl_oid      *stat_node, *queue_node;
1568 	struct sysctl_oid_list *stat_list, *queue_list;
1569 	int                    i;
1570 
1571 #define QUEUE_NAME_LEN 32
1572 	char                   namebuf[QUEUE_NAME_LEN];
1573 
1574 	/* Driver Statistics */
1575 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1576 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1577 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1578 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1579 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1580 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1581 
1582 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1583 		struct tx_ring *txr = &tx_que->txr;
1584 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1585 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1586 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1587 		queue_list = SYSCTL_CHILDREN(queue_node);
1588 
1589 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1590 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1591 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1592 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1593 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1594 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1595 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1596 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1597 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1598 		    CTLFLAG_RD, &txr->total_packets,
1599 		    "Queue Packets Transmitted");
1600 	}
1601 
1602 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1603 		struct rx_ring *rxr = &rx_que->rxr;
1604 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1605 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1606 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1607 		queue_list = SYSCTL_CHILDREN(queue_node);
1608 
1609 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1610 		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1611 		    &adapter->rx_queues[i], 0,
1612 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1613 		    "Interrupt Rate");
1614 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1615 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1616 		    "irqs on this queue");
1617 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1618 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1619 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1620 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1621 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1622 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1623 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1624 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1625 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1626 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1627 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1628 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1629 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1630 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1631 	}
1632 
1633 	/* MAC stats get their own sub node */
1634 
1635 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1636 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1637 	stat_list = SYSCTL_CHILDREN(stat_node);
1638 
1639 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1640 	    CTLFLAG_RD, &adapter->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1641 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1642 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1643 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1644 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1645 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1646 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1647 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1648 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1649 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1650 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1651 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1652 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1654 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1656 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1657 
1658 	/* Flow Control stats */
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1660 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1662 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1664 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1665 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1666 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1667 
1668 	/* Packet Reception Stats */
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1670 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1671 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1672 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1674 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1676 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1678 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1680 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1681 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1682 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1683 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1684 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1685 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1686 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1687 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1688 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1689 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1690 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1691 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1692 	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1693 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1694 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1695 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1696 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1697 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1698 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1699 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1700 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1701 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1702 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1703 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1704 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1705 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1706 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1707 
1708 	/* Packet Transmission Stats */
1709 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1710 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1711 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1712 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1713 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1714 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1715 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1716 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1717 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1718 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1719 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1720 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1721 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1722 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1723 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1724 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1725 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1726 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1727 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1728 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1729 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1730 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1731 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1732 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1733 } /* ixgbe_add_hw_stats */
1734 
1735 /************************************************************************
1736  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1737  *
1738  *   Retrieves the TDH value from the hardware
1739  ************************************************************************/
1740 static int
1741 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1742 {
1743 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1744 	int            error;
1745 	unsigned int   val;
1746 
1747 	if (!txr)
1748 		return (0);
1749 
1750 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1751 	error = sysctl_handle_int(oidp, &val, 0, req);
1752 	if (error || !req->newptr)
1753 		return error;
1754 
1755 	return (0);
1756 } /* ixgbe_sysctl_tdh_handler */
1757 
1758 /************************************************************************
1759  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1760  *
1761  *   Retrieves the TDT value from the hardware
1762  ************************************************************************/
1763 static int
1764 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1765 {
1766 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1767 	int            error;
1768 	unsigned int   val;
1769 
1770 	if (!txr)
1771 		return (0);
1772 
1773 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1774 	error = sysctl_handle_int(oidp, &val, 0, req);
1775 	if (error || !req->newptr)
1776 		return error;
1777 
1778 	return (0);
1779 } /* ixgbe_sysctl_tdt_handler */
1780 
1781 /************************************************************************
1782  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1783  *
1784  *   Retrieves the RDH value from the hardware
1785  ************************************************************************/
1786 static int
1787 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1788 {
1789 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1790 	int            error;
1791 	unsigned int   val;
1792 
1793 	if (!rxr)
1794 		return (0);
1795 
1796 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1797 	error = sysctl_handle_int(oidp, &val, 0, req);
1798 	if (error || !req->newptr)
1799 		return error;
1800 
1801 	return (0);
1802 } /* ixgbe_sysctl_rdh_handler */
1803 
1804 /************************************************************************
1805  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1806  *
1807  *   Retrieves the RDT value from the hardware
1808  ************************************************************************/
1809 static int
1810 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1811 {
1812 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1813 	int            error;
1814 	unsigned int   val;
1815 
1816 	if (!rxr)
1817 		return (0);
1818 
1819 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1820 	error = sysctl_handle_int(oidp, &val, 0, req);
1821 	if (error || !req->newptr)
1822 		return error;
1823 
1824 	return (0);
1825 } /* ixgbe_sysctl_rdt_handler */
1826 
1827 /************************************************************************
1828  * ixgbe_if_vlan_register
1829  *
1830  *   Run via vlan config EVENT, it enables us to use the
1831  *   HW Filter table since we can get the vlan id. This
1832  *   just creates the entry in the soft version of the
1833  *   VFTA, init will repopulate the real table.
1834  ************************************************************************/
1835 static void
1836 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1837 {
1838 	struct adapter *adapter = iflib_get_softc(ctx);
1839 	u16            index, bit;
1840 
1841 	index = (vtag >> 5) & 0x7F;
1842 	bit = vtag & 0x1F;
1843 	adapter->shadow_vfta[index] |= (1 << bit);
1844 	++adapter->num_vlans;
1845 	ixgbe_setup_vlan_hw_support(ctx);
1846 } /* ixgbe_if_vlan_register */
1847 
1848 /************************************************************************
1849  * ixgbe_if_vlan_unregister
1850  *
1851  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1852  ************************************************************************/
1853 static void
1854 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1855 {
1856 	struct adapter *adapter = iflib_get_softc(ctx);
1857 	u16            index, bit;
1858 
1859 	index = (vtag >> 5) & 0x7F;
1860 	bit = vtag & 0x1F;
1861 	adapter->shadow_vfta[index] &= ~(1 << bit);
1862 	--adapter->num_vlans;
1863 	/* Re-init to load the changes */
1864 	ixgbe_setup_vlan_hw_support(ctx);
1865 } /* ixgbe_if_vlan_unregister */
1866 
1867 /************************************************************************
1868  * ixgbe_setup_vlan_hw_support
1869  ************************************************************************/
1870 static void
1871 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1872 {
1873 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1874 	struct adapter  *adapter = iflib_get_softc(ctx);
1875 	struct ixgbe_hw *hw = &adapter->hw;
1876 	struct rx_ring  *rxr;
1877 	int             i;
1878 	u32             ctrl;
1879 
1880 
1881 	/*
1882 	 * We get here thru init_locked, meaning
1883 	 * a soft reset, this has already cleared
1884 	 * the VFTA and other state, so if there
1885 	 * have been no vlan's registered do nothing.
1886 	 */
1887 	if (adapter->num_vlans == 0)
1888 		return;
1889 
1890 	/* Setup the queues for vlans */
1891 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1892 		for (i = 0; i < adapter->num_rx_queues; i++) {
1893 			rxr = &adapter->rx_queues[i].rxr;
1894 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1895 			if (hw->mac.type != ixgbe_mac_82598EB) {
1896 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1897 				ctrl |= IXGBE_RXDCTL_VME;
1898 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1899 			}
1900 			rxr->vtag_strip = TRUE;
1901 		}
1902 	}
1903 
1904 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1905 		return;
1906 	/*
1907 	 * A soft reset zero's out the VFTA, so
1908 	 * we need to repopulate it now.
1909 	 */
1910 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1911 		if (adapter->shadow_vfta[i] != 0)
1912 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1913 			    adapter->shadow_vfta[i]);
1914 
1915 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1916 	/* Enable the Filter Table if enabled */
1917 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1918 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1919 		ctrl |= IXGBE_VLNCTRL_VFE;
1920 	}
1921 	if (hw->mac.type == ixgbe_mac_82598EB)
1922 		ctrl |= IXGBE_VLNCTRL_VME;
1923 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1924 } /* ixgbe_setup_vlan_hw_support */
1925 
1926 /************************************************************************
1927  * ixgbe_get_slot_info
1928  *
1929  *   Get the width and transaction speed of
1930  *   the slot this adapter is plugged into.
1931  ************************************************************************/
1932 static void
1933 ixgbe_get_slot_info(struct adapter *adapter)
1934 {
1935 	device_t        dev = iflib_get_dev(adapter->ctx);
1936 	struct ixgbe_hw *hw = &adapter->hw;
1937 	int             bus_info_valid = TRUE;
1938 	u32             offset;
1939 	u16             link;
1940 
1941 	/* Some devices are behind an internal bridge */
1942 	switch (hw->device_id) {
1943 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1944 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1945 		goto get_parent_info;
1946 	default:
1947 		break;
1948 	}
1949 
1950 	ixgbe_get_bus_info(hw);
1951 
1952 	/*
1953 	 * Some devices don't use PCI-E, but there is no need
1954 	 * to display "Unknown" for bus speed and width.
1955 	 */
1956 	switch (hw->mac.type) {
1957 	case ixgbe_mac_X550EM_x:
1958 	case ixgbe_mac_X550EM_a:
1959 		return;
1960 	default:
1961 		goto display;
1962 	}
1963 
1964 get_parent_info:
1965 	/*
1966 	 * For the Quad port adapter we need to parse back
1967 	 * up the PCI tree to find the speed of the expansion
1968 	 * slot into which this adapter is plugged. A bit more work.
1969 	 */
1970 	dev = device_get_parent(device_get_parent(dev));
1971 #ifdef IXGBE_DEBUG
1972 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1973 	    pci_get_slot(dev), pci_get_function(dev));
1974 #endif
1975 	dev = device_get_parent(device_get_parent(dev));
1976 #ifdef IXGBE_DEBUG
1977 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1978 	    pci_get_slot(dev), pci_get_function(dev));
1979 #endif
1980 	/* Now get the PCI Express Capabilities offset */
1981 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1982 		/*
1983 		 * Hmm...can't get PCI-Express capabilities.
1984 		 * Falling back to default method.
1985 		 */
1986 		bus_info_valid = FALSE;
1987 		ixgbe_get_bus_info(hw);
1988 		goto display;
1989 	}
1990 	/* ...and read the Link Status Register */
1991 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1992 	ixgbe_set_pci_config_data_generic(hw, link);
1993 
1994 display:
1995 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1996 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1997 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1998 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1999 	     "Unknown"),
2000 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2001 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2002 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
2003 	     "Unknown"));
2004 
2005 	if (bus_info_valid) {
2006 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2007 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2008 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
2009 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
2010 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
2011 		}
2012 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2013 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2014 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
2015 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
2016 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
2017 		}
2018 	} else
2019 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2020 
2021 	return;
2022 } /* ixgbe_get_slot_info */
2023 
2024 /************************************************************************
2025  * ixgbe_if_msix_intr_assign
2026  *
2027  *   Setup MSI-X Interrupt resources and handlers
2028  ************************************************************************/
2029 static int
2030 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2031 {
2032 	struct adapter     *adapter = iflib_get_softc(ctx);
2033 	struct ix_rx_queue *rx_que = adapter->rx_queues;
2034 	struct ix_tx_queue *tx_que;
2035 	int                error, rid, vector = 0;
2036 	int                cpu_id = 0;
2037 	char               buf[16];
2038 
2039 	/* Admin Que is vector 0*/
2040 	rid = vector + 1;
2041 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2042 		rid = vector + 1;
2043 
2044 		snprintf(buf, sizeof(buf), "rxq%d", i);
2045 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2046 		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2047 
2048 		if (error) {
2049 			device_printf(iflib_get_dev(ctx),
2050 			    "Failed to allocate que int %d err: %d", i, error);
2051 			adapter->num_rx_queues = i + 1;
2052 			goto fail;
2053 		}
2054 
2055 		rx_que->msix = vector;
2056 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2057 			/*
2058 			 * The queue ID is used as the RSS layer bucket ID.
2059 			 * We look up the queue ID -> RSS CPU ID and select
2060 			 * that.
2061 			 */
2062 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2063 		} else {
2064 			/*
2065 			 * Bind the MSI-X vector, and thus the
2066 			 * rings to the corresponding cpu.
2067 			 *
2068 			 * This just happens to match the default RSS
2069 			 * round-robin bucket -> queue -> CPU allocation.
2070 			 */
2071 			if (adapter->num_rx_queues > 1)
2072 				cpu_id = i;
2073 		}
2074 
2075 	}
2076 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2077 		snprintf(buf, sizeof(buf), "txq%d", i);
2078 		tx_que = &adapter->tx_queues[i];
2079 		tx_que->msix = i % adapter->num_rx_queues;
2080 		iflib_softirq_alloc_generic(ctx,
2081 		    &adapter->rx_queues[tx_que->msix].que_irq,
2082 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2083 	}
2084 	rid = vector + 1;
2085 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2086 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2087 	if (error) {
2088 		device_printf(iflib_get_dev(ctx),
2089 		    "Failed to register admin handler");
2090 		return (error);
2091 	}
2092 
2093 	adapter->vector = vector;
2094 
2095 	return (0);
2096 fail:
2097 	iflib_irq_free(ctx, &adapter->irq);
2098 	rx_que = adapter->rx_queues;
2099 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2100 		iflib_irq_free(ctx, &rx_que->que_irq);
2101 
2102 	return (error);
2103 } /* ixgbe_if_msix_intr_assign */
2104 
2105 /*********************************************************************
2106  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2107  **********************************************************************/
2108 static int
2109 ixgbe_msix_que(void *arg)
2110 {
2111 	struct ix_rx_queue *que = arg;
2112 	struct adapter     *adapter = que->adapter;
2113 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2114 
2115 	/* Protect against spurious interrupts */
2116 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2117 		return (FILTER_HANDLED);
2118 
2119 	ixgbe_disable_queue(adapter, que->msix);
2120 	++que->irqs;
2121 
2122 	return (FILTER_SCHEDULE_THREAD);
2123 } /* ixgbe_msix_que */
2124 
2125 /************************************************************************
2126  * ixgbe_media_status - Media Ioctl callback
2127  *
2128  *   Called whenever the user queries the status of
2129  *   the interface using ifconfig.
2130  ************************************************************************/
2131 static void
2132 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2133 {
2134 	struct adapter  *adapter = iflib_get_softc(ctx);
2135 	struct ixgbe_hw *hw = &adapter->hw;
2136 	int             layer;
2137 
2138 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2139 
2140 	ifmr->ifm_status = IFM_AVALID;
2141 	ifmr->ifm_active = IFM_ETHER;
2142 
2143 	if (!adapter->link_active)
2144 		return;
2145 
2146 	ifmr->ifm_status |= IFM_ACTIVE;
2147 	layer = adapter->phy_layer;
2148 
2149 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2150 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2151 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2152 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2153 		switch (adapter->link_speed) {
2154 		case IXGBE_LINK_SPEED_10GB_FULL:
2155 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2156 			break;
2157 		case IXGBE_LINK_SPEED_1GB_FULL:
2158 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2159 			break;
2160 		case IXGBE_LINK_SPEED_100_FULL:
2161 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2162 			break;
2163 		case IXGBE_LINK_SPEED_10_FULL:
2164 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2165 			break;
2166 		}
2167 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2168 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2169 		switch (adapter->link_speed) {
2170 		case IXGBE_LINK_SPEED_10GB_FULL:
2171 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2172 			break;
2173 		}
2174 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2175 		switch (adapter->link_speed) {
2176 		case IXGBE_LINK_SPEED_10GB_FULL:
2177 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2178 			break;
2179 		case IXGBE_LINK_SPEED_1GB_FULL:
2180 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2181 			break;
2182 		}
2183 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2184 		switch (adapter->link_speed) {
2185 		case IXGBE_LINK_SPEED_10GB_FULL:
2186 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2187 			break;
2188 		case IXGBE_LINK_SPEED_1GB_FULL:
2189 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2190 			break;
2191 		}
2192 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2193 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2194 		switch (adapter->link_speed) {
2195 		case IXGBE_LINK_SPEED_10GB_FULL:
2196 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2197 			break;
2198 		case IXGBE_LINK_SPEED_1GB_FULL:
2199 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2200 			break;
2201 		}
2202 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2203 		switch (adapter->link_speed) {
2204 		case IXGBE_LINK_SPEED_10GB_FULL:
2205 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2206 			break;
2207 		}
2208 	/*
2209 	 * XXX: These need to use the proper media types once
2210 	 * they're added.
2211 	 */
2212 #ifndef IFM_ETH_XTYPE
2213 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2214 		switch (adapter->link_speed) {
2215 		case IXGBE_LINK_SPEED_10GB_FULL:
2216 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2217 			break;
2218 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2219 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2220 			break;
2221 		case IXGBE_LINK_SPEED_1GB_FULL:
2222 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2223 			break;
2224 		}
2225 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2226 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2227 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2228 		switch (adapter->link_speed) {
2229 		case IXGBE_LINK_SPEED_10GB_FULL:
2230 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2231 			break;
2232 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2233 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2234 			break;
2235 		case IXGBE_LINK_SPEED_1GB_FULL:
2236 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2237 			break;
2238 		}
2239 #else
2240 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2241 		switch (adapter->link_speed) {
2242 		case IXGBE_LINK_SPEED_10GB_FULL:
2243 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2244 			break;
2245 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2246 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2247 			break;
2248 		case IXGBE_LINK_SPEED_1GB_FULL:
2249 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2250 			break;
2251 		}
2252 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2253 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2254 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2255 		switch (adapter->link_speed) {
2256 		case IXGBE_LINK_SPEED_10GB_FULL:
2257 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2258 			break;
2259 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2260 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2261 			break;
2262 		case IXGBE_LINK_SPEED_1GB_FULL:
2263 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2264 			break;
2265 		}
2266 #endif
2267 
2268 	/* If nothing is recognized... */
2269 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2270 		ifmr->ifm_active |= IFM_UNKNOWN;
2271 
2272 	/* Display current flow control setting used on link */
2273 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2274 	    hw->fc.current_mode == ixgbe_fc_full)
2275 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2276 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2277 	    hw->fc.current_mode == ixgbe_fc_full)
2278 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2279 } /* ixgbe_media_status */
2280 
2281 /************************************************************************
2282  * ixgbe_media_change - Media Ioctl callback
2283  *
2284  *   Called when the user changes speed/duplex using
2285  *   media/mediopt option with ifconfig.
2286  ************************************************************************/
2287 static int
2288 ixgbe_if_media_change(if_ctx_t ctx)
2289 {
2290 	struct adapter   *adapter = iflib_get_softc(ctx);
2291 	struct ifmedia   *ifm = iflib_get_media(ctx);
2292 	struct ixgbe_hw  *hw = &adapter->hw;
2293 	ixgbe_link_speed speed = 0;
2294 
2295 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2296 
2297 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2298 		return (EINVAL);
2299 
2300 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2301 		return (EPERM);
2302 
2303 	/*
2304 	 * We don't actually need to check against the supported
2305 	 * media types of the adapter; ifmedia will take care of
2306 	 * that for us.
2307 	 */
2308 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2309 	case IFM_AUTO:
2310 	case IFM_10G_T:
2311 		speed |= IXGBE_LINK_SPEED_100_FULL;
2312 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2313 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2314 		break;
2315 	case IFM_10G_LRM:
2316 	case IFM_10G_LR:
2317 #ifndef IFM_ETH_XTYPE
2318 	case IFM_10G_SR: /* KR, too */
2319 	case IFM_10G_CX4: /* KX4 */
2320 #else
2321 	case IFM_10G_KR:
2322 	case IFM_10G_KX4:
2323 #endif
2324 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2325 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2326 		break;
2327 #ifndef IFM_ETH_XTYPE
2328 	case IFM_1000_CX: /* KX */
2329 #else
2330 	case IFM_1000_KX:
2331 #endif
2332 	case IFM_1000_LX:
2333 	case IFM_1000_SX:
2334 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2335 		break;
2336 	case IFM_1000_T:
2337 		speed |= IXGBE_LINK_SPEED_100_FULL;
2338 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2339 		break;
2340 	case IFM_10G_TWINAX:
2341 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2342 		break;
2343 	case IFM_100_TX:
2344 		speed |= IXGBE_LINK_SPEED_100_FULL;
2345 		break;
2346 	case IFM_10_T:
2347 		speed |= IXGBE_LINK_SPEED_10_FULL;
2348 		break;
2349 	default:
2350 		goto invalid;
2351 	}
2352 
2353 	hw->mac.autotry_restart = TRUE;
2354 	hw->mac.ops.setup_link(hw, speed, TRUE);
2355 	adapter->advertise =
2356 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2357 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2358 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2359 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2360 
2361 	return (0);
2362 
2363 invalid:
2364 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2365 
2366 	return (EINVAL);
2367 } /* ixgbe_if_media_change */
2368 
2369 /************************************************************************
2370  * ixgbe_set_promisc
2371  ************************************************************************/
2372 static int
2373 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2374 {
2375 	struct adapter *adapter = iflib_get_softc(ctx);
2376 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2377 	u32            rctl;
2378 	int            mcnt = 0;
2379 
2380 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2381 	rctl &= (~IXGBE_FCTRL_UPE);
2382 	if (ifp->if_flags & IFF_ALLMULTI)
2383 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2384 	else {
2385 		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
2386 	}
2387 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2388 		rctl &= (~IXGBE_FCTRL_MPE);
2389 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2390 
2391 	if (ifp->if_flags & IFF_PROMISC) {
2392 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2393 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2394 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2395 		rctl |= IXGBE_FCTRL_MPE;
2396 		rctl &= ~IXGBE_FCTRL_UPE;
2397 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2398 	}
2399 	return (0);
2400 } /* ixgbe_if_promisc_set */
2401 
2402 /************************************************************************
2403  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2404  ************************************************************************/
2405 static int
2406 ixgbe_msix_link(void *arg)
2407 {
2408 	struct adapter  *adapter = arg;
2409 	struct ixgbe_hw *hw = &adapter->hw;
2410 	u32             eicr, eicr_mask;
2411 	s32             retval;
2412 
2413 	++adapter->link_irq;
2414 
2415 	/* Pause other interrupts */
2416 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2417 
2418 	/* First get the cause */
2419 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2420 	/* Be sure the queue bits are not cleared */
2421 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2422 	/* Clear interrupt with write */
2423 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2424 
2425 	/* Link status change */
2426 	if (eicr & IXGBE_EICR_LSC) {
2427 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2428 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2429 	}
2430 
2431 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2432 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2433 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2434 			/* This is probably overkill :) */
2435 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2436 				return (FILTER_HANDLED);
2437 			/* Disable the interrupt */
2438 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2439 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2440 		} else
2441 			if (eicr & IXGBE_EICR_ECC) {
2442 				device_printf(iflib_get_dev(adapter->ctx),
2443 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2444 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2445 			}
2446 
2447 		/* Check for over temp condition */
2448 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2449 			switch (adapter->hw.mac.type) {
2450 			case ixgbe_mac_X550EM_a:
2451 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2452 					break;
2453 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2454 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2455 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2456 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2457 				retval = hw->phy.ops.check_overtemp(hw);
2458 				if (retval != IXGBE_ERR_OVERTEMP)
2459 					break;
2460 				device_printf(iflib_get_dev(adapter->ctx),
2461 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2462 				device_printf(iflib_get_dev(adapter->ctx),
2463 				    "System shutdown required!\n");
2464 				break;
2465 			default:
2466 				if (!(eicr & IXGBE_EICR_TS))
2467 					break;
2468 				retval = hw->phy.ops.check_overtemp(hw);
2469 				if (retval != IXGBE_ERR_OVERTEMP)
2470 					break;
2471 				device_printf(iflib_get_dev(adapter->ctx),
2472 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2473 				device_printf(iflib_get_dev(adapter->ctx),
2474 				    "System shutdown required!\n");
2475 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2476 				break;
2477 			}
2478 		}
2479 
2480 		/* Check for VF message */
2481 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2482 		    (eicr & IXGBE_EICR_MAILBOX))
2483 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2484 	}
2485 
2486 	if (ixgbe_is_sfp(hw)) {
2487 		/* Pluggable optics-related interrupt */
2488 		if (hw->mac.type >= ixgbe_mac_X540)
2489 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2490 		else
2491 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2492 
2493 		if (eicr & eicr_mask) {
2494 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2495 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2496 		}
2497 
2498 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2499 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2500 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2501 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2502 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2503 		}
2504 	}
2505 
2506 	/* Check for fan failure */
2507 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2508 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2509 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2510 	}
2511 
2512 	/* External PHY interrupt */
2513 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2514 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2515 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2516 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2517 	}
2518 
2519 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2520 } /* ixgbe_msix_link */
2521 
2522 /************************************************************************
2523  * ixgbe_sysctl_interrupt_rate_handler
2524  ************************************************************************/
2525 static int
2526 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2527 {
2528 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2529 	int                error;
2530 	unsigned int       reg, usec, rate;
2531 
2532 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2533 	usec = ((reg & 0x0FF8) >> 3);
2534 	if (usec > 0)
2535 		rate = 500000 / usec;
2536 	else
2537 		rate = 0;
2538 	error = sysctl_handle_int(oidp, &rate, 0, req);
2539 	if (error || !req->newptr)
2540 		return error;
2541 	reg &= ~0xfff; /* default, no limitation */
2542 	ixgbe_max_interrupt_rate = 0;
2543 	if (rate > 0 && rate < 500000) {
2544 		if (rate < 1000)
2545 			rate = 1000;
2546 		ixgbe_max_interrupt_rate = rate;
2547 		reg |= ((4000000/rate) & 0xff8);
2548 	}
2549 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2550 
2551 	return (0);
2552 } /* ixgbe_sysctl_interrupt_rate_handler */
2553 
2554 /************************************************************************
2555  * ixgbe_add_device_sysctls
2556  ************************************************************************/
2557 static void
2558 ixgbe_add_device_sysctls(if_ctx_t ctx)
2559 {
2560 	struct adapter         *adapter = iflib_get_softc(ctx);
2561 	device_t               dev = iflib_get_dev(ctx);
2562 	struct ixgbe_hw        *hw = &adapter->hw;
2563 	struct sysctl_oid_list *child;
2564 	struct sysctl_ctx_list *ctx_list;
2565 
2566 	ctx_list = device_get_sysctl_ctx(dev);
2567 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2568 
2569 	/* Sysctls for all devices */
2570 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2571 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2572 	    adapter, 0, ixgbe_sysctl_flowcntl, "I",
2573 	    IXGBE_SYSCTL_DESC_SET_FC);
2574 
2575 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2576 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2577 	    adapter, 0, ixgbe_sysctl_advertise, "I",
2578 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2579 
2580 #ifdef IXGBE_DEBUG
2581 	/* testing sysctls (for all devices) */
2582 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2583 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2584 	    adapter, 0, ixgbe_sysctl_power_state,
2585 	    "I", "PCI Power State");
2586 
2587 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2588 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2589 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2590 #endif
2591 	/* for X550 series devices */
2592 	if (hw->mac.type >= ixgbe_mac_X550)
2593 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2594 		    CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2595 		    adapter, 0, ixgbe_sysctl_dmac,
2596 		    "I", "DMA Coalesce");
2597 
2598 	/* for WoL-capable devices */
2599 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2600 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2601 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2602 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2603 
2604 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2605 		    CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2606 		    adapter, 0, ixgbe_sysctl_wufc,
2607 		    "I", "Enable/Disable Wake Up Filters");
2608 	}
2609 
2610 	/* for X552/X557-AT devices */
2611 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2612 		struct sysctl_oid *phy_node;
2613 		struct sysctl_oid_list *phy_list;
2614 
2615 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2616 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2617 		phy_list = SYSCTL_CHILDREN(phy_node);
2618 
2619 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2620 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2621 		    adapter, 0, ixgbe_sysctl_phy_temp,
2622 		    "I", "Current External PHY Temperature (Celsius)");
2623 
2624 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2625 		    "overtemp_occurred",
2626 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2627 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2628 		    "External PHY High Temperature Event Occurred");
2629 	}
2630 
2631 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2632 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2633 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2634 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2635 	}
2636 } /* ixgbe_add_device_sysctls */
2637 
2638 /************************************************************************
2639  * ixgbe_allocate_pci_resources
2640  ************************************************************************/
2641 static int
2642 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2643 {
2644 	struct adapter *adapter = iflib_get_softc(ctx);
2645 	device_t        dev = iflib_get_dev(ctx);
2646 	int             rid;
2647 
2648 	rid = PCIR_BAR(0);
2649 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2650 	    RF_ACTIVE);
2651 
2652 	if (!(adapter->pci_mem)) {
2653 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2654 		return (ENXIO);
2655 	}
2656 
2657 	/* Save bus_space values for READ/WRITE_REG macros */
2658 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2659 	adapter->osdep.mem_bus_space_handle =
2660 	    rman_get_bushandle(adapter->pci_mem);
2661 	/* Set hw values for shared code */
2662 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2663 
2664 	return (0);
2665 } /* ixgbe_allocate_pci_resources */
2666 
2667 /************************************************************************
2668  * ixgbe_detach - Device removal routine
2669  *
2670  *   Called when the driver is being removed.
2671  *   Stops the adapter and deallocates all the resources
2672  *   that were allocated for driver operation.
2673  *
2674  *   return 0 on success, positive on failure
2675  ************************************************************************/
2676 static int
2677 ixgbe_if_detach(if_ctx_t ctx)
2678 {
2679 	struct adapter *adapter = iflib_get_softc(ctx);
2680 	device_t       dev = iflib_get_dev(ctx);
2681 	u32            ctrl_ext;
2682 
2683 	INIT_DEBUGOUT("ixgbe_detach: begin");
2684 
2685 	if (ixgbe_pci_iov_detach(dev) != 0) {
2686 		device_printf(dev, "SR-IOV in use; detach first.\n");
2687 		return (EBUSY);
2688 	}
2689 
2690 	ixgbe_setup_low_power_mode(ctx);
2691 
2692 	/* let hardware know driver is unloading */
2693 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2694 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2695 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2696 
2697 	ixgbe_free_pci_resources(ctx);
2698 	free(adapter->mta, M_IXGBE);
2699 
2700 	return (0);
2701 } /* ixgbe_if_detach */
2702 
2703 /************************************************************************
2704  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2705  *
2706  *   Prepare the adapter/port for LPLU and/or WoL
2707  ************************************************************************/
2708 static int
2709 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2710 {
2711 	struct adapter  *adapter = iflib_get_softc(ctx);
2712 	struct ixgbe_hw *hw = &adapter->hw;
2713 	device_t        dev = iflib_get_dev(ctx);
2714 	s32             error = 0;
2715 
2716 	if (!hw->wol_enabled)
2717 		ixgbe_set_phy_power(hw, FALSE);
2718 
2719 	/* Limit power management flow to X550EM baseT */
2720 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2721 	    hw->phy.ops.enter_lplu) {
2722 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2723 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2724 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2725 
2726 		/*
2727 		 * Clear Wake Up Status register to prevent any previous wakeup
2728 		 * events from waking us up immediately after we suspend.
2729 		 */
2730 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2731 
2732 		/*
2733 		 * Program the Wakeup Filter Control register with user filter
2734 		 * settings
2735 		 */
2736 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2737 
2738 		/* Enable wakeups and power management in Wakeup Control */
2739 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2740 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2741 
2742 		/* X550EM baseT adapters need a special LPLU flow */
2743 		hw->phy.reset_disable = TRUE;
2744 		ixgbe_if_stop(ctx);
2745 		error = hw->phy.ops.enter_lplu(hw);
2746 		if (error)
2747 			device_printf(dev, "Error entering LPLU: %d\n", error);
2748 		hw->phy.reset_disable = FALSE;
2749 	} else {
2750 		/* Just stop for other adapters */
2751 		ixgbe_if_stop(ctx);
2752 	}
2753 
2754 	return error;
2755 } /* ixgbe_setup_low_power_mode */
2756 
2757 /************************************************************************
2758  * ixgbe_shutdown - Shutdown entry point
2759  ************************************************************************/
2760 static int
2761 ixgbe_if_shutdown(if_ctx_t ctx)
2762 {
2763 	int error = 0;
2764 
2765 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2766 
2767 	error = ixgbe_setup_low_power_mode(ctx);
2768 
2769 	return (error);
2770 } /* ixgbe_if_shutdown */
2771 
2772 /************************************************************************
2773  * ixgbe_suspend
2774  *
2775  *   From D0 to D3
2776  ************************************************************************/
2777 static int
2778 ixgbe_if_suspend(if_ctx_t ctx)
2779 {
2780 	int error = 0;
2781 
2782 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2783 
2784 	error = ixgbe_setup_low_power_mode(ctx);
2785 
2786 	return (error);
2787 } /* ixgbe_if_suspend */
2788 
2789 /************************************************************************
2790  * ixgbe_resume
2791  *
2792  *   From D3 to D0
2793  ************************************************************************/
2794 static int
2795 ixgbe_if_resume(if_ctx_t ctx)
2796 {
2797 	struct adapter  *adapter = iflib_get_softc(ctx);
2798 	device_t        dev = iflib_get_dev(ctx);
2799 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2800 	struct ixgbe_hw *hw = &adapter->hw;
2801 	u32             wus;
2802 
2803 	INIT_DEBUGOUT("ixgbe_resume: begin");
2804 
2805 	/* Read & clear WUS register */
2806 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2807 	if (wus)
2808 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2809 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2810 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2811 	/* And clear WUFC until next low-power transition */
2812 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2813 
2814 	/*
2815 	 * Required after D3->D0 transition;
2816 	 * will re-advertise all previous advertised speeds
2817 	 */
2818 	if (ifp->if_flags & IFF_UP)
2819 		ixgbe_if_init(ctx);
2820 
2821 	return (0);
2822 } /* ixgbe_if_resume */
2823 
2824 /************************************************************************
2825  * ixgbe_if_mtu_set - Ioctl mtu entry point
2826  *
2827  *   Return 0 on success, EINVAL on failure
2828  ************************************************************************/
2829 static int
2830 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2831 {
2832 	struct adapter *adapter = iflib_get_softc(ctx);
2833 	int error = 0;
2834 
2835 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2836 
2837 	if (mtu > IXGBE_MAX_MTU) {
2838 		error = EINVAL;
2839 	} else {
2840 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2841 	}
2842 
2843 	return error;
2844 } /* ixgbe_if_mtu_set */
2845 
2846 /************************************************************************
2847  * ixgbe_if_crcstrip_set
2848  ************************************************************************/
2849 static void
2850 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2851 {
2852 	struct adapter *sc = iflib_get_softc(ctx);
2853 	struct ixgbe_hw *hw = &sc->hw;
2854 	/* crc stripping is set in two places:
2855 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2856 	 * IXGBE_RDRXCTL (set by the original driver in
2857 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2858 	 *	We disable the setting when netmap is compiled in).
2859 	 * We update the values here, but also in ixgbe.c because
2860 	 * init_locked sometimes is called outside our control.
2861 	 */
2862 	uint32_t hl, rxc;
2863 
2864 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2865 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2866 #ifdef NETMAP
2867 	if (netmap_verbose)
2868 		D("%s read  HLREG 0x%x rxc 0x%x",
2869 			onoff ? "enter" : "exit", hl, rxc);
2870 #endif
2871 	/* hw requirements ... */
2872 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2873 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2874 	if (onoff && !crcstrip) {
2875 		/* keep the crc. Fast rx */
2876 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2877 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2878 	} else {
2879 		/* reset default mode */
2880 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2881 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2882 	}
2883 #ifdef NETMAP
2884 	if (netmap_verbose)
2885 		D("%s write HLREG 0x%x rxc 0x%x",
2886 			onoff ? "enter" : "exit", hl, rxc);
2887 #endif
2888 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2889 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2890 } /* ixgbe_if_crcstrip_set */
2891 
2892 /*********************************************************************
2893  * ixgbe_if_init - Init entry point
2894  *
2895  *   Used in two ways: It is used by the stack as an init
2896  *   entry point in network interface structure. It is also
2897  *   used by the driver as a hw/sw initialization routine to
2898  *   get to a consistent state.
2899  *
2900  *   Return 0 on success, positive on failure
2901  **********************************************************************/
2902 void
2903 ixgbe_if_init(if_ctx_t ctx)
2904 {
2905 	struct adapter     *adapter = iflib_get_softc(ctx);
2906 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2907 	device_t           dev = iflib_get_dev(ctx);
2908 	struct ixgbe_hw *hw = &adapter->hw;
2909 	struct ix_rx_queue *rx_que;
2910 	struct ix_tx_queue *tx_que;
2911 	u32             txdctl, mhadd;
2912 	u32             rxdctl, rxctrl;
2913 	u32             ctrl_ext;
2914 
2915 	int             i, j, err;
2916 
2917 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2918 
2919 	/* Queue indices may change with IOV mode */
2920 	ixgbe_align_all_queue_indices(adapter);
2921 
2922 	/* reprogram the RAR[0] in case user changed it. */
2923 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2924 
2925 	/* Get the latest mac address, User can use a LAA */
2926 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2927 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2928 	hw->addr_ctrl.rar_used_count = 1;
2929 
2930 	ixgbe_init_hw(hw);
2931 
2932 	ixgbe_initialize_iov(adapter);
2933 
2934 	ixgbe_initialize_transmit_units(ctx);
2935 
2936 	/* Setup Multicast table */
2937 	ixgbe_if_multi_set(ctx);
2938 
2939 	/* Determine the correct mbuf pool, based on frame size */
2940 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2941 
2942 	/* Configure RX settings */
2943 	ixgbe_initialize_receive_units(ctx);
2944 
2945 	/*
2946 	 * Initialize variable holding task enqueue requests
2947 	 * from MSI-X interrupts
2948 	 */
2949 	adapter->task_requests = 0;
2950 
2951 	/* Enable SDP & MSI-X interrupts based on adapter */
2952 	ixgbe_config_gpie(adapter);
2953 
2954 	/* Set MTU size */
2955 	if (ifp->if_mtu > ETHERMTU) {
2956 		/* aka IXGBE_MAXFRS on 82599 and newer */
2957 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2958 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2959 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2960 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2961 	}
2962 
2963 	/* Now enable all the queues */
2964 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2965 		struct tx_ring *txr = &tx_que->txr;
2966 
2967 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2968 		txdctl |= IXGBE_TXDCTL_ENABLE;
2969 		/* Set WTHRESH to 8, burst writeback */
2970 		txdctl |= (8 << 16);
2971 		/*
2972 		 * When the internal queue falls below PTHRESH (32),
2973 		 * start prefetching as long as there are at least
2974 		 * HTHRESH (1) buffers ready. The values are taken
2975 		 * from the Intel linux driver 3.8.21.
2976 		 * Prefetching enables tx line rate even with 1 queue.
2977 		 */
2978 		txdctl |= (32 << 0) | (1 << 8);
2979 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2980 	}
2981 
2982 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2983 		struct rx_ring *rxr = &rx_que->rxr;
2984 
2985 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2986 		if (hw->mac.type == ixgbe_mac_82598EB) {
2987 			/*
2988 			 * PTHRESH = 21
2989 			 * HTHRESH = 4
2990 			 * WTHRESH = 8
2991 			 */
2992 			rxdctl &= ~0x3FFFFF;
2993 			rxdctl |= 0x080420;
2994 		}
2995 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2996 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2997 		for (j = 0; j < 10; j++) {
2998 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2999 			    IXGBE_RXDCTL_ENABLE)
3000 				break;
3001 			else
3002 				msec_delay(1);
3003 		}
3004 		wmb();
3005 	}
3006 
3007 	/* Enable Receive engine */
3008 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3009 	if (hw->mac.type == ixgbe_mac_82598EB)
3010 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
3011 	rxctrl |= IXGBE_RXCTRL_RXEN;
3012 	ixgbe_enable_rx_dma(hw, rxctrl);
3013 
3014 	/* Set up MSI/MSI-X routing */
3015 	if (ixgbe_enable_msix)  {
3016 		ixgbe_configure_ivars(adapter);
3017 		/* Set up auto-mask */
3018 		if (hw->mac.type == ixgbe_mac_82598EB)
3019 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3020 		else {
3021 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3022 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3023 		}
3024 	} else {  /* Simple settings for Legacy/MSI */
3025 		ixgbe_set_ivar(adapter, 0, 0, 0);
3026 		ixgbe_set_ivar(adapter, 0, 0, 1);
3027 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3028 	}
3029 
3030 	ixgbe_init_fdir(adapter);
3031 
3032 	/*
3033 	 * Check on any SFP devices that
3034 	 * need to be kick-started
3035 	 */
3036 	if (hw->phy.type == ixgbe_phy_none) {
3037 		err = hw->phy.ops.identify(hw);
3038 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3039 			device_printf(dev,
3040 			    "Unsupported SFP+ module type was detected.\n");
3041 			return;
3042 		}
3043 	}
3044 
3045 	/* Set moderation on the Link interrupt */
3046 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3047 
3048 	/* Enable power to the phy. */
3049 	ixgbe_set_phy_power(hw, TRUE);
3050 
3051 	/* Config/Enable Link */
3052 	ixgbe_config_link(ctx);
3053 
3054 	/* Hardware Packet Buffer & Flow Control setup */
3055 	ixgbe_config_delay_values(adapter);
3056 
3057 	/* Initialize the FC settings */
3058 	ixgbe_start_hw(hw);
3059 
3060 	/* Set up VLAN support and filter */
3061 	ixgbe_setup_vlan_hw_support(ctx);
3062 
3063 	/* Setup DMA Coalescing */
3064 	ixgbe_config_dmac(adapter);
3065 
3066 	/* And now turn on interrupts */
3067 	ixgbe_if_enable_intr(ctx);
3068 
3069 	/* Enable the use of the MBX by the VF's */
3070 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3071 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3072 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3073 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3074 	}
3075 
3076 } /* ixgbe_init_locked */
3077 
3078 /************************************************************************
3079  * ixgbe_set_ivar
3080  *
3081  *   Setup the correct IVAR register for a particular MSI-X interrupt
3082  *     (yes this is all very magic and confusing :)
3083  *    - entry is the register array entry
3084  *    - vector is the MSI-X vector for this queue
3085  *    - type is RX/TX/MISC
3086  ************************************************************************/
3087 static void
3088 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3089 {
3090 	struct ixgbe_hw *hw = &adapter->hw;
3091 	u32 ivar, index;
3092 
3093 	vector |= IXGBE_IVAR_ALLOC_VAL;
3094 
3095 	switch (hw->mac.type) {
3096 	case ixgbe_mac_82598EB:
3097 		if (type == -1)
3098 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3099 		else
3100 			entry += (type * 64);
3101 		index = (entry >> 2) & 0x1F;
3102 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3103 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3104 		ivar |= (vector << (8 * (entry & 0x3)));
3105 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3106 		break;
3107 	case ixgbe_mac_82599EB:
3108 	case ixgbe_mac_X540:
3109 	case ixgbe_mac_X550:
3110 	case ixgbe_mac_X550EM_x:
3111 	case ixgbe_mac_X550EM_a:
3112 		if (type == -1) { /* MISC IVAR */
3113 			index = (entry & 1) * 8;
3114 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3115 			ivar &= ~(0xFF << index);
3116 			ivar |= (vector << index);
3117 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3118 		} else {          /* RX/TX IVARS */
3119 			index = (16 * (entry & 1)) + (8 * type);
3120 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3121 			ivar &= ~(0xFF << index);
3122 			ivar |= (vector << index);
3123 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3124 		}
3125 	default:
3126 		break;
3127 	}
3128 } /* ixgbe_set_ivar */
3129 
3130 /************************************************************************
3131  * ixgbe_configure_ivars
3132  ************************************************************************/
3133 static void
3134 ixgbe_configure_ivars(struct adapter *adapter)
3135 {
3136 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3137 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3138 	u32                newitr;
3139 
3140 	if (ixgbe_max_interrupt_rate > 0)
3141 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3142 	else {
3143 		/*
3144 		 * Disable DMA coalescing if interrupt moderation is
3145 		 * disabled.
3146 		 */
3147 		adapter->dmac = 0;
3148 		newitr = 0;
3149 	}
3150 
3151 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3152 		struct rx_ring *rxr = &rx_que->rxr;
3153 
3154 		/* First the RX queue entry */
3155 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3156 
3157 		/* Set an Initial EITR value */
3158 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3159 	}
3160 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3161 		struct tx_ring *txr = &tx_que->txr;
3162 
3163 		/* ... and the TX */
3164 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3165 	}
3166 	/* For the Link interrupt */
3167 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3168 } /* ixgbe_configure_ivars */
3169 
3170 /************************************************************************
3171  * ixgbe_config_gpie
3172  ************************************************************************/
3173 static void
3174 ixgbe_config_gpie(struct adapter *adapter)
3175 {
3176 	struct ixgbe_hw *hw = &adapter->hw;
3177 	u32             gpie;
3178 
3179 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3180 
3181 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3182 		/* Enable Enhanced MSI-X mode */
3183 		gpie |= IXGBE_GPIE_MSIX_MODE
3184 		     |  IXGBE_GPIE_EIAME
3185 		     |  IXGBE_GPIE_PBA_SUPPORT
3186 		     |  IXGBE_GPIE_OCD;
3187 	}
3188 
3189 	/* Fan Failure Interrupt */
3190 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3191 		gpie |= IXGBE_SDP1_GPIEN;
3192 
3193 	/* Thermal Sensor Interrupt */
3194 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3195 		gpie |= IXGBE_SDP0_GPIEN_X540;
3196 
3197 	/* Link detection */
3198 	switch (hw->mac.type) {
3199 	case ixgbe_mac_82599EB:
3200 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3201 		break;
3202 	case ixgbe_mac_X550EM_x:
3203 	case ixgbe_mac_X550EM_a:
3204 		gpie |= IXGBE_SDP0_GPIEN_X540;
3205 		break;
3206 	default:
3207 		break;
3208 	}
3209 
3210 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3211 
3212 } /* ixgbe_config_gpie */
3213 
3214 /************************************************************************
3215  * ixgbe_config_delay_values
3216  *
3217  *   Requires adapter->max_frame_size to be set.
3218  ************************************************************************/
3219 static void
3220 ixgbe_config_delay_values(struct adapter *adapter)
3221 {
3222 	struct ixgbe_hw *hw = &adapter->hw;
3223 	u32             rxpb, frame, size, tmp;
3224 
3225 	frame = adapter->max_frame_size;
3226 
3227 	/* Calculate High Water */
3228 	switch (hw->mac.type) {
3229 	case ixgbe_mac_X540:
3230 	case ixgbe_mac_X550:
3231 	case ixgbe_mac_X550EM_x:
3232 	case ixgbe_mac_X550EM_a:
3233 		tmp = IXGBE_DV_X540(frame, frame);
3234 		break;
3235 	default:
3236 		tmp = IXGBE_DV(frame, frame);
3237 		break;
3238 	}
3239 	size = IXGBE_BT2KB(tmp);
3240 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3241 	hw->fc.high_water[0] = rxpb - size;
3242 
3243 	/* Now calculate Low Water */
3244 	switch (hw->mac.type) {
3245 	case ixgbe_mac_X540:
3246 	case ixgbe_mac_X550:
3247 	case ixgbe_mac_X550EM_x:
3248 	case ixgbe_mac_X550EM_a:
3249 		tmp = IXGBE_LOW_DV_X540(frame);
3250 		break;
3251 	default:
3252 		tmp = IXGBE_LOW_DV(frame);
3253 		break;
3254 	}
3255 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3256 
3257 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3258 	hw->fc.send_xon = TRUE;
3259 } /* ixgbe_config_delay_values */
3260 
3261 /************************************************************************
3262  * ixgbe_set_multi - Multicast Update
3263  *
3264  *   Called whenever multicast address list is updated.
3265  ************************************************************************/
3266 static u_int
3267 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count)
3268 {
3269 	struct adapter *adapter = arg;
3270 	struct ixgbe_mc_addr *mta = adapter->mta;
3271 
3272 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3273 		return (0);
3274 	bcopy(LLADDR(sdl), mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3275 	mta[count].vmdq = adapter->pool;
3276 
3277 	return (1);
3278 } /* ixgbe_mc_filter_apply */
3279 
3280 static void
3281 ixgbe_if_multi_set(if_ctx_t ctx)
3282 {
3283 	struct adapter       *adapter = iflib_get_softc(ctx);
3284 	struct ixgbe_mc_addr *mta;
3285 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3286 	u8                   *update_ptr;
3287 	u32                  fctrl;
3288 	u_int		     mcnt;
3289 
3290 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3291 
3292 	mta = adapter->mta;
3293 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3294 
3295 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3296 	    adapter);
3297 
3298 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3299 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3300 	if (ifp->if_flags & IFF_PROMISC)
3301 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3302 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3303 	    ifp->if_flags & IFF_ALLMULTI) {
3304 		fctrl |= IXGBE_FCTRL_MPE;
3305 		fctrl &= ~IXGBE_FCTRL_UPE;
3306 	} else
3307 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3308 
3309 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3310 
3311 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3312 		update_ptr = (u8 *)mta;
3313 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3314 		    ixgbe_mc_array_itr, TRUE);
3315 	}
3316 
3317 } /* ixgbe_if_multi_set */
3318 
3319 /************************************************************************
3320  * ixgbe_mc_array_itr
3321  *
3322  *   An iterator function needed by the multicast shared code.
3323  *   It feeds the shared code routine the addresses in the
3324  *   array of ixgbe_set_multi() one by one.
3325  ************************************************************************/
3326 static u8 *
3327 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3328 {
3329 	struct ixgbe_mc_addr *mta;
3330 
3331 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3332 	*vmdq = mta->vmdq;
3333 
3334 	*update_ptr = (u8*)(mta + 1);
3335 
3336 	return (mta->addr);
3337 } /* ixgbe_mc_array_itr */
3338 
3339 /************************************************************************
3340  * ixgbe_local_timer - Timer routine
3341  *
3342  *   Checks for link status, updates statistics,
3343  *   and runs the watchdog check.
3344  ************************************************************************/
3345 static void
3346 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3347 {
3348 	struct adapter *adapter = iflib_get_softc(ctx);
3349 
3350 	if (qid != 0)
3351 		return;
3352 
3353 	/* Check for pluggable optics */
3354 	if (adapter->sfp_probe)
3355 		if (!ixgbe_sfp_probe(ctx))
3356 			return; /* Nothing to do */
3357 
3358 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3359 	    &adapter->link_up, 0);
3360 
3361 	/* Fire off the adminq task */
3362 	iflib_admin_intr_deferred(ctx);
3363 
3364 } /* ixgbe_if_timer */
3365 
3366 /************************************************************************
3367  * ixgbe_sfp_probe
3368  *
3369  *   Determine if a port had optics inserted.
3370  ************************************************************************/
3371 static bool
3372 ixgbe_sfp_probe(if_ctx_t ctx)
3373 {
3374 	struct adapter  *adapter = iflib_get_softc(ctx);
3375 	struct ixgbe_hw *hw = &adapter->hw;
3376 	device_t        dev = iflib_get_dev(ctx);
3377 	bool            result = FALSE;
3378 
3379 	if ((hw->phy.type == ixgbe_phy_nl) &&
3380 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3381 		s32 ret = hw->phy.ops.identify_sfp(hw);
3382 		if (ret)
3383 			goto out;
3384 		ret = hw->phy.ops.reset(hw);
3385 		adapter->sfp_probe = FALSE;
3386 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3387 			device_printf(dev, "Unsupported SFP+ module detected!");
3388 			device_printf(dev,
3389 			    "Reload driver with supported module.\n");
3390 			goto out;
3391 		} else
3392 			device_printf(dev, "SFP+ module detected!\n");
3393 		/* We now have supported optics */
3394 		result = TRUE;
3395 	}
3396 out:
3397 
3398 	return (result);
3399 } /* ixgbe_sfp_probe */
3400 
3401 /************************************************************************
3402  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3403  ************************************************************************/
3404 static void
3405 ixgbe_handle_mod(void *context)
3406 {
3407 	if_ctx_t        ctx = context;
3408 	struct adapter  *adapter = iflib_get_softc(ctx);
3409 	struct ixgbe_hw *hw = &adapter->hw;
3410 	device_t        dev = iflib_get_dev(ctx);
3411 	u32             err, cage_full = 0;
3412 
3413 	if (adapter->hw.need_crosstalk_fix) {
3414 		switch (hw->mac.type) {
3415 		case ixgbe_mac_82599EB:
3416 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3417 			    IXGBE_ESDP_SDP2;
3418 			break;
3419 		case ixgbe_mac_X550EM_x:
3420 		case ixgbe_mac_X550EM_a:
3421 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3422 			    IXGBE_ESDP_SDP0;
3423 			break;
3424 		default:
3425 			break;
3426 		}
3427 
3428 		if (!cage_full)
3429 			goto handle_mod_out;
3430 	}
3431 
3432 	err = hw->phy.ops.identify_sfp(hw);
3433 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3434 		device_printf(dev,
3435 		    "Unsupported SFP+ module type was detected.\n");
3436 		goto handle_mod_out;
3437 	}
3438 
3439 	if (hw->mac.type == ixgbe_mac_82598EB)
3440 		err = hw->phy.ops.reset(hw);
3441 	else
3442 		err = hw->mac.ops.setup_sfp(hw);
3443 
3444 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3445 		device_printf(dev,
3446 		    "Setup failure - unsupported SFP+ module type.\n");
3447 		goto handle_mod_out;
3448 	}
3449 	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3450 	return;
3451 
3452 handle_mod_out:
3453 	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3454 } /* ixgbe_handle_mod */
3455 
3456 
3457 /************************************************************************
3458  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3459  ************************************************************************/
3460 static void
3461 ixgbe_handle_msf(void *context)
3462 {
3463 	if_ctx_t        ctx = context;
3464 	struct adapter  *adapter = iflib_get_softc(ctx);
3465 	struct ixgbe_hw *hw = &adapter->hw;
3466 	u32             autoneg;
3467 	bool            negotiate;
3468 
3469 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3470 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3471 
3472 	autoneg = hw->phy.autoneg_advertised;
3473 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3474 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3475 	if (hw->mac.ops.setup_link)
3476 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3477 
3478 	/* Adjust media types shown in ifconfig */
3479 	ifmedia_removeall(adapter->media);
3480 	ixgbe_add_media_types(adapter->ctx);
3481 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3482 } /* ixgbe_handle_msf */
3483 
3484 /************************************************************************
3485  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3486  ************************************************************************/
3487 static void
3488 ixgbe_handle_phy(void *context)
3489 {
3490 	if_ctx_t        ctx = context;
3491 	struct adapter  *adapter = iflib_get_softc(ctx);
3492 	struct ixgbe_hw *hw = &adapter->hw;
3493 	int             error;
3494 
3495 	error = hw->phy.ops.handle_lasi(hw);
3496 	if (error == IXGBE_ERR_OVERTEMP)
3497 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3498 	else if (error)
3499 		device_printf(adapter->dev,
3500 		    "Error handling LASI interrupt: %d\n", error);
3501 } /* ixgbe_handle_phy */
3502 
3503 /************************************************************************
3504  * ixgbe_if_stop - Stop the hardware
3505  *
3506  *   Disables all traffic on the adapter by issuing a
3507  *   global reset on the MAC and deallocates TX/RX buffers.
3508  ************************************************************************/
3509 static void
3510 ixgbe_if_stop(if_ctx_t ctx)
3511 {
3512 	struct adapter  *adapter = iflib_get_softc(ctx);
3513 	struct ixgbe_hw *hw = &adapter->hw;
3514 
3515 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3516 
3517 	ixgbe_reset_hw(hw);
3518 	hw->adapter_stopped = FALSE;
3519 	ixgbe_stop_adapter(hw);
3520 	if (hw->mac.type == ixgbe_mac_82599EB)
3521 		ixgbe_stop_mac_link_on_d3_82599(hw);
3522 	/* Turn off the laser - noop with no optics */
3523 	ixgbe_disable_tx_laser(hw);
3524 
3525 	/* Update the stack */
3526 	adapter->link_up = FALSE;
3527 	ixgbe_if_update_admin_status(ctx);
3528 
3529 	/* reprogram the RAR[0] in case user changed it. */
3530 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3531 
3532 	return;
3533 } /* ixgbe_if_stop */
3534 
3535 /************************************************************************
3536  * ixgbe_update_link_status - Update OS on link state
3537  *
3538  * Note: Only updates the OS on the cached link state.
3539  *       The real check of the hardware only happens with
3540  *       a link interrupt.
3541  ************************************************************************/
3542 static void
3543 ixgbe_if_update_admin_status(if_ctx_t ctx)
3544 {
3545 	struct adapter *adapter = iflib_get_softc(ctx);
3546 	device_t       dev = iflib_get_dev(ctx);
3547 
3548 	if (adapter->link_up) {
3549 		if (adapter->link_active == FALSE) {
3550 			if (bootverbose)
3551 				device_printf(dev, "Link is up %d Gbps %s \n",
3552 				    ((adapter->link_speed == 128) ? 10 : 1),
3553 				    "Full Duplex");
3554 			adapter->link_active = TRUE;
3555 			/* Update any Flow Control changes */
3556 			ixgbe_fc_enable(&adapter->hw);
3557 			/* Update DMA coalescing config */
3558 			ixgbe_config_dmac(adapter);
3559 			/* should actually be negotiated value */
3560 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3561 
3562 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3563 				ixgbe_ping_all_vfs(adapter);
3564 		}
3565 	} else { /* Link down */
3566 		if (adapter->link_active == TRUE) {
3567 			if (bootverbose)
3568 				device_printf(dev, "Link is Down\n");
3569 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3570 			adapter->link_active = FALSE;
3571 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3572 				ixgbe_ping_all_vfs(adapter);
3573 		}
3574 	}
3575 
3576 	/* Handle task requests from msix_link() */
3577 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3578 		ixgbe_handle_mod(ctx);
3579 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3580 		ixgbe_handle_msf(ctx);
3581 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3582 		ixgbe_handle_mbx(ctx);
3583 	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3584 		ixgbe_reinit_fdir(ctx);
3585 	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3586 		ixgbe_handle_phy(ctx);
3587 	adapter->task_requests = 0;
3588 
3589 	ixgbe_update_stats_counters(adapter);
3590 } /* ixgbe_if_update_admin_status */
3591 
3592 /************************************************************************
3593  * ixgbe_config_dmac - Configure DMA Coalescing
3594  ************************************************************************/
3595 static void
3596 ixgbe_config_dmac(struct adapter *adapter)
3597 {
3598 	struct ixgbe_hw          *hw = &adapter->hw;
3599 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3600 
3601 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3602 		return;
3603 
3604 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3605 	    dcfg->link_speed ^ adapter->link_speed) {
3606 		dcfg->watchdog_timer = adapter->dmac;
3607 		dcfg->fcoe_en = FALSE;
3608 		dcfg->link_speed = adapter->link_speed;
3609 		dcfg->num_tcs = 1;
3610 
3611 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3612 		    dcfg->watchdog_timer, dcfg->link_speed);
3613 
3614 		hw->mac.ops.dmac_config(hw);
3615 	}
3616 } /* ixgbe_config_dmac */
3617 
3618 /************************************************************************
3619  * ixgbe_if_enable_intr
3620  ************************************************************************/
3621 void
3622 ixgbe_if_enable_intr(if_ctx_t ctx)
3623 {
3624 	struct adapter     *adapter = iflib_get_softc(ctx);
3625 	struct ixgbe_hw    *hw = &adapter->hw;
3626 	struct ix_rx_queue *que = adapter->rx_queues;
3627 	u32                mask, fwsm;
3628 
3629 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3630 
3631 	switch (adapter->hw.mac.type) {
3632 	case ixgbe_mac_82599EB:
3633 		mask |= IXGBE_EIMS_ECC;
3634 		/* Temperature sensor on some adapters */
3635 		mask |= IXGBE_EIMS_GPI_SDP0;
3636 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3637 		mask |= IXGBE_EIMS_GPI_SDP1;
3638 		mask |= IXGBE_EIMS_GPI_SDP2;
3639 		break;
3640 	case ixgbe_mac_X540:
3641 		/* Detect if Thermal Sensor is enabled */
3642 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3643 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3644 			mask |= IXGBE_EIMS_TS;
3645 		mask |= IXGBE_EIMS_ECC;
3646 		break;
3647 	case ixgbe_mac_X550:
3648 		/* MAC thermal sensor is automatically enabled */
3649 		mask |= IXGBE_EIMS_TS;
3650 		mask |= IXGBE_EIMS_ECC;
3651 		break;
3652 	case ixgbe_mac_X550EM_x:
3653 	case ixgbe_mac_X550EM_a:
3654 		/* Some devices use SDP0 for important information */
3655 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3656 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3657 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3658 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3659 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3660 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3661 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3662 		mask |= IXGBE_EIMS_ECC;
3663 		break;
3664 	default:
3665 		break;
3666 	}
3667 
3668 	/* Enable Fan Failure detection */
3669 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3670 		mask |= IXGBE_EIMS_GPI_SDP1;
3671 	/* Enable SR-IOV */
3672 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3673 		mask |= IXGBE_EIMS_MAILBOX;
3674 	/* Enable Flow Director */
3675 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3676 		mask |= IXGBE_EIMS_FLOW_DIR;
3677 
3678 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3679 
3680 	/* With MSI-X we use auto clear */
3681 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3682 		mask = IXGBE_EIMS_ENABLE_MASK;
3683 		/* Don't autoclear Link */
3684 		mask &= ~IXGBE_EIMS_OTHER;
3685 		mask &= ~IXGBE_EIMS_LSC;
3686 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3687 			mask &= ~IXGBE_EIMS_MAILBOX;
3688 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3689 	}
3690 
3691 	/*
3692 	 * Now enable all queues, this is done separately to
3693 	 * allow for handling the extended (beyond 32) MSI-X
3694 	 * vectors that can be used by 82599
3695 	 */
3696 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3697 		ixgbe_enable_queue(adapter, que->msix);
3698 
3699 	IXGBE_WRITE_FLUSH(hw);
3700 
3701 } /* ixgbe_if_enable_intr */
3702 
3703 /************************************************************************
3704  * ixgbe_disable_intr
3705  ************************************************************************/
3706 static void
3707 ixgbe_if_disable_intr(if_ctx_t ctx)
3708 {
3709 	struct adapter *adapter = iflib_get_softc(ctx);
3710 
3711 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3712 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3713 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3714 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3715 	} else {
3716 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3717 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3718 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3719 	}
3720 	IXGBE_WRITE_FLUSH(&adapter->hw);
3721 
3722 } /* ixgbe_if_disable_intr */
3723 
3724 /************************************************************************
3725  * ixgbe_link_intr_enable
3726  ************************************************************************/
3727 static void
3728 ixgbe_link_intr_enable(if_ctx_t ctx)
3729 {
3730 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3731 
3732 	/* Re-enable other interrupts */
3733 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3734 } /* ixgbe_link_intr_enable */
3735 
3736 /************************************************************************
3737  * ixgbe_if_rx_queue_intr_enable
3738  ************************************************************************/
3739 static int
3740 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3741 {
3742 	struct adapter     *adapter = iflib_get_softc(ctx);
3743 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3744 
3745 	ixgbe_enable_queue(adapter, que->msix);
3746 
3747 	return (0);
3748 } /* ixgbe_if_rx_queue_intr_enable */
3749 
3750 /************************************************************************
3751  * ixgbe_enable_queue
3752  ************************************************************************/
3753 static void
3754 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3755 {
3756 	struct ixgbe_hw *hw = &adapter->hw;
3757 	u64             queue = 1ULL << vector;
3758 	u32             mask;
3759 
3760 	if (hw->mac.type == ixgbe_mac_82598EB) {
3761 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3762 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3763 	} else {
3764 		mask = (queue & 0xFFFFFFFF);
3765 		if (mask)
3766 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3767 		mask = (queue >> 32);
3768 		if (mask)
3769 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3770 	}
3771 } /* ixgbe_enable_queue */
3772 
3773 /************************************************************************
3774  * ixgbe_disable_queue
3775  ************************************************************************/
3776 static void
3777 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3778 {
3779 	struct ixgbe_hw *hw = &adapter->hw;
3780 	u64             queue = 1ULL << vector;
3781 	u32             mask;
3782 
3783 	if (hw->mac.type == ixgbe_mac_82598EB) {
3784 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3785 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3786 	} else {
3787 		mask = (queue & 0xFFFFFFFF);
3788 		if (mask)
3789 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3790 		mask = (queue >> 32);
3791 		if (mask)
3792 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3793 	}
3794 } /* ixgbe_disable_queue */
3795 
3796 /************************************************************************
3797  * ixgbe_intr - Legacy Interrupt Service Routine
3798  ************************************************************************/
3799 int
3800 ixgbe_intr(void *arg)
3801 {
3802 	struct adapter     *adapter = arg;
3803 	struct ix_rx_queue *que = adapter->rx_queues;
3804 	struct ixgbe_hw    *hw = &adapter->hw;
3805 	if_ctx_t           ctx = adapter->ctx;
3806 	u32                eicr, eicr_mask;
3807 
3808 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3809 
3810 	++que->irqs;
3811 	if (eicr == 0) {
3812 		ixgbe_if_enable_intr(ctx);
3813 		return (FILTER_HANDLED);
3814 	}
3815 
3816 	/* Check for fan failure */
3817 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3818 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3819 		device_printf(adapter->dev,
3820 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3821 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3822 	}
3823 
3824 	/* Link status change */
3825 	if (eicr & IXGBE_EICR_LSC) {
3826 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3827 		iflib_admin_intr_deferred(ctx);
3828 	}
3829 
3830 	if (ixgbe_is_sfp(hw)) {
3831 		/* Pluggable optics-related interrupt */
3832 		if (hw->mac.type >= ixgbe_mac_X540)
3833 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3834 		else
3835 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3836 
3837 		if (eicr & eicr_mask) {
3838 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3839 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3840 		}
3841 
3842 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3843 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3844 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3845 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3846 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3847 		}
3848 	}
3849 
3850 	/* External PHY interrupt */
3851 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3852 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3853 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3854 
3855 	return (FILTER_SCHEDULE_THREAD);
3856 } /* ixgbe_intr */
3857 
3858 /************************************************************************
3859  * ixgbe_free_pci_resources
3860  ************************************************************************/
3861 static void
3862 ixgbe_free_pci_resources(if_ctx_t ctx)
3863 {
3864 	struct adapter *adapter = iflib_get_softc(ctx);
3865 	struct         ix_rx_queue *que = adapter->rx_queues;
3866 	device_t       dev = iflib_get_dev(ctx);
3867 
3868 	/* Release all MSI-X queue resources */
3869 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3870 		iflib_irq_free(ctx, &adapter->irq);
3871 
3872 	if (que != NULL) {
3873 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3874 			iflib_irq_free(ctx, &que->que_irq);
3875 		}
3876 	}
3877 
3878 	if (adapter->pci_mem != NULL)
3879 		bus_release_resource(dev, SYS_RES_MEMORY,
3880 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3881 } /* ixgbe_free_pci_resources */
3882 
3883 /************************************************************************
3884  * ixgbe_sysctl_flowcntl
3885  *
3886  *   SYSCTL wrapper around setting Flow Control
3887  ************************************************************************/
3888 static int
3889 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3890 {
3891 	struct adapter *adapter;
3892 	int            error, fc;
3893 
3894 	adapter = (struct adapter *)arg1;
3895 	fc = adapter->hw.fc.current_mode;
3896 
3897 	error = sysctl_handle_int(oidp, &fc, 0, req);
3898 	if ((error) || (req->newptr == NULL))
3899 		return (error);
3900 
3901 	/* Don't bother if it's not changed */
3902 	if (fc == adapter->hw.fc.current_mode)
3903 		return (0);
3904 
3905 	return ixgbe_set_flowcntl(adapter, fc);
3906 } /* ixgbe_sysctl_flowcntl */
3907 
3908 /************************************************************************
3909  * ixgbe_set_flowcntl - Set flow control
3910  *
3911  *   Flow control values:
3912  *     0 - off
3913  *     1 - rx pause
3914  *     2 - tx pause
3915  *     3 - full
3916  ************************************************************************/
3917 static int
3918 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3919 {
3920 	switch (fc) {
3921 	case ixgbe_fc_rx_pause:
3922 	case ixgbe_fc_tx_pause:
3923 	case ixgbe_fc_full:
3924 		adapter->hw.fc.requested_mode = fc;
3925 		if (adapter->num_rx_queues > 1)
3926 			ixgbe_disable_rx_drop(adapter);
3927 		break;
3928 	case ixgbe_fc_none:
3929 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3930 		if (adapter->num_rx_queues > 1)
3931 			ixgbe_enable_rx_drop(adapter);
3932 		break;
3933 	default:
3934 		return (EINVAL);
3935 	}
3936 
3937 	/* Don't autoneg if forcing a value */
3938 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3939 	ixgbe_fc_enable(&adapter->hw);
3940 
3941 	return (0);
3942 } /* ixgbe_set_flowcntl */
3943 
3944 /************************************************************************
3945  * ixgbe_enable_rx_drop
3946  *
3947  *   Enable the hardware to drop packets when the buffer is
3948  *   full. This is useful with multiqueue, so that no single
3949  *   queue being full stalls the entire RX engine. We only
3950  *   enable this when Multiqueue is enabled AND Flow Control
3951  *   is disabled.
3952  ************************************************************************/
3953 static void
3954 ixgbe_enable_rx_drop(struct adapter *adapter)
3955 {
3956 	struct ixgbe_hw *hw = &adapter->hw;
3957 	struct rx_ring  *rxr;
3958 	u32             srrctl;
3959 
3960 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3961 		rxr = &adapter->rx_queues[i].rxr;
3962 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3963 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3964 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3965 	}
3966 
3967 	/* enable drop for each vf */
3968 	for (int i = 0; i < adapter->num_vfs; i++) {
3969 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3970 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3971 		                IXGBE_QDE_ENABLE));
3972 	}
3973 } /* ixgbe_enable_rx_drop */
3974 
3975 /************************************************************************
3976  * ixgbe_disable_rx_drop
3977  ************************************************************************/
3978 static void
3979 ixgbe_disable_rx_drop(struct adapter *adapter)
3980 {
3981 	struct ixgbe_hw *hw = &adapter->hw;
3982 	struct rx_ring  *rxr;
3983 	u32             srrctl;
3984 
3985 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3986 		rxr = &adapter->rx_queues[i].rxr;
3987 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3988 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3989 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3990 	}
3991 
3992 	/* disable drop for each vf */
3993 	for (int i = 0; i < adapter->num_vfs; i++) {
3994 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3995 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3996 	}
3997 } /* ixgbe_disable_rx_drop */
3998 
3999 /************************************************************************
4000  * ixgbe_sysctl_advertise
4001  *
4002  *   SYSCTL wrapper around setting advertised speed
4003  ************************************************************************/
4004 static int
4005 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4006 {
4007 	struct adapter *adapter;
4008 	int            error, advertise;
4009 
4010 	adapter = (struct adapter *)arg1;
4011 	advertise = adapter->advertise;
4012 
4013 	error = sysctl_handle_int(oidp, &advertise, 0, req);
4014 	if ((error) || (req->newptr == NULL))
4015 		return (error);
4016 
4017 	return ixgbe_set_advertise(adapter, advertise);
4018 } /* ixgbe_sysctl_advertise */
4019 
4020 /************************************************************************
4021  * ixgbe_set_advertise - Control advertised link speed
4022  *
4023  *   Flags:
4024  *     0x1 - advertise 100 Mb
4025  *     0x2 - advertise 1G
4026  *     0x4 - advertise 10G
4027  *     0x8 - advertise 10 Mb (yes, Mb)
4028  ************************************************************************/
4029 static int
4030 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4031 {
4032 	device_t         dev = iflib_get_dev(adapter->ctx);
4033 	struct ixgbe_hw  *hw;
4034 	ixgbe_link_speed speed = 0;
4035 	ixgbe_link_speed link_caps = 0;
4036 	s32              err = IXGBE_NOT_IMPLEMENTED;
4037 	bool             negotiate = FALSE;
4038 
4039 	/* Checks to validate new value */
4040 	if (adapter->advertise == advertise) /* no change */
4041 		return (0);
4042 
4043 	hw = &adapter->hw;
4044 
4045 	/* No speed changes for backplane media */
4046 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4047 		return (ENODEV);
4048 
4049 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4050 	      (hw->phy.multispeed_fiber))) {
4051 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4052 		return (EINVAL);
4053 	}
4054 
4055 	if (advertise < 0x1 || advertise > 0xF) {
4056 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4057 		return (EINVAL);
4058 	}
4059 
4060 	if (hw->mac.ops.get_link_capabilities) {
4061 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4062 		    &negotiate);
4063 		if (err != IXGBE_SUCCESS) {
4064 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4065 			return (ENODEV);
4066 		}
4067 	}
4068 
4069 	/* Set new value and report new advertised mode */
4070 	if (advertise & 0x1) {
4071 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4072 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4073 			return (EINVAL);
4074 		}
4075 		speed |= IXGBE_LINK_SPEED_100_FULL;
4076 	}
4077 	if (advertise & 0x2) {
4078 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4079 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4080 			return (EINVAL);
4081 		}
4082 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4083 	}
4084 	if (advertise & 0x4) {
4085 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4086 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4087 			return (EINVAL);
4088 		}
4089 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4090 	}
4091 	if (advertise & 0x8) {
4092 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4093 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4094 			return (EINVAL);
4095 		}
4096 		speed |= IXGBE_LINK_SPEED_10_FULL;
4097 	}
4098 
4099 	hw->mac.autotry_restart = TRUE;
4100 	hw->mac.ops.setup_link(hw, speed, TRUE);
4101 	adapter->advertise = advertise;
4102 
4103 	return (0);
4104 } /* ixgbe_set_advertise */
4105 
4106 /************************************************************************
4107  * ixgbe_get_advertise - Get current advertised speed settings
4108  *
4109  *   Formatted for sysctl usage.
4110  *   Flags:
4111  *     0x1 - advertise 100 Mb
4112  *     0x2 - advertise 1G
4113  *     0x4 - advertise 10G
4114  *     0x8 - advertise 10 Mb (yes, Mb)
4115  ************************************************************************/
4116 static int
4117 ixgbe_get_advertise(struct adapter *adapter)
4118 {
4119 	struct ixgbe_hw  *hw = &adapter->hw;
4120 	int              speed;
4121 	ixgbe_link_speed link_caps = 0;
4122 	s32              err;
4123 	bool             negotiate = FALSE;
4124 
4125 	/*
4126 	 * Advertised speed means nothing unless it's copper or
4127 	 * multi-speed fiber
4128 	 */
4129 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4130 	    !(hw->phy.multispeed_fiber))
4131 		return (0);
4132 
4133 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4134 	if (err != IXGBE_SUCCESS)
4135 		return (0);
4136 
4137 	speed =
4138 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4139 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4140 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4141 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4142 
4143 	return speed;
4144 } /* ixgbe_get_advertise */
4145 
4146 /************************************************************************
4147  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4148  *
4149  *   Control values:
4150  *     0/1 - off / on (use default value of 1000)
4151  *
4152  *     Legal timer values are:
4153  *     50,100,250,500,1000,2000,5000,10000
4154  *
4155  *     Turning off interrupt moderation will also turn this off.
4156  ************************************************************************/
4157 static int
4158 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4159 {
4160 	struct adapter *adapter = (struct adapter *)arg1;
4161 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4162 	int            error;
4163 	u16            newval;
4164 
4165 	newval = adapter->dmac;
4166 	error = sysctl_handle_16(oidp, &newval, 0, req);
4167 	if ((error) || (req->newptr == NULL))
4168 		return (error);
4169 
4170 	switch (newval) {
4171 	case 0:
4172 		/* Disabled */
4173 		adapter->dmac = 0;
4174 		break;
4175 	case 1:
4176 		/* Enable and use default */
4177 		adapter->dmac = 1000;
4178 		break;
4179 	case 50:
4180 	case 100:
4181 	case 250:
4182 	case 500:
4183 	case 1000:
4184 	case 2000:
4185 	case 5000:
4186 	case 10000:
4187 		/* Legal values - allow */
4188 		adapter->dmac = newval;
4189 		break;
4190 	default:
4191 		/* Do nothing, illegal value */
4192 		return (EINVAL);
4193 	}
4194 
4195 	/* Re-initialize hardware if it's already running */
4196 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4197 		ifp->if_init(ifp);
4198 
4199 	return (0);
4200 } /* ixgbe_sysctl_dmac */
4201 
4202 #ifdef IXGBE_DEBUG
4203 /************************************************************************
4204  * ixgbe_sysctl_power_state
4205  *
4206  *   Sysctl to test power states
4207  *   Values:
4208  *     0      - set device to D0
4209  *     3      - set device to D3
4210  *     (none) - get current device power state
4211  ************************************************************************/
4212 static int
4213 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4214 {
4215 	struct adapter *adapter = (struct adapter *)arg1;
4216 	device_t       dev = adapter->dev;
4217 	int            curr_ps, new_ps, error = 0;
4218 
4219 	curr_ps = new_ps = pci_get_powerstate(dev);
4220 
4221 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4222 	if ((error) || (req->newptr == NULL))
4223 		return (error);
4224 
4225 	if (new_ps == curr_ps)
4226 		return (0);
4227 
4228 	if (new_ps == 3 && curr_ps == 0)
4229 		error = DEVICE_SUSPEND(dev);
4230 	else if (new_ps == 0 && curr_ps == 3)
4231 		error = DEVICE_RESUME(dev);
4232 	else
4233 		return (EINVAL);
4234 
4235 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4236 
4237 	return (error);
4238 } /* ixgbe_sysctl_power_state */
4239 #endif
4240 
4241 /************************************************************************
4242  * ixgbe_sysctl_wol_enable
4243  *
4244  *   Sysctl to enable/disable the WoL capability,
4245  *   if supported by the adapter.
4246  *
4247  *   Values:
4248  *     0 - disabled
4249  *     1 - enabled
4250  ************************************************************************/
4251 static int
4252 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4253 {
4254 	struct adapter  *adapter = (struct adapter *)arg1;
4255 	struct ixgbe_hw *hw = &adapter->hw;
4256 	int             new_wol_enabled;
4257 	int             error = 0;
4258 
4259 	new_wol_enabled = hw->wol_enabled;
4260 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4261 	if ((error) || (req->newptr == NULL))
4262 		return (error);
4263 	new_wol_enabled = !!(new_wol_enabled);
4264 	if (new_wol_enabled == hw->wol_enabled)
4265 		return (0);
4266 
4267 	if (new_wol_enabled > 0 && !adapter->wol_support)
4268 		return (ENODEV);
4269 	else
4270 		hw->wol_enabled = new_wol_enabled;
4271 
4272 	return (0);
4273 } /* ixgbe_sysctl_wol_enable */
4274 
4275 /************************************************************************
4276  * ixgbe_sysctl_wufc - Wake Up Filter Control
4277  *
4278  *   Sysctl to enable/disable the types of packets that the
4279  *   adapter will wake up on upon receipt.
4280  *   Flags:
4281  *     0x1  - Link Status Change
4282  *     0x2  - Magic Packet
4283  *     0x4  - Direct Exact
4284  *     0x8  - Directed Multicast
4285  *     0x10 - Broadcast
4286  *     0x20 - ARP/IPv4 Request Packet
4287  *     0x40 - Direct IPv4 Packet
4288  *     0x80 - Direct IPv6 Packet
4289  *
4290  *   Settings not listed above will cause the sysctl to return an error.
4291  ************************************************************************/
4292 static int
4293 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4294 {
4295 	struct adapter *adapter = (struct adapter *)arg1;
4296 	int            error = 0;
4297 	u32            new_wufc;
4298 
4299 	new_wufc = adapter->wufc;
4300 
4301 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4302 	if ((error) || (req->newptr == NULL))
4303 		return (error);
4304 	if (new_wufc == adapter->wufc)
4305 		return (0);
4306 
4307 	if (new_wufc & 0xffffff00)
4308 		return (EINVAL);
4309 
4310 	new_wufc &= 0xff;
4311 	new_wufc |= (0xffffff & adapter->wufc);
4312 	adapter->wufc = new_wufc;
4313 
4314 	return (0);
4315 } /* ixgbe_sysctl_wufc */
4316 
4317 #ifdef IXGBE_DEBUG
4318 /************************************************************************
4319  * ixgbe_sysctl_print_rss_config
4320  ************************************************************************/
4321 static int
4322 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4323 {
4324 	struct adapter  *adapter = (struct adapter *)arg1;
4325 	struct ixgbe_hw *hw = &adapter->hw;
4326 	device_t        dev = adapter->dev;
4327 	struct sbuf     *buf;
4328 	int             error = 0, reta_size;
4329 	u32             reg;
4330 
4331 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4332 	if (!buf) {
4333 		device_printf(dev, "Could not allocate sbuf for output.\n");
4334 		return (ENOMEM);
4335 	}
4336 
4337 	// TODO: use sbufs to make a string to print out
4338 	/* Set multiplier for RETA setup and table size based on MAC */
4339 	switch (adapter->hw.mac.type) {
4340 	case ixgbe_mac_X550:
4341 	case ixgbe_mac_X550EM_x:
4342 	case ixgbe_mac_X550EM_a:
4343 		reta_size = 128;
4344 		break;
4345 	default:
4346 		reta_size = 32;
4347 		break;
4348 	}
4349 
4350 	/* Print out the redirection table */
4351 	sbuf_cat(buf, "\n");
4352 	for (int i = 0; i < reta_size; i++) {
4353 		if (i < 32) {
4354 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4355 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4356 		} else {
4357 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4358 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4359 		}
4360 	}
4361 
4362 	// TODO: print more config
4363 
4364 	error = sbuf_finish(buf);
4365 	if (error)
4366 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4367 
4368 	sbuf_delete(buf);
4369 
4370 	return (0);
4371 } /* ixgbe_sysctl_print_rss_config */
4372 #endif /* IXGBE_DEBUG */
4373 
4374 /************************************************************************
4375  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4376  *
4377  *   For X552/X557-AT devices using an external PHY
4378  ************************************************************************/
4379 static int
4380 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4381 {
4382 	struct adapter  *adapter = (struct adapter *)arg1;
4383 	struct ixgbe_hw *hw = &adapter->hw;
4384 	u16             reg;
4385 
4386 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4387 		device_printf(iflib_get_dev(adapter->ctx),
4388 		    "Device has no supported external thermal sensor.\n");
4389 		return (ENODEV);
4390 	}
4391 
4392 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4393 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4394 		device_printf(iflib_get_dev(adapter->ctx),
4395 		    "Error reading from PHY's current temperature register\n");
4396 		return (EAGAIN);
4397 	}
4398 
4399 	/* Shift temp for output */
4400 	reg = reg >> 8;
4401 
4402 	return (sysctl_handle_16(oidp, NULL, reg, req));
4403 } /* ixgbe_sysctl_phy_temp */
4404 
4405 /************************************************************************
4406  * ixgbe_sysctl_phy_overtemp_occurred
4407  *
4408  *   Reports (directly from the PHY) whether the current PHY
4409  *   temperature is over the overtemp threshold.
4410  ************************************************************************/
4411 static int
4412 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4413 {
4414 	struct adapter  *adapter = (struct adapter *)arg1;
4415 	struct ixgbe_hw *hw = &adapter->hw;
4416 	u16             reg;
4417 
4418 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4419 		device_printf(iflib_get_dev(adapter->ctx),
4420 		    "Device has no supported external thermal sensor.\n");
4421 		return (ENODEV);
4422 	}
4423 
4424 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4425 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4426 		device_printf(iflib_get_dev(adapter->ctx),
4427 		    "Error reading from PHY's temperature status register\n");
4428 		return (EAGAIN);
4429 	}
4430 
4431 	/* Get occurrence bit */
4432 	reg = !!(reg & 0x4000);
4433 
4434 	return (sysctl_handle_16(oidp, 0, reg, req));
4435 } /* ixgbe_sysctl_phy_overtemp_occurred */
4436 
4437 /************************************************************************
4438  * ixgbe_sysctl_eee_state
4439  *
4440  *   Sysctl to set EEE power saving feature
4441  *   Values:
4442  *     0      - disable EEE
4443  *     1      - enable EEE
4444  *     (none) - get current device EEE state
4445  ************************************************************************/
4446 static int
4447 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4448 {
4449 	struct adapter *adapter = (struct adapter *)arg1;
4450 	device_t       dev = adapter->dev;
4451 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4452 	int            curr_eee, new_eee, error = 0;
4453 	s32            retval;
4454 
4455 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4456 
4457 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4458 	if ((error) || (req->newptr == NULL))
4459 		return (error);
4460 
4461 	/* Nothing to do */
4462 	if (new_eee == curr_eee)
4463 		return (0);
4464 
4465 	/* Not supported */
4466 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4467 		return (EINVAL);
4468 
4469 	/* Bounds checking */
4470 	if ((new_eee < 0) || (new_eee > 1))
4471 		return (EINVAL);
4472 
4473 	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4474 	if (retval) {
4475 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4476 		return (EINVAL);
4477 	}
4478 
4479 	/* Restart auto-neg */
4480 	ifp->if_init(ifp);
4481 
4482 	device_printf(dev, "New EEE state: %d\n", new_eee);
4483 
4484 	/* Cache new value */
4485 	if (new_eee)
4486 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4487 	else
4488 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4489 
4490 	return (error);
4491 } /* ixgbe_sysctl_eee_state */
4492 
4493 /************************************************************************
4494  * ixgbe_init_device_features
4495  ************************************************************************/
4496 static void
4497 ixgbe_init_device_features(struct adapter *adapter)
4498 {
4499 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4500 	                  | IXGBE_FEATURE_RSS
4501 	                  | IXGBE_FEATURE_MSI
4502 	                  | IXGBE_FEATURE_MSIX
4503 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4504 
4505 	/* Set capabilities first... */
4506 	switch (adapter->hw.mac.type) {
4507 	case ixgbe_mac_82598EB:
4508 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4509 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4510 		break;
4511 	case ixgbe_mac_X540:
4512 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4513 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4514 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4515 		    (adapter->hw.bus.func == 0))
4516 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4517 		break;
4518 	case ixgbe_mac_X550:
4519 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4520 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4521 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4522 		break;
4523 	case ixgbe_mac_X550EM_x:
4524 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4525 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4526 		break;
4527 	case ixgbe_mac_X550EM_a:
4528 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4529 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4530 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4531 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4532 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4533 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4534 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4535 		}
4536 		break;
4537 	case ixgbe_mac_82599EB:
4538 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4539 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4540 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4541 		    (adapter->hw.bus.func == 0))
4542 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4543 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4544 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4545 		break;
4546 	default:
4547 		break;
4548 	}
4549 
4550 	/* Enabled by default... */
4551 	/* Fan failure detection */
4552 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4553 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4554 	/* Netmap */
4555 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4556 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4557 	/* EEE */
4558 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4559 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4560 	/* Thermal Sensor */
4561 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4562 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4563 
4564 	/* Enabled via global sysctl... */
4565 	/* Flow Director */
4566 	if (ixgbe_enable_fdir) {
4567 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4568 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4569 		else
4570 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4571 	}
4572 	/*
4573 	 * Message Signal Interrupts - Extended (MSI-X)
4574 	 * Normal MSI is only enabled if MSI-X calls fail.
4575 	 */
4576 	if (!ixgbe_enable_msix)
4577 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4578 	/* Receive-Side Scaling (RSS) */
4579 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4580 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4581 
4582 	/* Disable features with unmet dependencies... */
4583 	/* No MSI-X */
4584 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4585 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4586 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4587 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4588 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4589 	}
4590 } /* ixgbe_init_device_features */
4591 
4592 /************************************************************************
4593  * ixgbe_check_fan_failure
4594  ************************************************************************/
4595 static void
4596 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4597 {
4598 	u32 mask;
4599 
4600 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4601 	    IXGBE_ESDP_SDP1;
4602 
4603 	if (reg & mask)
4604 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4605 } /* ixgbe_check_fan_failure */
4606