xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 5def4c47d4bd90b209b9b4a4ba9faec15846d8fd)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
/************************************************************************
 * Driver version
 *
 *   Exported (non-static) version string; also handed to iflib via
 *   isc_driver_version in ixgbe_sctx_init below.
 ************************************************************************/
char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixgbe_vendor_info_array[] =
{
  /* 82598 family */
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) 82598EB AF (Dual Fiber)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) 82598EB AF (Fiber)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) 82598EB AT (CX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) 82598EB AT"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) 82598EB AT2"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) 82598"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) 82598EB AF DA (Dual Fiber)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) 82598EB AT (Dual CX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) 82598EB AF (Dual Fiber LR)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) 82598EB AF (Dual Fiber SR)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) 82598EB LOM"),
  /* 82599 (X520) family */
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) X520 82599 (KX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) X520 82599 (KX4 Mezzanine)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) X520 82599ES (SFI/SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) X520 82599 (XAUI/BX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) X520 82599 (Dual CX4)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) X520-T 82599 LOM"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) X520 82599 (Combined Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) X520 82599 (Backplane w/FCoE)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) X520 82599 (Dual SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) X520-1 82599EN (SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) X520-4 82599 (Quad SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) X520-Q1 82599 (QSFP+)"),
  /* X540 family */
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) X540-AT2"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) X540-T1"),
  /* X550 family */
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) X550-T2"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
  /* X552 (X550EM_x) family */
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) X552 (KR Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) X552 (KX4 Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) X552/X557-AT (10GBASE-T)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) X552 (1000BASE-T)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
  /* X553 (X550EM_a) family */
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
  /* Bypass adapters */
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
  PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
	/* required last entry */
  PVID_END
};
111 
/*
 * Forward declarations for the ifdi_* callbacks registered in
 * ixgbe_if_methods below, plus internal helpers.
 */
static void *ixgbe_register(device_t dev);
static int  ixgbe_if_attach_pre(if_ctx_t ctx);
static int  ixgbe_if_attach_post(if_ctx_t ctx);
static int  ixgbe_if_detach(if_ctx_t ctx);
static int  ixgbe_if_shutdown(if_ctx_t ctx);
static int  ixgbe_if_suspend(if_ctx_t ctx);
static int  ixgbe_if_resume(if_ctx_t ctx);

static void ixgbe_if_stop(if_ctx_t ctx);
/* Non-static: also used outside this file (e.g. legacy interrupt path). */
void ixgbe_if_enable_intr(if_ctx_t ctx);
static void ixgbe_if_disable_intr(if_ctx_t ctx);
static void ixgbe_link_intr_enable(if_ctx_t ctx);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
static int  ixgbe_if_media_change(if_ctx_t ctx);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
static void ixgbe_if_multi_set(if_ctx_t ctx);
static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
                                     uint64_t *paddrs, int nrxqs, int nrxqsets);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
                                     uint64_t *paddrs, int nrxqs, int nrxqsets);
static void ixgbe_if_queues_free(if_ctx_t ctx);
static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t ctx);
static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static bool ixgbe_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
int ixgbe_intr(void *arg);

/************************************************************************
 * Function prototypes
 ************************************************************************/
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
#endif

static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
static void ixgbe_add_device_sysctls(if_ctx_t ctx);
static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);

static void ixgbe_config_dmac(struct adapter *adapter);
static void ixgbe_configure_ivars(struct adapter *adapter);
static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
                           s8 type);
static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t ctx);

static void ixgbe_free_pci_resources(if_ctx_t ctx);

static int  ixgbe_msix_link(void *arg);
static int  ixgbe_msix_que(void *arg);
static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
static void ixgbe_initialize_receive_units(if_ctx_t ctx);
static void ixgbe_initialize_transmit_units(if_ctx_t ctx);

static int  ixgbe_setup_interface(if_ctx_t ctx);
static void ixgbe_init_device_features(struct adapter *adapter);
static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void ixgbe_add_media_types(if_ctx_t ctx);
static void ixgbe_update_stats_counters(struct adapter *adapter);
static void ixgbe_config_link(if_ctx_t ctx);
static void ixgbe_get_slot_info(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *adapter);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);

static void ixgbe_add_hw_stats(struct adapter *adapter);
static int  ixgbe_set_flowcntl(struct adapter *, int);
static int  ixgbe_set_advertise(struct adapter *, int);
static int  ixgbe_get_advertise(struct adapter *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
static void ixgbe_config_gpie(struct adapter *adapter);
static void ixgbe_config_delay_values(struct adapter *adapter);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
215 
/************************************************************************
 *  FreeBSD Device Interface Entry Points
 *
 *   All newbus entry points are delegated to the generic iflib
 *   handlers; the driver-specific work happens in the ifdi_* methods
 *   registered in ixgbe_if_methods.
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
/* Export the device-ID table so devmatch(8) can autoload the module. */
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
246 
/*
 * iflib driver interface: maps the ifdi_* callbacks onto this
 * driver's implementations.
 */
static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	/*
	 * NOTE(review): both TX and RX queue interrupt enables are routed
	 * to the same RX handler here — presumably because TX and RX for a
	 * queue pair share one MSI-X vector/EIMS bit; confirm against the
	 * vector assignment in ixgbe_if_msix_intr_assign.
	 */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};
285 
/*
 * TUNEABLE PARAMETERS:
 */

/* Parent sysctl node: hw.ix */
static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
  "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
};

/* Interrupt throttle ceiling, derived from the low-latency ITR setting */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixgbe_enable_aim = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool, this means every 20th packet will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

/* TX/RX fast-path methods, defined in ix_txrx.c */
extern struct if_txrx ixgbe_txrx;
376 
/*
 * Shared context template handed to iflib at register time: DMA
 * alignment/size limits, queue counts, and descriptor ring bounds.
 */
static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE*4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE*4,
	.isc_nfl = 1,		/* one free list per RX queue */
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,	/* one vector reserved for link/admin */
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};
404 
405 /************************************************************************
406  * ixgbe_if_tx_queues_alloc
407  ************************************************************************/
408 static int
409 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
410                          int ntxqs, int ntxqsets)
411 {
412 	struct adapter     *adapter = iflib_get_softc(ctx);
413 	if_softc_ctx_t     scctx = adapter->shared;
414 	struct ix_tx_queue *que;
415 	int                i, j, error;
416 
417 	MPASS(adapter->num_tx_queues > 0);
418 	MPASS(adapter->num_tx_queues == ntxqsets);
419 	MPASS(ntxqs == 1);
420 
421 	/* Allocate queue structure memory */
422 	adapter->tx_queues =
423 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
424 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
425 	if (!adapter->tx_queues) {
426 		device_printf(iflib_get_dev(ctx),
427 		    "Unable to allocate TX ring memory\n");
428 		return (ENOMEM);
429 	}
430 
431 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
432 		struct tx_ring *txr = &que->txr;
433 
434 		/* In case SR-IOV is enabled, align the index properly */
435 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
436 		    i);
437 
438 		txr->adapter = que->adapter = adapter;
439 
440 		/* Allocate report status array */
441 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
442 		if (txr->tx_rsq == NULL) {
443 			error = ENOMEM;
444 			goto fail;
445 		}
446 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
447 			txr->tx_rsq[j] = QIDX_INVALID;
448 		/* get the virtual and physical address of the hardware queues */
449 		txr->tail = IXGBE_TDT(txr->me);
450 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
451 		txr->tx_paddr = paddrs[i];
452 
453 		txr->bytes = 0;
454 		txr->total_packets = 0;
455 
456 		/* Set the rate at which we sample packets */
457 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
458 			txr->atr_sample = atr_sample_rate;
459 
460 	}
461 
462 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
463 	    adapter->num_tx_queues);
464 
465 	return (0);
466 
467 fail:
468 	ixgbe_if_queues_free(ctx);
469 
470 	return (error);
471 } /* ixgbe_if_tx_queues_alloc */
472 
473 /************************************************************************
474  * ixgbe_if_rx_queues_alloc
475  ************************************************************************/
476 static int
477 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
478                          int nrxqs, int nrxqsets)
479 {
480 	struct adapter     *adapter = iflib_get_softc(ctx);
481 	struct ix_rx_queue *que;
482 	int                i;
483 
484 	MPASS(adapter->num_rx_queues > 0);
485 	MPASS(adapter->num_rx_queues == nrxqsets);
486 	MPASS(nrxqs == 1);
487 
488 	/* Allocate queue structure memory */
489 	adapter->rx_queues =
490 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
491 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
492 	if (!adapter->rx_queues) {
493 		device_printf(iflib_get_dev(ctx),
494 		    "Unable to allocate TX ring memory\n");
495 		return (ENOMEM);
496 	}
497 
498 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
499 		struct rx_ring *rxr = &que->rxr;
500 
501 		/* In case SR-IOV is enabled, align the index properly */
502 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
503 		    i);
504 
505 		rxr->adapter = que->adapter = adapter;
506 
507 		/* get the virtual and physical address of the hw queues */
508 		rxr->tail = IXGBE_RDT(rxr->me);
509 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
510 		rxr->rx_paddr = paddrs[i];
511 		rxr->bytes = 0;
512 		rxr->que = que;
513 	}
514 
515 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
516 	    adapter->num_rx_queues);
517 
518 	return (0);
519 } /* ixgbe_if_rx_queues_alloc */
520 
521 /************************************************************************
522  * ixgbe_if_queues_free
523  ************************************************************************/
524 static void
525 ixgbe_if_queues_free(if_ctx_t ctx)
526 {
527 	struct adapter     *adapter = iflib_get_softc(ctx);
528 	struct ix_tx_queue *tx_que = adapter->tx_queues;
529 	struct ix_rx_queue *rx_que = adapter->rx_queues;
530 	int                i;
531 
532 	if (tx_que != NULL) {
533 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
534 			struct tx_ring *txr = &tx_que->txr;
535 			if (txr->tx_rsq == NULL)
536 				break;
537 
538 			free(txr->tx_rsq, M_IXGBE);
539 			txr->tx_rsq = NULL;
540 		}
541 
542 		free(adapter->tx_queues, M_IXGBE);
543 		adapter->tx_queues = NULL;
544 	}
545 	if (rx_que != NULL) {
546 		free(adapter->rx_queues, M_IXGBE);
547 		adapter->rx_queues = NULL;
548 	}
549 } /* ixgbe_if_queues_free */
550 
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the hash key
 *   registers (RSSRK) and the hash-type enables (MRQC) for this
 *   adapter. With the kernel RSS feature enabled, the key and the
 *   bucket-to-queue mapping come from the RSS framework; otherwise a
 *   random key and a simple round-robin mapping are used.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id, table_size, index_mult;
	int             i, j;
	u32             rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550-class MACs have a 512-entry table (RETA + ERETA) */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j walks the RX queues round-robin */
		if (j == adapter->num_rx_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			/* Four 8-bit entries accumulated: flush the word */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the configured hash types into MRQC field enables */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
662 
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Round a buffer size up to the next SRRCTL BSIZEPKT granule */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = adapter->shared;
	struct ixgbe_hw    *hw = &adapter->hw;
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int                i, j;
	u32                bufsz, fctrl, srrctl, rxcsum;
	u32                hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size in SRRCTL units, rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;

		/* j is the hardware ring index (SR-IOV aware) */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_rx_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split header types (not applicable to 82598) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_rx_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
777 
778 /************************************************************************
779  * ixgbe_initialize_transmit_units - Enable transmit units.
780  ************************************************************************/
781 static void
782 ixgbe_initialize_transmit_units(if_ctx_t ctx)
783 {
784 	struct adapter     *adapter = iflib_get_softc(ctx);
785 	struct ixgbe_hw    *hw = &adapter->hw;
786 	if_softc_ctx_t     scctx = adapter->shared;
787 	struct ix_tx_queue *que;
788 	int i;
789 
790 	/* Setup the Base and Length of the Tx Descriptor Ring */
791 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
792 	    i++, que++) {
793 		struct tx_ring	   *txr = &que->txr;
794 		u64 tdba = txr->tx_paddr;
795 		u32 txctrl = 0;
796 		int j = txr->me;
797 
798 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
799 		    (tdba & 0x00000000ffffffffULL));
800 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
801 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
802 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
803 
804 		/* Setup the HW Tx Head and Tail descriptor pointers */
805 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
806 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
807 
808 		/* Cache the tail address */
809 		txr->tail = IXGBE_TDT(txr->me);
810 
811 		txr->tx_rs_cidx = txr->tx_rs_pidx;
812 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
813 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
814 			txr->tx_rsq[k] = QIDX_INVALID;
815 
816 		/* Disable Head Writeback */
817 		/*
818 		 * Note: for X550 series devices, these registers are actually
819 		 * prefixed with TPH_ isntead of DCA_, but the addresses and
820 		 * fields remain the same.
821 		 */
822 		switch (hw->mac.type) {
823 		case ixgbe_mac_82598EB:
824 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
825 			break;
826 		default:
827 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
828 			break;
829 		}
830 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
831 		switch (hw->mac.type) {
832 		case ixgbe_mac_82598EB:
833 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
834 			break;
835 		default:
836 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
837 			break;
838 		}
839 
840 	}
841 
842 	if (hw->mac.type != ixgbe_mac_82598EB) {
843 		u32 dmatxctl, rttdcs;
844 
845 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
846 		dmatxctl |= IXGBE_DMATXCTL_TE;
847 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
848 		/* Disable arbiter to set MTQC */
849 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
850 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
851 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
852 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
853 		    ixgbe_get_mtqc(adapter->iov_mode));
854 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
855 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
856 	}
857 
858 } /* ixgbe_initialize_transmit_units */
859 
860 /************************************************************************
861  * ixgbe_register
862  ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	/* Hand iflib the driver's shared-context descriptor. */
	return (&ixgbe_sctx_init);
} /* ixgbe_register */
868 
869 /************************************************************************
870  * ixgbe_if_attach_pre - Device initialization routine, part 1
871  *
872  *   Called when the driver is being loaded.
873  *   Identifies the type of hardware, initializes the hardware,
874  *   and initializes iflib structures.
875  *
876  *   return 0 on success, positive on failure
877  ************************************************************************/
878 static int
879 ixgbe_if_attach_pre(if_ctx_t ctx)
880 {
881 	struct adapter  *adapter;
882 	device_t        dev;
883 	if_softc_ctx_t  scctx;
884 	struct ixgbe_hw *hw;
885 	int             error = 0;
886 	u32             ctrl_ext;
887 
888 	INIT_DEBUGOUT("ixgbe_attach: begin");
889 
890 	/* Allocate, clear, and link in our adapter structure */
891 	dev = iflib_get_dev(ctx);
892 	adapter = iflib_get_softc(ctx);
893 	adapter->hw.back = adapter;
894 	adapter->ctx = ctx;
895 	adapter->dev = dev;
896 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
897 	adapter->media = iflib_get_media(ctx);
898 	hw = &adapter->hw;
899 
900 	/* Determine hardware revision */
901 	hw->vendor_id = pci_get_vendor(dev);
902 	hw->device_id = pci_get_device(dev);
903 	hw->revision_id = pci_get_revid(dev);
904 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
905 	hw->subsystem_device_id = pci_get_subdevice(dev);
906 
907 	/* Do base PCI setup - map BAR0 */
908 	if (ixgbe_allocate_pci_resources(ctx)) {
909 		device_printf(dev, "Allocation of PCI resources failed\n");
910 		return (ENXIO);
911 	}
912 
913 	/* let hardware know driver is loaded */
914 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
915 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
916 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
917 
918 	/*
919 	 * Initialize the shared code
920 	 */
921 	if (ixgbe_init_shared_code(hw) != 0) {
922 		device_printf(dev, "Unable to initialize the shared code\n");
923 		error = ENXIO;
924 		goto err_pci;
925 	}
926 
927 	if (hw->mbx.ops.init_params)
928 		hw->mbx.ops.init_params(hw);
929 
930 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
931 
932 	if (hw->mac.type != ixgbe_mac_82598EB)
933 		hw->phy.smart_speed = ixgbe_smart_speed;
934 
935 	ixgbe_init_device_features(adapter);
936 
937 	/* Enable WoL (if supported) */
938 	ixgbe_check_wol_support(adapter);
939 
940 	/* Verify adapter fan is still functional (if applicable) */
941 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
942 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
943 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
944 	}
945 
946 	/* Ensure SW/FW semaphore is free */
947 	ixgbe_init_swfw_semaphore(hw);
948 
949 	/* Set an initial default flow control value */
950 	hw->fc.requested_mode = ixgbe_flow_control;
951 
952 	hw->phy.reset_if_overtemp = TRUE;
953 	error = ixgbe_reset_hw(hw);
954 	hw->phy.reset_if_overtemp = FALSE;
955 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
956 		/*
957 		 * No optics in this port, set up
958 		 * so the timer routine will probe
959 		 * for later insertion.
960 		 */
961 		adapter->sfp_probe = TRUE;
962 		error = 0;
963 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
964 		device_printf(dev, "Unsupported SFP+ module detected!\n");
965 		error = EIO;
966 		goto err_pci;
967 	} else if (error) {
968 		device_printf(dev, "Hardware initialization failed\n");
969 		error = EIO;
970 		goto err_pci;
971 	}
972 
973 	/* Make sure we have a good EEPROM before we read from it */
974 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
975 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
976 		error = EIO;
977 		goto err_pci;
978 	}
979 
980 	error = ixgbe_start_hw(hw);
981 	switch (error) {
982 	case IXGBE_ERR_EEPROM_VERSION:
983 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
984 		break;
985 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
986 		device_printf(dev, "Unsupported SFP+ Module\n");
987 		error = EIO;
988 		goto err_pci;
989 	case IXGBE_ERR_SFP_NOT_PRESENT:
990 		device_printf(dev, "No SFP+ Module found\n");
991 		/* falls thru */
992 	default:
993 		break;
994 	}
995 
996 	/* Most of the iflib initialization... */
997 
998 	iflib_set_mac(ctx, hw->mac.addr);
999 	switch (adapter->hw.mac.type) {
1000 	case ixgbe_mac_X550:
1001 	case ixgbe_mac_X550EM_x:
1002 	case ixgbe_mac_X550EM_a:
1003 		scctx->isc_rss_table_size = 512;
1004 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1005 		break;
1006 	default:
1007 		scctx->isc_rss_table_size = 128;
1008 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1009 	}
1010 
1011 	/* Allow legacy interrupts */
1012 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1013 
1014 	scctx->isc_txqsizes[0] =
1015 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1016 	    sizeof(u32), DBA_ALIGN),
1017 	scctx->isc_rxqsizes[0] =
1018 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1019 	    DBA_ALIGN);
1020 
1021 	/* XXX */
1022 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1023 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1024 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1025 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1026 	} else {
1027 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1028 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1029 	}
1030 
1031 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1032 
1033 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1034 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1035 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1036 
1037 	scctx->isc_txrx = &ixgbe_txrx;
1038 
1039 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1040 
1041 	return (0);
1042 
1043 err_pci:
1044 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1045 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1046 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1047 	ixgbe_free_pci_resources(ctx);
1048 
1049 	return (error);
1050 } /* ixgbe_if_attach_pre */
1051 
1052  /*********************************************************************
1053  * ixgbe_if_attach_post - Device initialization routine, part 2
1054  *
1055  *   Called during driver load, but after interrupts and
1056  *   resources have been allocated and configured.
1057  *   Sets up some data structures not relevant to iflib.
1058  *
1059  *   return 0 on success, positive on failure
1060  *********************************************************************/
1061 static int
1062 ixgbe_if_attach_post(if_ctx_t ctx)
1063 {
1064 	device_t dev;
1065 	struct adapter  *adapter;
1066 	struct ixgbe_hw *hw;
1067 	int             error = 0;
1068 
1069 	dev = iflib_get_dev(ctx);
1070 	adapter = iflib_get_softc(ctx);
1071 	hw = &adapter->hw;
1072 
1073 
1074 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1075 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1076 		device_printf(dev, "Device does not support legacy interrupts");
1077 		error = ENXIO;
1078 		goto err;
1079 	}
1080 
1081 	/* Allocate multicast array memory. */
1082 	adapter->mta = malloc(sizeof(*adapter->mta) *
1083 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1084 	if (adapter->mta == NULL) {
1085 		device_printf(dev, "Can not allocate multicast setup array\n");
1086 		error = ENOMEM;
1087 		goto err;
1088 	}
1089 
1090 	/* hw.ix defaults init */
1091 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1092 
1093 	/* Enable the optics for 82599 SFP+ fiber */
1094 	ixgbe_enable_tx_laser(hw);
1095 
1096 	/* Enable power to the phy. */
1097 	ixgbe_set_phy_power(hw, TRUE);
1098 
1099 	ixgbe_initialize_iov(adapter);
1100 
1101 	error = ixgbe_setup_interface(ctx);
1102 	if (error) {
1103 		device_printf(dev, "Interface setup failed: %d\n", error);
1104 		goto err;
1105 	}
1106 
1107 	ixgbe_if_update_admin_status(ctx);
1108 
1109 	/* Initialize statistics */
1110 	ixgbe_update_stats_counters(adapter);
1111 	ixgbe_add_hw_stats(adapter);
1112 
1113 	/* Check PCIE slot type/speed/width */
1114 	ixgbe_get_slot_info(adapter);
1115 
1116 	/*
1117 	 * Do time init and sysctl init here, but
1118 	 * only on the first port of a bypass adapter.
1119 	 */
1120 	ixgbe_bypass_init(adapter);
1121 
1122 	/* Set an initial dmac value */
1123 	adapter->dmac = 0;
1124 	/* Set initial advertised speeds (if applicable) */
1125 	adapter->advertise = ixgbe_get_advertise(adapter);
1126 
1127 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1128 		ixgbe_define_iov_schemas(dev, &error);
1129 
1130 	/* Add sysctls */
1131 	ixgbe_add_device_sysctls(ctx);
1132 
1133 	return (0);
1134 err:
1135 	return (error);
1136 } /* ixgbe_if_attach_post */
1137 
1138 /************************************************************************
1139  * ixgbe_check_wol_support
1140  *
1141  *   Checks whether the adapter's ports are capable of
1142  *   Wake On LAN by reading the adapter's NVM.
1143  *
1144  *   Sets each port's hw->wol_enabled value depending
1145  *   on the value read here.
1146  ************************************************************************/
1147 static void
1148 ixgbe_check_wol_support(struct adapter *adapter)
1149 {
1150 	struct ixgbe_hw *hw = &adapter->hw;
1151 	u16             dev_caps = 0;
1152 
1153 	/* Find out WoL support for port */
1154 	adapter->wol_support = hw->wol_enabled = 0;
1155 	ixgbe_get_device_caps(hw, &dev_caps);
1156 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1157 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1158 	     hw->bus.func == 0))
1159 		adapter->wol_support = hw->wol_enabled = 1;
1160 
1161 	/* Save initial wake up filter configuration */
1162 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1163 
1164 	return;
1165 } /* ixgbe_check_wol_support */
1166 
1167 /************************************************************************
1168  * ixgbe_setup_interface
1169  *
1170  *   Setup networking device structure and register an interface.
1171  ************************************************************************/
1172 static int
1173 ixgbe_setup_interface(if_ctx_t ctx)
1174 {
1175 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1176 	struct adapter *adapter = iflib_get_softc(ctx);
1177 
1178 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1179 
1180 	if_setbaudrate(ifp, IF_Gbps(10));
1181 
1182 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1183 
1184 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1185 
1186 	ixgbe_add_media_types(ctx);
1187 
1188 	/* Autoselect media by default */
1189 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1190 
1191 	return (0);
1192 } /* ixgbe_setup_interface */
1193 
1194 /************************************************************************
1195  * ixgbe_if_get_counter
1196  ************************************************************************/
1197 static uint64_t
1198 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1199 {
1200 	struct adapter *adapter = iflib_get_softc(ctx);
1201 	if_t           ifp = iflib_get_ifp(ctx);
1202 
1203 	switch (cnt) {
1204 	case IFCOUNTER_IPACKETS:
1205 		return (adapter->ipackets);
1206 	case IFCOUNTER_OPACKETS:
1207 		return (adapter->opackets);
1208 	case IFCOUNTER_IBYTES:
1209 		return (adapter->ibytes);
1210 	case IFCOUNTER_OBYTES:
1211 		return (adapter->obytes);
1212 	case IFCOUNTER_IMCASTS:
1213 		return (adapter->imcasts);
1214 	case IFCOUNTER_OMCASTS:
1215 		return (adapter->omcasts);
1216 	case IFCOUNTER_COLLISIONS:
1217 		return (0);
1218 	case IFCOUNTER_IQDROPS:
1219 		return (adapter->iqdrops);
1220 	case IFCOUNTER_OQDROPS:
1221 		return (0);
1222 	case IFCOUNTER_IERRORS:
1223 		return (adapter->ierrors);
1224 	default:
1225 		return (if_get_counter_default(ifp, cnt));
1226 	}
1227 } /* ixgbe_if_get_counter */
1228 
1229 /************************************************************************
1230  * ixgbe_if_i2c_req
1231  ************************************************************************/
1232 static int
1233 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1234 {
1235 	struct adapter		*adapter = iflib_get_softc(ctx);
1236 	struct ixgbe_hw 	*hw = &adapter->hw;
1237 	int 			i;
1238 
1239 
1240 	if (hw->phy.ops.read_i2c_byte == NULL)
1241 		return (ENXIO);
1242 	for (i = 0; i < req->len; i++)
1243 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1244 		    req->dev_addr, &req->data[i]);
1245 	return (0);
1246 } /* ixgbe_if_i2c_req */
1247 
1248 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1249  * @ctx: iflib context
1250  * @event: event code to check
1251  *
1252  * Defaults to returning true for unknown events.
1253  *
1254  * @returns true if iflib needs to reinit the interface
1255  */
1256 static bool
1257 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1258 {
1259 	switch (event) {
1260 	case IFLIB_RESTART_VLAN_CONFIG:
1261 		return (false);
1262 	default:
1263 		return (true);
1264 	}
1265 }
1266 
1267 /************************************************************************
1268  * ixgbe_add_media_types
1269  ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = iflib_get_dev(ctx);
	u64             layer;

	/* Cache the supported PHY layer mask; also used by other paths */
	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

	/* Direct-attach copper (passive or active) maps to 10G twinax */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	/* Multispeed fiber modules also advertise their 1G rate */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	/* Backplane (K*) media have first-class defines on newer systems */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	/* Without IFM_ETH_XTYPE, map backplane media to close equivalents */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	/* 1000baseBX is reported but not added as a selectable media */
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */
1353 
1354 /************************************************************************
1355  * ixgbe_is_sfp
1356  ************************************************************************/
1357 static inline bool
1358 ixgbe_is_sfp(struct ixgbe_hw *hw)
1359 {
1360 	switch (hw->mac.type) {
1361 	case ixgbe_mac_82598EB:
1362 		if (hw->phy.type == ixgbe_phy_nl)
1363 			return (TRUE);
1364 		return (FALSE);
1365 	case ixgbe_mac_82599EB:
1366 		switch (hw->mac.ops.get_media_type(hw)) {
1367 		case ixgbe_media_type_fiber:
1368 		case ixgbe_media_type_fiber_qsfp:
1369 			return (TRUE);
1370 		default:
1371 			return (FALSE);
1372 		}
1373 	case ixgbe_mac_X550EM_x:
1374 	case ixgbe_mac_X550EM_a:
1375 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1376 			return (TRUE);
1377 		return (FALSE);
1378 	default:
1379 		return (FALSE);
1380 	}
1381 } /* ixgbe_is_sfp */
1382 
1383 /************************************************************************
1384  * ixgbe_config_link
1385  ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	u32             autoneg, err = 0;
	bool            sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/* Defer SFP module identification/setup to the admin task */
		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
		iflib_admin_intr_deferred(ctx);
	} else {
		/* Non-SFP media: query link state, then program setup_link */
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: fall back to hardware capabilities */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}
} /* ixgbe_config_link */
1416 
1417 /************************************************************************
1418  * ixgbe_update_stats_counters - Update board statistics counters.
1419  ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ixgbe_hw       *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
	u32                   lxoffrxc;
	u64                   total_missed_rx = 0;

	/*
	 * NOTE(review): missed_rx and total_missed_rx are never updated in
	 * this function, so "gprc -= missed_rx" and the IQDROPS value below
	 * always use 0 -- looks like a leftover from a pre-iflib per-ring
	 * MPC loop; confirm.
	 */
	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	/* Per-queue packet/drop counters (first 16 queues) */
	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	stats->gprc -= missed_rx;

	/* 36-bit byte counters are split across L/H registers on 82599+ */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		stats->lxoffrxc += lxoffrxc;
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		stats->lxoffrxc += lxoffrxc;
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (lxoffrxc)
		adapter->shared->isc_pause_frames = 1;

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	/* Rx size-histogram counters */
	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	/* Transmitted flow-control frames are excluded from Tx totals */
	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;

	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(adapter, stats->gprc);
	IXGBE_SET_OPACKETS(adapter, stats->gptc);
	IXGBE_SET_IBYTES(adapter, stats->gorc);
	IXGBE_SET_OBYTES(adapter, stats->gotc);
	IXGBE_SET_IMCASTS(adapter, stats->mprc);
	IXGBE_SET_OMCASTS(adapter, stats->mptc);
	IXGBE_SET_COLLISIONS(adapter, 0);
	IXGBE_SET_IQDROPS(adapter, total_missed_rx);

	/*
	 * Aggregate following types of errors as RX errors:
	 * - CRC error count,
	 * - illegal byte error count,
	 * - checksum error count,
	 * - missed packets count,
	 * - length error count,
	 * - undersized packets count,
	 * - fragmented packets count,
	 * - oversized packets count,
	 * - jabber count.
	 */
	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->illerrc + stats->xec +
	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
	    stats->rjc);
} /* ixgbe_update_stats_counters */
1559 
1560 /************************************************************************
1561  * ixgbe_add_hw_stats
1562  *
1563  *   Add sysctl variables, one per statistic, to the system.
1564  ************************************************************************/
1565 static void
1566 ixgbe_add_hw_stats(struct adapter *adapter)
1567 {
1568 	device_t               dev = iflib_get_dev(adapter->ctx);
1569 	struct ix_rx_queue     *rx_que;
1570 	struct ix_tx_queue     *tx_que;
1571 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1572 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1573 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1574 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1575 	struct sysctl_oid      *stat_node, *queue_node;
1576 	struct sysctl_oid_list *stat_list, *queue_list;
1577 	int                    i;
1578 
1579 #define QUEUE_NAME_LEN 32
1580 	char                   namebuf[QUEUE_NAME_LEN];
1581 
1582 	/* Driver Statistics */
1583 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1584 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1585 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1586 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1587 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1588 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1589 
1590 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1591 		struct tx_ring *txr = &tx_que->txr;
1592 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1593 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1594 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1595 		queue_list = SYSCTL_CHILDREN(queue_node);
1596 
1597 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1598 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1599 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1600 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1601 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1602 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1603 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1604 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1605 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1606 		    CTLFLAG_RD, &txr->total_packets,
1607 		    "Queue Packets Transmitted");
1608 	}
1609 
1610 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1611 		struct rx_ring *rxr = &rx_que->rxr;
1612 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1613 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1614 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1615 		queue_list = SYSCTL_CHILDREN(queue_node);
1616 
1617 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1618 		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1619 		    &adapter->rx_queues[i], 0,
1620 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1621 		    "Interrupt Rate");
1622 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1623 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1624 		    "irqs on this queue");
1625 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1626 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1627 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1628 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1629 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1630 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1631 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1632 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1633 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1634 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1635 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1636 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1637 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1638 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1639 	}
1640 
1641 	/* MAC stats get their own sub node */
1642 
1643 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1644 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1645 	stat_list = SYSCTL_CHILDREN(stat_node);
1646 
1647 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1648 	    CTLFLAG_RD, &adapter->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1649 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1650 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1651 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1652 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1654 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1656 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1657 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1658 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1660 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1662 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1664 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1665 
1666 	/* Flow Control stats */
1667 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1668 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1670 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1671 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1672 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1674 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1675 
1676 	/* Packet Reception Stats */
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1678 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1680 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1681 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1682 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1683 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1684 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1685 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1686 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1687 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1688 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1689 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1690 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1691 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1692 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1693 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1694 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1695 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1696 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1697 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1698 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1699 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1700 	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1701 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1702 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1703 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1704 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1705 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1706 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1707 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1708 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1709 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1710 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1711 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1712 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1713 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1714 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1715 
1716 	/* Packet Transmission Stats */
1717 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1718 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1719 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1720 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1721 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1722 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1723 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1724 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1725 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1726 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1727 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1728 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1729 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1730 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1731 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1732 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1733 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1734 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1735 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1736 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1737 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1738 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1739 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1740 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1741 } /* ixgbe_add_hw_stats */
1742 
1743 /************************************************************************
1744  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1745  *
1746  *   Retrieves the TDH value from the hardware
1747  ************************************************************************/
1748 static int
1749 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1750 {
1751 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1752 	int            error;
1753 	unsigned int   val;
1754 
1755 	if (!txr)
1756 		return (0);
1757 
1758 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1759 	error = sysctl_handle_int(oidp, &val, 0, req);
1760 	if (error || !req->newptr)
1761 		return error;
1762 
1763 	return (0);
1764 } /* ixgbe_sysctl_tdh_handler */
1765 
1766 /************************************************************************
1767  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1768  *
1769  *   Retrieves the TDT value from the hardware
1770  ************************************************************************/
1771 static int
1772 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1773 {
1774 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1775 	int            error;
1776 	unsigned int   val;
1777 
1778 	if (!txr)
1779 		return (0);
1780 
1781 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1782 	error = sysctl_handle_int(oidp, &val, 0, req);
1783 	if (error || !req->newptr)
1784 		return error;
1785 
1786 	return (0);
1787 } /* ixgbe_sysctl_tdt_handler */
1788 
1789 /************************************************************************
1790  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1791  *
1792  *   Retrieves the RDH value from the hardware
1793  ************************************************************************/
1794 static int
1795 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1796 {
1797 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1798 	int            error;
1799 	unsigned int   val;
1800 
1801 	if (!rxr)
1802 		return (0);
1803 
1804 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1805 	error = sysctl_handle_int(oidp, &val, 0, req);
1806 	if (error || !req->newptr)
1807 		return error;
1808 
1809 	return (0);
1810 } /* ixgbe_sysctl_rdh_handler */
1811 
1812 /************************************************************************
1813  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1814  *
1815  *   Retrieves the RDT value from the hardware
1816  ************************************************************************/
1817 static int
1818 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1819 {
1820 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1821 	int            error;
1822 	unsigned int   val;
1823 
1824 	if (!rxr)
1825 		return (0);
1826 
1827 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1828 	error = sysctl_handle_int(oidp, &val, 0, req);
1829 	if (error || !req->newptr)
1830 		return error;
1831 
1832 	return (0);
1833 } /* ixgbe_sysctl_rdt_handler */
1834 
1835 /************************************************************************
1836  * ixgbe_if_vlan_register
1837  *
1838  *   Run via vlan config EVENT, it enables us to use the
1839  *   HW Filter table since we can get the vlan id. This
1840  *   just creates the entry in the soft version of the
1841  *   VFTA, init will repopulate the real table.
1842  ************************************************************************/
1843 static void
1844 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1845 {
1846 	struct adapter *adapter = iflib_get_softc(ctx);
1847 	u16            index, bit;
1848 
1849 	index = (vtag >> 5) & 0x7F;
1850 	bit = vtag & 0x1F;
1851 	adapter->shadow_vfta[index] |= (1 << bit);
1852 	++adapter->num_vlans;
1853 	ixgbe_setup_vlan_hw_support(ctx);
1854 } /* ixgbe_if_vlan_register */
1855 
1856 /************************************************************************
1857  * ixgbe_if_vlan_unregister
1858  *
1859  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1860  ************************************************************************/
1861 static void
1862 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1863 {
1864 	struct adapter *adapter = iflib_get_softc(ctx);
1865 	u16            index, bit;
1866 
1867 	index = (vtag >> 5) & 0x7F;
1868 	bit = vtag & 0x1F;
1869 	adapter->shadow_vfta[index] &= ~(1 << bit);
1870 	--adapter->num_vlans;
1871 	/* Re-init to load the changes */
1872 	ixgbe_setup_vlan_hw_support(ctx);
1873 } /* ixgbe_if_vlan_unregister */
1874 
/************************************************************************
 * ixgbe_setup_vlan_hw_support
 *
 *   Program VLAN offload state into the hardware: per-queue tag
 *   stripping (RXDCTL.VME on non-82598 MACs, VLNCTRL.VME on 82598)
 *   and the VLAN filter table (VFTA), repopulated from the driver's
 *   shadow copy.  No-op when no vlans are registered.
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
	struct ifnet	*ifp = iflib_get_ifp(ctx);
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring  *rxr;
	int             i;
	u32             ctrl;


	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (adapter->num_vlans == 0)
		return;

	/* Setup the queues for vlans */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rxr = &adapter->rx_queues[i].rxr;
			/* On 82599 the VLAN enable is per/queue in RXDCTL */
			if (hw->mac.type != ixgbe_mac_82598EB) {
				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
				ctrl |= IXGBE_RXDCTL_VME;
				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
			}
			/* Tell the RX path that tags arrive already stripped. */
			rxr->vtag_strip = TRUE;
		}
	}

	/* Without hardware VLAN filtering there is nothing more to program. */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/*
	 * Enable the Filter Table if enabled.
	 * NOTE(review): this condition is always true here -- the
	 * IFCAP_VLAN_HWFILTER early return above already ruled out
	 * the false case; the re-check is redundant but harmless.
	 */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	/* 82598 uses a single global strip-enable bit instead of RXDCTL. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
1933 
/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of the slot this adapter is
 *   plugged into, report it, and warn when the slot provides less
 *   PCIe bandwidth than the adapter can use.  Devices behind an
 *   internal PCIe bridge need the parent bus walked by hand.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t        dev = iflib_get_dev(adapter->ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int             bus_info_valid = TRUE;
	u32             offset;
	u16             link;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	/* Normal case: firmware/registers report the bus directly. */
	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* One more hop up to reach the slot's bridge. */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
	/* Decode the LNKSTA fields into hw->bus.speed / hw->bus.width. */
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	     "Unknown"));

	if (bus_info_valid) {
		/* Most adapters need at least x8, or x4 at Gen2. */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		/* The quad-port SFP part needs x8 Gen3. */
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2031 
/************************************************************************
 * ixgbe_if_msix_intr_assign
 *
 *   Setup MSI-X Interrupt resources and handlers: one vector per RX
 *   queue, TX queues piggybacked on the matching RX vector as
 *   softirqs, and a final vector for the admin (link) interrupt.
 *
 *   Returns 0 on success or the iflib error code after freeing any
 *   IRQs already allocated.
 ************************************************************************/
static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ix_rx_queue *rx_que = adapter->rx_queues;
	struct ix_tx_queue *tx_que;
	int                error, rid, vector = 0;
	int                cpu_id = 0;
	char               buf[16];

	/* Admin Que is vector 0*/
	rid = vector + 1;
	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d", i, error);
			/*
			 * NOTE(review): shrinks the queue count so the fail
			 * path below only frees allocated IRQs; the "+ 1"
			 * includes the queue that just failed -- confirm
			 * iflib_irq_free tolerates the unallocated entry.
			 */
			adapter->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * The queue ID is used as the RSS layer bucket ID.
			 * We look up the queue ID -> RSS CPU ID and select
			 * that.
			 */
			cpu_id = rss_getcpu(i % rss_getnumbuckets());
		} else {
			/*
			 * Bind the MSI-X vector, and thus the
			 * rings to the corresponding cpu.
			 *
			 * This just happens to match the default RSS
			 * round-robin bucket -> queue -> CPU allocation.
			 */
			if (adapter->num_rx_queues > 1)
				cpu_id = i;
		}
		/*
		 * NOTE(review): cpu_id is computed but not consumed in this
		 * revision -- presumably an explicit bind call was removed
		 * when iflib took over affinity; verify before cleaning up.
		 */

	}
	/* TX queues share the IRQ of their paired RX queue (round-robin). */
	for (int i = 0; i < adapter->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &adapter->tx_queues[i];
		tx_que->msix = i % adapter->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &adapter->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
	/* Last vector: admin/link interrupt ("aq"). */
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
		return (error);
	}

	adapter->vector = vector;

	return (0);
fail:
	/* Unwind everything allocated so far. */
	iflib_irq_free(ctx, &adapter->irq);
	rx_que = adapter->rx_queues;
	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixgbe_if_msix_intr_assign */
2112 
2113 static inline void
2114 ixgbe_perform_aim(struct adapter *adapter, struct ix_rx_queue *que)
2115 {
2116 	uint32_t newitr = 0;
2117 	struct rx_ring *rxr = &que->rxr;
2118 
2119 	/*
2120 	 * Do Adaptive Interrupt Moderation:
2121 	 *  - Write out last calculated setting
2122 	 *  - Calculate based on average size over
2123 	 *    the last interval.
2124 	 */
2125 	if (que->eitr_setting) {
2126 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
2127 		    que->eitr_setting);
2128 	}
2129 
2130 	que->eitr_setting = 0;
2131 	/* Idle, do nothing */
2132 	if (rxr->bytes == 0) {
2133 		return;
2134 	}
2135 
2136 	if ((rxr->bytes) && (rxr->packets)) {
2137 		newitr = (rxr->bytes / rxr->packets);
2138 	}
2139 
2140 	newitr += 24; /* account for hardware frame, crc */
2141 	/* set an upper boundary */
2142 	newitr = min(newitr, 3000);
2143 
2144 	/* Be nice to the mid range */
2145 	if ((newitr > 300) && (newitr < 1200)) {
2146 		newitr = (newitr / 3);
2147 	} else {
2148 		newitr = (newitr / 2);
2149 	}
2150 
2151 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2152 		newitr |= newitr << 16;
2153 	} else {
2154 		newitr |= IXGBE_EITR_CNT_WDIS;
2155 	}
2156 
2157 	/* save for next interrupt */
2158 	que->eitr_setting = newitr;
2159 
2160 	/* Reset state */
2161 	rxr->bytes = 0;
2162 	rxr->packets = 0;
2163 
2164 	return;
2165 }
2166 
2167 /*********************************************************************
2168  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2169  **********************************************************************/
2170 static int
2171 ixgbe_msix_que(void *arg)
2172 {
2173 	struct ix_rx_queue *que = arg;
2174 	struct adapter     *adapter = que->adapter;
2175 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2176 
2177 	/* Protect against spurious interrupts */
2178 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2179 		return (FILTER_HANDLED);
2180 
2181 	ixgbe_disable_queue(adapter, que->msix);
2182 	++que->irqs;
2183 
2184 	/* Check for AIM */
2185 	if (adapter->enable_aim) {
2186 		ixgbe_perform_aim(adapter, que);
2187 	}
2188 
2189 	return (FILTER_SCHEDULE_THREAD);
2190 } /* ixgbe_msix_que */
2191 
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.  Translates the adapter's physical
 *   layer capabilities and current link speed into ifmedia types,
 *   and reports the active flow-control direction.
 ************************************************************************/
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int             layer;

	INIT_DEBUGOUT("ixgbe_if_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Without link there is nothing more to report. */
	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T) layers */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper (twinax) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.  Without IFM_ETH_XTYPE the backplane (KR/KX)
	 * layers are reported as their closest fiber/copper analogues.
	 */
#ifndef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_media_status */
2347 
/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediopt option with ifconfig.  Maps the requested ifmedia
 *   subtype to a link-speed mask, asks the MAC to set up the link,
 *   and records the advertised speeds.
 *
 *   Returns 0 on success, EINVAL for non-Ethernet or unknown media,
 *   EPERM on backplane media (speed is fixed by the platform).
 ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
	struct adapter   *adapter = iflib_get_softc(ctx);
	struct ifmedia   *ifm = iflib_get_media(ctx);
	struct ixgbe_hw  *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane links cannot be reconfigured from userland. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_10G_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_10G_LRM:
	case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
#ifndef IFM_ETH_XTYPE
	case IFM_1000_CX: /* KX */
#else
	case IFM_1000_KX:
#endif
	case IFM_1000_LX:
	case IFM_1000_SX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_1000_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* advertise bitmask: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	return (0);

invalid:
	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_if_media_change */
2435 
/************************************************************************
 * ixgbe_set_promisc
 *
 *   Program the unicast (UPE) and multicast (MPE) promiscuous bits
 *   in FCTRL from the interface flags.  Always returns 0.
 ************************************************************************/
static int
ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ifnet   *ifp = iflib_get_ifp(ctx);
	u32            rctl;
	int            mcnt = 0;

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	/*
	 * Count multicast addresses; ALLMULTI is modeled as the
	 * table being full so MPE is left set below.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
	}
	/* Filter table can hold all addresses: no need for MPE. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	/*
	 * NOTE(review): this first write momentarily clears UPE even
	 * when IFF_PROMISC is set; the flag is restored by the second
	 * write below.  Presumably harmless -- confirm if strict
	 * promiscuous continuity matters.
	 */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
	return (0);
} /* ixgbe_if_promisc_set */
2468 
2469 /************************************************************************
2470  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2471  ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             eicr, eicr_mask;
	s32             retval;

	/* Statistics: count of link/admin ("other") interrupts */
	++adapter->link_irq;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/*
	 * Link status change: mask LSC and queue the work for the admin
	 * task via task_requests (presumably drained by the iflib admin
	 * path — confirm against ixgbe_if_update_admin_status).
	 */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
	}

	/* The causes below do not exist on 82598 hardware */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return (FILTER_HANDLED);
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
		} else
			if (eicr & IXGBE_EICR_ECC) {
				device_printf(iflib_get_dev(adapter->ctx),
				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
			}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via GPI SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(adapter->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(adapter->ctx),
				    "System shutdown required!\n");
				break;
			default:
				/* Other macs use the thermal sensor (TS) bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(iflib_get_dev(adapter->ctx),
				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(iflib_get_dev(adapter->ctx),
				    "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		/* 82599: multi-speed fiber renegotiation via SDP1 */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
	}

	/* Only wake the admin thread if there is queued work */
	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
} /* ixgbe_msix_link */
2588 
2589 /************************************************************************
2590  * ixgbe_sysctl_interrupt_rate_handler
2591  ************************************************************************/
2592 static int
2593 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2594 {
2595 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2596 	int                error;
2597 	unsigned int       reg, usec, rate;
2598 
2599 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2600 	usec = ((reg & 0x0FF8) >> 3);
2601 	if (usec > 0)
2602 		rate = 500000 / usec;
2603 	else
2604 		rate = 0;
2605 	error = sysctl_handle_int(oidp, &rate, 0, req);
2606 	if (error || !req->newptr)
2607 		return error;
2608 	reg &= ~0xfff; /* default, no limitation */
2609 	ixgbe_max_interrupt_rate = 0;
2610 	if (rate > 0 && rate < 500000) {
2611 		if (rate < 1000)
2612 			rate = 1000;
2613 		ixgbe_max_interrupt_rate = rate;
2614 		reg |= ((4000000/rate) & 0xff8);
2615 	}
2616 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2617 
2618 	return (0);
2619 } /* ixgbe_sysctl_interrupt_rate_handler */
2620 
2621 /************************************************************************
2622  * ixgbe_add_device_sysctls
2623  ************************************************************************/
static void
ixgbe_add_device_sysctls(if_ctx_t ctx)
{
	struct adapter         *adapter = iflib_get_softc(ctx);
	device_t               dev = iflib_get_dev(ctx);
	struct ixgbe_hw        *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    adapter, 0, ixgbe_sysctl_flowcntl, "I",
	    IXGBE_SYSCTL_DESC_SET_FC);

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    adapter, 0, ixgbe_sysctl_advertise, "I",
	    IXGBE_SYSCTL_DESC_ADV_SPEED);

	/* Seed AIM (adaptive interrupt moderation) from the loader tunable */
	adapter->enable_aim = ixgbe_enable_aim;
	SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &adapter->enable_aim, 0, "Interrupt Moderation");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    adapter, 0, ixgbe_sysctl_power_state,
	    "I", "PCI Power State");

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
	/* for X550 series devices */
	/* NOTE(review): CTLTYPE_U16 paired with "I" format — confirm intended */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
		    CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    adapter, 0, ixgbe_sysctl_dmac,
		    "I", "DMA Coalesce");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
		    CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    adapter, 0, ixgbe_sysctl_wufc,
		    "I", "Enable/Disable Wake Up Filters");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		/* External PHY sysctls live under a "phy" sub-node */
		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    adapter, 0, ixgbe_sysctl_phy_temp,
		    "I", "Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
		    "overtemp_occurred",
		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}

	/* Energy Efficient Ethernet, when the hardware supports it */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
	}
} /* ixgbe_add_device_sysctls */
2708 
2709 /************************************************************************
2710  * ixgbe_allocate_pci_resources
2711  ************************************************************************/
2712 static int
2713 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2714 {
2715 	struct adapter *adapter = iflib_get_softc(ctx);
2716 	device_t        dev = iflib_get_dev(ctx);
2717 	int             rid;
2718 
2719 	rid = PCIR_BAR(0);
2720 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2721 	    RF_ACTIVE);
2722 
2723 	if (!(adapter->pci_mem)) {
2724 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2725 		return (ENXIO);
2726 	}
2727 
2728 	/* Save bus_space values for READ/WRITE_REG macros */
2729 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2730 	adapter->osdep.mem_bus_space_handle =
2731 	    rman_get_bushandle(adapter->pci_mem);
2732 	/* Set hw values for shared code */
2733 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2734 
2735 	return (0);
2736 } /* ixgbe_allocate_pci_resources */
2737 
2738 /************************************************************************
2739  * ixgbe_detach - Device removal routine
2740  *
2741  *   Called when the driver is being removed.
2742  *   Stops the adapter and deallocates all the resources
2743  *   that were allocated for driver operation.
2744  *
2745  *   return 0 on success, positive on failure
2746  ************************************************************************/
2747 static int
2748 ixgbe_if_detach(if_ctx_t ctx)
2749 {
2750 	struct adapter *adapter = iflib_get_softc(ctx);
2751 	device_t       dev = iflib_get_dev(ctx);
2752 	u32            ctrl_ext;
2753 
2754 	INIT_DEBUGOUT("ixgbe_detach: begin");
2755 
2756 	if (ixgbe_pci_iov_detach(dev) != 0) {
2757 		device_printf(dev, "SR-IOV in use; detach first.\n");
2758 		return (EBUSY);
2759 	}
2760 
2761 	ixgbe_setup_low_power_mode(ctx);
2762 
2763 	/* let hardware know driver is unloading */
2764 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2765 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2766 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2767 
2768 	ixgbe_free_pci_resources(ctx);
2769 	free(adapter->mta, M_IXGBE);
2770 
2771 	return (0);
2772 } /* ixgbe_if_detach */
2773 
2774 /************************************************************************
2775  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2776  *
2777  *   Prepare the adapter/port for LPLU and/or WoL
2778  ************************************************************************/
static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = iflib_get_dev(ctx);
	s32             error = 0;

	/* Without WoL there is no reason to keep the PHY powered */
	if (!hw->wol_enabled)
		ixgbe_set_phy_power(hw, FALSE);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		/* reset_disable keeps ixgbe_if_stop() from resetting the PHY */
		hw->phy.reset_disable = TRUE;
		ixgbe_if_stop(ctx);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = FALSE;
	} else {
		/* Just stop for other adapters */
		ixgbe_if_stop(ctx);
	}

	/* 0 on success, or the enter_lplu() error code */
	return error;
} /* ixgbe_setup_low_power_mode */
2827 
2828 /************************************************************************
2829  * ixgbe_shutdown - Shutdown entry point
2830  ************************************************************************/
2831 static int
2832 ixgbe_if_shutdown(if_ctx_t ctx)
2833 {
2834 	int error = 0;
2835 
2836 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2837 
2838 	error = ixgbe_setup_low_power_mode(ctx);
2839 
2840 	return (error);
2841 } /* ixgbe_if_shutdown */
2842 
2843 /************************************************************************
2844  * ixgbe_suspend
2845  *
2846  *   From D0 to D3
2847  ************************************************************************/
2848 static int
2849 ixgbe_if_suspend(if_ctx_t ctx)
2850 {
2851 	int error = 0;
2852 
2853 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2854 
2855 	error = ixgbe_setup_low_power_mode(ctx);
2856 
2857 	return (error);
2858 } /* ixgbe_if_suspend */
2859 
2860 /************************************************************************
2861  * ixgbe_resume
2862  *
2863  *   From D3 to D0
2864  ************************************************************************/
2865 static int
2866 ixgbe_if_resume(if_ctx_t ctx)
2867 {
2868 	struct adapter  *adapter = iflib_get_softc(ctx);
2869 	device_t        dev = iflib_get_dev(ctx);
2870 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2871 	struct ixgbe_hw *hw = &adapter->hw;
2872 	u32             wus;
2873 
2874 	INIT_DEBUGOUT("ixgbe_resume: begin");
2875 
2876 	/* Read & clear WUS register */
2877 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2878 	if (wus)
2879 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2880 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2881 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2882 	/* And clear WUFC until next low-power transition */
2883 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2884 
2885 	/*
2886 	 * Required after D3->D0 transition;
2887 	 * will re-advertise all previous advertised speeds
2888 	 */
2889 	if (ifp->if_flags & IFF_UP)
2890 		ixgbe_if_init(ctx);
2891 
2892 	return (0);
2893 } /* ixgbe_if_resume */
2894 
2895 /************************************************************************
2896  * ixgbe_if_mtu_set - Ioctl mtu entry point
2897  *
2898  *   Return 0 on success, EINVAL on failure
2899  ************************************************************************/
2900 static int
2901 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2902 {
2903 	struct adapter *adapter = iflib_get_softc(ctx);
2904 	int error = 0;
2905 
2906 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2907 
2908 	if (mtu > IXGBE_MAX_MTU) {
2909 		error = EINVAL;
2910 	} else {
2911 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2912 	}
2913 
2914 	return error;
2915 } /* ixgbe_if_mtu_set */
2916 
2917 /************************************************************************
2918  * ixgbe_if_crcstrip_set
2919  ************************************************************************/
static void
ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
{
	struct adapter *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	/* crc stripping is set in two places:
	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
	 * IXGBE_RDRXCTL (set by the original driver in
	 *	ixgbe_setup_hw_rsc() called in init_locked.
	 *	We disable the setting when netmap is compiled in).
	 * We update the values here, but also in ixgbe.c because
	 * init_locked sometimes is called outside our control.
	 */
	uint32_t hl, rxc;

	/* Snapshot both registers before editing their strip bits */
	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
#ifdef NETMAP
	if (netmap_verbose)
		D("%s read  HLREG 0x%x rxc 0x%x",
			onoff ? "enter" : "exit", hl, rxc);
#endif
	/* hw requirements ... */
	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
	rxc |= IXGBE_RDRXCTL_RSCACKC;
	/* onoff != 0 means netmap mode is being entered */
	if (onoff && !crcstrip) {
		/* keep the crc. Fast rx */
		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
	} else {
		/* reset default mode */
		hl |= IXGBE_HLREG0_RXCRCSTRP;
		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
	}
#ifdef NETMAP
	if (netmap_verbose)
		D("%s write HLREG 0x%x rxc 0x%x",
			onoff ? "enter" : "exit", hl, rxc);
#endif
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
} /* ixgbe_if_crcstrip_set */
2962 
2963 /*********************************************************************
2964  * ixgbe_if_init - Init entry point
2965  *
2966  *   Used in two ways: It is used by the stack as an init
2967  *   entry point in network interface structure. It is also
2968  *   used by the driver as a hw/sw initialization routine to
2969  *   get to a consistent state.
2970  *
2971  *   Return 0 on success, positive on failure
2972  **********************************************************************/
void
ixgbe_if_init(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	device_t           dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_rx_queue *rx_que;
	struct ix_tx_queue *tx_que;
	u32             txdctl, mhadd;
	u32             rxdctl, rxctrl;
	u32             ctrl_ext;

	int             i, j, err;

	INIT_DEBUGOUT("ixgbe_if_init: begin");

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	/*
	 * NOTE(review): RAR[0] is programmed a second time here with the
	 * final argument 1 instead of IXGBE_RAH_AV — confirm this is the
	 * intended "enable" value for the shared-code set_rar().
	 */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixgbe_if_multi_set(ctx);

	/* Determine the correct mbuf pool, based on frame size */
	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Configure RX settings */
	ixgbe_initialize_receive_units(ctx);

	/*
	 * Initialize variable holding task enqueue requests
	 * from MSI-X interrupts
	 */
	adapter->task_requests = 0;

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll up to 10ms for the queue-enable bit to latch */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Set up MSI/MSI-X routing */
	if (ixgbe_enable_msix)  {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(ctx);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(ctx);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_if_enable_intr(ctx);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

} /* ixgbe_if_init */
3148 
3149 /************************************************************************
3150  * ixgbe_set_ivar
3151  *
3152  *   Setup the correct IVAR register for a particular MSI-X interrupt
3153  *     (yes this is all very magic and confusing :)
3154  *    - entry is the register array entry
3155  *    - vector is the MSI-X vector for this queue
3156  *    - type is RX/TX/MISC
3157  ************************************************************************/
3158 static void
3159 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3160 {
3161 	struct ixgbe_hw *hw = &adapter->hw;
3162 	u32 ivar, index;
3163 
3164 	vector |= IXGBE_IVAR_ALLOC_VAL;
3165 
3166 	switch (hw->mac.type) {
3167 	case ixgbe_mac_82598EB:
3168 		if (type == -1)
3169 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3170 		else
3171 			entry += (type * 64);
3172 		index = (entry >> 2) & 0x1F;
3173 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3174 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3175 		ivar |= (vector << (8 * (entry & 0x3)));
3176 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3177 		break;
3178 	case ixgbe_mac_82599EB:
3179 	case ixgbe_mac_X540:
3180 	case ixgbe_mac_X550:
3181 	case ixgbe_mac_X550EM_x:
3182 	case ixgbe_mac_X550EM_a:
3183 		if (type == -1) { /* MISC IVAR */
3184 			index = (entry & 1) * 8;
3185 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3186 			ivar &= ~(0xFF << index);
3187 			ivar |= (vector << index);
3188 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3189 		} else {          /* RX/TX IVARS */
3190 			index = (16 * (entry & 1)) + (8 * type);
3191 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3192 			ivar &= ~(0xFF << index);
3193 			ivar |= (vector << index);
3194 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3195 		}
3196 	default:
3197 		break;
3198 	}
3199 } /* ixgbe_set_ivar */
3200 
3201 /************************************************************************
3202  * ixgbe_configure_ivars
3203  ************************************************************************/
3204 static void
3205 ixgbe_configure_ivars(struct adapter *adapter)
3206 {
3207 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3208 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3209 	u32                newitr;
3210 
3211 	if (ixgbe_max_interrupt_rate > 0)
3212 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3213 	else {
3214 		/*
3215 		 * Disable DMA coalescing if interrupt moderation is
3216 		 * disabled.
3217 		 */
3218 		adapter->dmac = 0;
3219 		newitr = 0;
3220 	}
3221 
3222 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3223 		struct rx_ring *rxr = &rx_que->rxr;
3224 
3225 		/* First the RX queue entry */
3226 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3227 
3228 		/* Set an Initial EITR value */
3229 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3230 	}
3231 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3232 		struct tx_ring *txr = &tx_que->txr;
3233 
3234 		/* ... and the TX */
3235 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3236 	}
3237 	/* For the Link interrupt */
3238 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3239 } /* ixgbe_configure_ivars */
3240 
3241 /************************************************************************
3242  * ixgbe_config_gpie
3243  ************************************************************************/
3244 static void
3245 ixgbe_config_gpie(struct adapter *adapter)
3246 {
3247 	struct ixgbe_hw *hw = &adapter->hw;
3248 	u32             gpie;
3249 
3250 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3251 
3252 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3253 		/* Enable Enhanced MSI-X mode */
3254 		gpie |= IXGBE_GPIE_MSIX_MODE
3255 		     |  IXGBE_GPIE_EIAME
3256 		     |  IXGBE_GPIE_PBA_SUPPORT
3257 		     |  IXGBE_GPIE_OCD;
3258 	}
3259 
3260 	/* Fan Failure Interrupt */
3261 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3262 		gpie |= IXGBE_SDP1_GPIEN;
3263 
3264 	/* Thermal Sensor Interrupt */
3265 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3266 		gpie |= IXGBE_SDP0_GPIEN_X540;
3267 
3268 	/* Link detection */
3269 	switch (hw->mac.type) {
3270 	case ixgbe_mac_82599EB:
3271 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3272 		break;
3273 	case ixgbe_mac_X550EM_x:
3274 	case ixgbe_mac_X550EM_a:
3275 		gpie |= IXGBE_SDP0_GPIEN_X540;
3276 		break;
3277 	default:
3278 		break;
3279 	}
3280 
3281 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3282 
3283 } /* ixgbe_config_gpie */
3284 
3285 /************************************************************************
3286  * ixgbe_config_delay_values
3287  *
3288  *   Requires adapter->max_frame_size to be set.
3289  ************************************************************************/
3290 static void
3291 ixgbe_config_delay_values(struct adapter *adapter)
3292 {
3293 	struct ixgbe_hw *hw = &adapter->hw;
3294 	u32             rxpb, frame, size, tmp;
3295 
3296 	frame = adapter->max_frame_size;
3297 
3298 	/* Calculate High Water */
3299 	switch (hw->mac.type) {
3300 	case ixgbe_mac_X540:
3301 	case ixgbe_mac_X550:
3302 	case ixgbe_mac_X550EM_x:
3303 	case ixgbe_mac_X550EM_a:
3304 		tmp = IXGBE_DV_X540(frame, frame);
3305 		break;
3306 	default:
3307 		tmp = IXGBE_DV(frame, frame);
3308 		break;
3309 	}
3310 	size = IXGBE_BT2KB(tmp);
3311 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3312 	hw->fc.high_water[0] = rxpb - size;
3313 
3314 	/* Now calculate Low Water */
3315 	switch (hw->mac.type) {
3316 	case ixgbe_mac_X540:
3317 	case ixgbe_mac_X550:
3318 	case ixgbe_mac_X550EM_x:
3319 	case ixgbe_mac_X550EM_a:
3320 		tmp = IXGBE_LOW_DV_X540(frame);
3321 		break;
3322 	default:
3323 		tmp = IXGBE_LOW_DV(frame);
3324 		break;
3325 	}
3326 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3327 
3328 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3329 	hw->fc.send_xon = TRUE;
3330 } /* ixgbe_config_delay_values */
3331 
3332 /************************************************************************
3333  * ixgbe_set_multi - Multicast Update
3334  *
3335  *   Called whenever multicast address list is updated.
3336  ************************************************************************/
3337 static u_int
3338 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3339 {
3340 	struct adapter *adapter = arg;
3341 	struct ixgbe_mc_addr *mta = adapter->mta;
3342 
3343 	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3344 		return (0);
3345 	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3346 	mta[idx].vmdq = adapter->pool;
3347 
3348 	return (1);
3349 } /* ixgbe_mc_filter_apply */
3350 
3351 static void
3352 ixgbe_if_multi_set(if_ctx_t ctx)
3353 {
3354 	struct adapter       *adapter = iflib_get_softc(ctx);
3355 	struct ixgbe_mc_addr *mta;
3356 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3357 	u8                   *update_ptr;
3358 	u32                  fctrl;
3359 	u_int		     mcnt;
3360 
3361 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3362 
3363 	mta = adapter->mta;
3364 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3365 
3366 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3367 	    adapter);
3368 
3369 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3370 
3371 	if (ifp->if_flags & IFF_PROMISC)
3372 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3373 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3374 	    ifp->if_flags & IFF_ALLMULTI) {
3375 		fctrl |= IXGBE_FCTRL_MPE;
3376 		fctrl &= ~IXGBE_FCTRL_UPE;
3377 	} else
3378 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3379 
3380 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3381 
3382 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3383 		update_ptr = (u8 *)mta;
3384 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3385 		    ixgbe_mc_array_itr, TRUE);
3386 	}
3387 
3388 } /* ixgbe_if_multi_set */
3389 
3390 /************************************************************************
3391  * ixgbe_mc_array_itr
3392  *
3393  *   An iterator function needed by the multicast shared code.
3394  *   It feeds the shared code routine the addresses in the
3395  *   array of ixgbe_set_multi() one by one.
3396  ************************************************************************/
3397 static u8 *
3398 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3399 {
3400 	struct ixgbe_mc_addr *mta;
3401 
3402 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3403 	*vmdq = mta->vmdq;
3404 
3405 	*update_ptr = (u8*)(mta + 1);
3406 
3407 	return (mta->addr);
3408 } /* ixgbe_mc_array_itr */
3409 
3410 /************************************************************************
3411  * ixgbe_local_timer - Timer routine
3412  *
3413  *   Checks for link status, updates statistics,
3414  *   and runs the watchdog check.
3415  ************************************************************************/
3416 static void
3417 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3418 {
3419 	struct adapter *adapter = iflib_get_softc(ctx);
3420 
3421 	if (qid != 0)
3422 		return;
3423 
3424 	/* Check for pluggable optics */
3425 	if (adapter->sfp_probe)
3426 		if (!ixgbe_sfp_probe(ctx))
3427 			return; /* Nothing to do */
3428 
3429 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3430 	    &adapter->link_up, 0);
3431 
3432 	/* Fire off the adminq task */
3433 	iflib_admin_intr_deferred(ctx);
3434 
3435 } /* ixgbe_if_timer */
3436 
3437 /************************************************************************
3438  * ixgbe_sfp_probe
3439  *
3440  *   Determine if a port had optics inserted.
3441  ************************************************************************/
3442 static bool
3443 ixgbe_sfp_probe(if_ctx_t ctx)
3444 {
3445 	struct adapter  *adapter = iflib_get_softc(ctx);
3446 	struct ixgbe_hw *hw = &adapter->hw;
3447 	device_t        dev = iflib_get_dev(ctx);
3448 	bool            result = FALSE;
3449 
3450 	if ((hw->phy.type == ixgbe_phy_nl) &&
3451 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3452 		s32 ret = hw->phy.ops.identify_sfp(hw);
3453 		if (ret)
3454 			goto out;
3455 		ret = hw->phy.ops.reset(hw);
3456 		adapter->sfp_probe = FALSE;
3457 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3458 			device_printf(dev, "Unsupported SFP+ module detected!");
3459 			device_printf(dev,
3460 			    "Reload driver with supported module.\n");
3461 			goto out;
3462 		} else
3463 			device_printf(dev, "SFP+ module detected!\n");
3464 		/* We now have supported optics */
3465 		result = TRUE;
3466 	}
3467 out:
3468 
3469 	return (result);
3470 } /* ixgbe_sfp_probe */
3471 
3472 /************************************************************************
3473  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3474  ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t        ctx = context;
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = iflib_get_dev(ctx);
	u32             err, cage_full = 0;

	/*
	 * On parts that need the crosstalk workaround, a module interrupt
	 * can fire with no module present; only proceed if the cage
	 * presence pin (SDP2 on 82599, SDP0 on X550EM) says one is seated.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	/* Identify the newly inserted module; bail out if unsupported. */
	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	/* 82598 resets the PHY; later MACs run their SFP setup routine. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	/* Success: ask the MSF task to (re)negotiate link for new optics. */
	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	/* Failure: cancel any pending MSF request for the bad module. */
	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */
3526 
3527 
3528 /************************************************************************
3529  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3530  ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t        ctx = context;
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	u32             autoneg;
	bool            negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * If no speeds were explicitly advertised, fall back to the
	 * hardware's full capability mask before setting up the link.
	 */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(adapter->media);
	ixgbe_add_media_types(adapter->ctx);
	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
3554 
3555 /************************************************************************
3556  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3557  ************************************************************************/
3558 static void
3559 ixgbe_handle_phy(void *context)
3560 {
3561 	if_ctx_t        ctx = context;
3562 	struct adapter  *adapter = iflib_get_softc(ctx);
3563 	struct ixgbe_hw *hw = &adapter->hw;
3564 	int             error;
3565 
3566 	error = hw->phy.ops.handle_lasi(hw);
3567 	if (error == IXGBE_ERR_OVERTEMP)
3568 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3569 	else if (error)
3570 		device_printf(adapter->dev,
3571 		    "Error handling LASI interrupt: %d\n", error);
3572 } /* ixgbe_handle_phy */
3573 
3574 /************************************************************************
3575  * ixgbe_if_stop - Stop the hardware
3576  *
3577  *   Disables all traffic on the adapter by issuing a
3578  *   global reset on the MAC and deallocates TX/RX buffers.
3579  ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	/*
	 * Clear the stopped flag so that ixgbe_stop_adapter() below runs
	 * its full quiesce sequence after the reset.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_if_stop */
3605 
3606 /************************************************************************
3607  * ixgbe_update_link_status - Update OS on link state
3608  *
3609  * Note: Only updates the OS on the cached link state.
3610  *       The real check of the hardware only happens with
3611  *       a link interrupt.
3612  ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);

	/* Propagate a cached link-state transition to the stack. */
	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			/* should actually be negotiated value */
			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	}

	/* Handle task requests from msix_link() */
	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	/* All requests have been serviced (MOD may clear MSF itself). */
	adapter->task_requests = 0;

	ixgbe_update_stats_counters(adapter);
} /* ixgbe_if_update_admin_status */
3662 
3663 /************************************************************************
3664  * ixgbe_config_dmac - Configure DMA Coalescing
3665  ************************************************************************/
3666 static void
3667 ixgbe_config_dmac(struct adapter *adapter)
3668 {
3669 	struct ixgbe_hw          *hw = &adapter->hw;
3670 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3671 
3672 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3673 		return;
3674 
3675 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3676 	    dcfg->link_speed ^ adapter->link_speed) {
3677 		dcfg->watchdog_timer = adapter->dmac;
3678 		dcfg->fcoe_en = FALSE;
3679 		dcfg->link_speed = adapter->link_speed;
3680 		dcfg->num_tcs = 1;
3681 
3682 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3683 		    dcfg->watchdog_timer, dcfg->link_speed);
3684 
3685 		hw->mac.ops.dmac_config(hw);
3686 	}
3687 } /* ixgbe_config_dmac */
3688 
3689 /************************************************************************
3690  * ixgbe_if_enable_intr
3691  ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &adapter->hw;
	struct ix_rx_queue *que = adapter->rx_queues;
	u32                mask, fwsm;

	/* Start from every cause except the RX/TX queue bits. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the per-MAC miscellaneous causes (thermal, ECC, SFP pins). */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	/* Post all writes before returning to the caller. */
	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_if_enable_intr */
3773 
3774 /************************************************************************
3775  * ixgbe_disable_intr
3776  ************************************************************************/
3777 static void
3778 ixgbe_if_disable_intr(if_ctx_t ctx)
3779 {
3780 	struct adapter *adapter = iflib_get_softc(ctx);
3781 
3782 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3783 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3784 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3785 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3786 	} else {
3787 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3788 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3789 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3790 	}
3791 	IXGBE_WRITE_FLUSH(&adapter->hw);
3792 
3793 } /* ixgbe_if_disable_intr */
3794 
3795 /************************************************************************
3796  * ixgbe_link_intr_enable
3797  ************************************************************************/
3798 static void
3799 ixgbe_link_intr_enable(if_ctx_t ctx)
3800 {
3801 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3802 
3803 	/* Re-enable other interrupts */
3804 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3805 } /* ixgbe_link_intr_enable */
3806 
3807 /************************************************************************
3808  * ixgbe_if_rx_queue_intr_enable
3809  ************************************************************************/
3810 static int
3811 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3812 {
3813 	struct adapter     *adapter = iflib_get_softc(ctx);
3814 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3815 
3816 	ixgbe_enable_queue(adapter, que->msix);
3817 
3818 	return (0);
3819 } /* ixgbe_if_rx_queue_intr_enable */
3820 
3821 /************************************************************************
3822  * ixgbe_enable_queue
3823  ************************************************************************/
3824 static void
3825 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3826 {
3827 	struct ixgbe_hw *hw = &adapter->hw;
3828 	u64             queue = 1ULL << vector;
3829 	u32             mask;
3830 
3831 	if (hw->mac.type == ixgbe_mac_82598EB) {
3832 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3833 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3834 	} else {
3835 		mask = (queue & 0xFFFFFFFF);
3836 		if (mask)
3837 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3838 		mask = (queue >> 32);
3839 		if (mask)
3840 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3841 	}
3842 } /* ixgbe_enable_queue */
3843 
3844 /************************************************************************
3845  * ixgbe_disable_queue
3846  ************************************************************************/
3847 static void
3848 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3849 {
3850 	struct ixgbe_hw *hw = &adapter->hw;
3851 	u64             queue = 1ULL << vector;
3852 	u32             mask;
3853 
3854 	if (hw->mac.type == ixgbe_mac_82598EB) {
3855 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3856 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3857 	} else {
3858 		mask = (queue & 0xFFFFFFFF);
3859 		if (mask)
3860 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3861 		mask = (queue >> 32);
3862 		if (mask)
3863 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3864 	}
3865 } /* ixgbe_disable_queue */
3866 
3867 /************************************************************************
3868  * ixgbe_intr - Legacy Interrupt Service Routine
3869  ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct adapter     *adapter = arg;
	struct ix_rx_queue *que = adapter->rx_queues;
	struct ixgbe_hw    *hw = &adapter->hw;
	if_ctx_t           ctx = adapter->ctx;
	u32                eicr, eicr_mask;

	/* Reading EICR acknowledges (clears) the pending causes. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	/* A zero EICR means the interrupt was not ours; re-arm and exit. */
	if (eicr == 0) {
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the admin task has processed it. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: queue the MOD task. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		/* 82599 multispeed fiber: queue the MSF task. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;

	/* Let iflib run the deferred (task) half of the handler. */
	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */
3928 
3929 /************************************************************************
3930  * ixgbe_free_pci_resources
3931  ************************************************************************/
3932 static void
3933 ixgbe_free_pci_resources(if_ctx_t ctx)
3934 {
3935 	struct adapter *adapter = iflib_get_softc(ctx);
3936 	struct         ix_rx_queue *que = adapter->rx_queues;
3937 	device_t       dev = iflib_get_dev(ctx);
3938 
3939 	/* Release all MSI-X queue resources */
3940 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3941 		iflib_irq_free(ctx, &adapter->irq);
3942 
3943 	if (que != NULL) {
3944 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3945 			iflib_irq_free(ctx, &que->que_irq);
3946 		}
3947 	}
3948 
3949 	if (adapter->pci_mem != NULL)
3950 		bus_release_resource(dev, SYS_RES_MEMORY,
3951 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3952 } /* ixgbe_free_pci_resources */
3953 
3954 /************************************************************************
3955  * ixgbe_sysctl_flowcntl
3956  *
3957  *   SYSCTL wrapper around setting Flow Control
3958  ************************************************************************/
3959 static int
3960 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3961 {
3962 	struct adapter *adapter;
3963 	int            error, fc;
3964 
3965 	adapter = (struct adapter *)arg1;
3966 	fc = adapter->hw.fc.current_mode;
3967 
3968 	error = sysctl_handle_int(oidp, &fc, 0, req);
3969 	if ((error) || (req->newptr == NULL))
3970 		return (error);
3971 
3972 	/* Don't bother if it's not changed */
3973 	if (fc == adapter->hw.fc.current_mode)
3974 		return (0);
3975 
3976 	return ixgbe_set_flowcntl(adapter, fc);
3977 } /* ixgbe_sysctl_flowcntl */
3978 
3979 /************************************************************************
3980  * ixgbe_set_flowcntl - Set flow control
3981  *
3982  *   Flow control values:
3983  *     0 - off
3984  *     1 - rx pause
3985  *     2 - tx pause
3986  *     3 - full
3987  ************************************************************************/
3988 static int
3989 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3990 {
3991 	switch (fc) {
3992 	case ixgbe_fc_rx_pause:
3993 	case ixgbe_fc_tx_pause:
3994 	case ixgbe_fc_full:
3995 		adapter->hw.fc.requested_mode = fc;
3996 		if (adapter->num_rx_queues > 1)
3997 			ixgbe_disable_rx_drop(adapter);
3998 		break;
3999 	case ixgbe_fc_none:
4000 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
4001 		if (adapter->num_rx_queues > 1)
4002 			ixgbe_enable_rx_drop(adapter);
4003 		break;
4004 	default:
4005 		return (EINVAL);
4006 	}
4007 
4008 	/* Don't autoneg if forcing a value */
4009 	adapter->hw.fc.disable_fc_autoneg = TRUE;
4010 	ixgbe_fc_enable(&adapter->hw);
4011 
4012 	return (0);
4013 } /* ixgbe_set_flowcntl */
4014 
4015 /************************************************************************
4016  * ixgbe_enable_rx_drop
4017  *
4018  *   Enable the hardware to drop packets when the buffer is
4019  *   full. This is useful with multiqueue, so that no single
4020  *   queue being full stalls the entire RX engine. We only
4021  *   enable this when Multiqueue is enabled AND Flow Control
4022  *   is disabled.
4023  ************************************************************************/
4024 static void
4025 ixgbe_enable_rx_drop(struct adapter *adapter)
4026 {
4027 	struct ixgbe_hw *hw = &adapter->hw;
4028 	struct rx_ring  *rxr;
4029 	u32             srrctl;
4030 
4031 	for (int i = 0; i < adapter->num_rx_queues; i++) {
4032 		rxr = &adapter->rx_queues[i].rxr;
4033 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4034 		srrctl |= IXGBE_SRRCTL_DROP_EN;
4035 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4036 	}
4037 
4038 	/* enable drop for each vf */
4039 	for (int i = 0; i < adapter->num_vfs; i++) {
4040 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4041 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4042 		                IXGBE_QDE_ENABLE));
4043 	}
4044 } /* ixgbe_enable_rx_drop */
4045 
4046 /************************************************************************
4047  * ixgbe_disable_rx_drop
4048  ************************************************************************/
4049 static void
4050 ixgbe_disable_rx_drop(struct adapter *adapter)
4051 {
4052 	struct ixgbe_hw *hw = &adapter->hw;
4053 	struct rx_ring  *rxr;
4054 	u32             srrctl;
4055 
4056 	for (int i = 0; i < adapter->num_rx_queues; i++) {
4057 		rxr = &adapter->rx_queues[i].rxr;
4058 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4059 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4060 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4061 	}
4062 
4063 	/* disable drop for each vf */
4064 	for (int i = 0; i < adapter->num_vfs; i++) {
4065 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4066 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4067 	}
4068 } /* ixgbe_disable_rx_drop */
4069 
4070 /************************************************************************
4071  * ixgbe_sysctl_advertise
4072  *
4073  *   SYSCTL wrapper around setting advertised speed
4074  ************************************************************************/
4075 static int
4076 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4077 {
4078 	struct adapter *adapter;
4079 	int            error, advertise;
4080 
4081 	adapter = (struct adapter *)arg1;
4082 	advertise = adapter->advertise;
4083 
4084 	error = sysctl_handle_int(oidp, &advertise, 0, req);
4085 	if ((error) || (req->newptr == NULL))
4086 		return (error);
4087 
4088 	return ixgbe_set_advertise(adapter, advertise);
4089 } /* ixgbe_sysctl_advertise */
4090 
4091 /************************************************************************
4092  * ixgbe_set_advertise - Control advertised link speed
4093  *
4094  *   Flags:
4095  *     0x1 - advertise 100 Mb
4096  *     0x2 - advertise 1G
4097  *     0x4 - advertise 10G
4098  *     0x8 - advertise 10 Mb (yes, Mb)
4099  ************************************************************************/
4100 static int
4101 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4102 {
4103 	device_t         dev = iflib_get_dev(adapter->ctx);
4104 	struct ixgbe_hw  *hw;
4105 	ixgbe_link_speed speed = 0;
4106 	ixgbe_link_speed link_caps = 0;
4107 	s32              err = IXGBE_NOT_IMPLEMENTED;
4108 	bool             negotiate = FALSE;
4109 
4110 	/* Checks to validate new value */
4111 	if (adapter->advertise == advertise) /* no change */
4112 		return (0);
4113 
4114 	hw = &adapter->hw;
4115 
4116 	/* No speed changes for backplane media */
4117 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4118 		return (ENODEV);
4119 
4120 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4121 	      (hw->phy.multispeed_fiber))) {
4122 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4123 		return (EINVAL);
4124 	}
4125 
4126 	if (advertise < 0x1 || advertise > 0xF) {
4127 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4128 		return (EINVAL);
4129 	}
4130 
4131 	if (hw->mac.ops.get_link_capabilities) {
4132 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4133 		    &negotiate);
4134 		if (err != IXGBE_SUCCESS) {
4135 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4136 			return (ENODEV);
4137 		}
4138 	}
4139 
4140 	/* Set new value and report new advertised mode */
4141 	if (advertise & 0x1) {
4142 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4143 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4144 			return (EINVAL);
4145 		}
4146 		speed |= IXGBE_LINK_SPEED_100_FULL;
4147 	}
4148 	if (advertise & 0x2) {
4149 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4150 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4151 			return (EINVAL);
4152 		}
4153 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4154 	}
4155 	if (advertise & 0x4) {
4156 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4157 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4158 			return (EINVAL);
4159 		}
4160 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4161 	}
4162 	if (advertise & 0x8) {
4163 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4164 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4165 			return (EINVAL);
4166 		}
4167 		speed |= IXGBE_LINK_SPEED_10_FULL;
4168 	}
4169 
4170 	hw->mac.autotry_restart = TRUE;
4171 	hw->mac.ops.setup_link(hw, speed, TRUE);
4172 	adapter->advertise = advertise;
4173 
4174 	return (0);
4175 } /* ixgbe_set_advertise */
4176 
4177 /************************************************************************
4178  * ixgbe_get_advertise - Get current advertised speed settings
4179  *
4180  *   Formatted for sysctl usage.
4181  *   Flags:
4182  *     0x1 - advertise 100 Mb
4183  *     0x2 - advertise 1G
4184  *     0x4 - advertise 10G
4185  *     0x8 - advertise 10 Mb (yes, Mb)
4186  ************************************************************************/
4187 static int
4188 ixgbe_get_advertise(struct adapter *adapter)
4189 {
4190 	struct ixgbe_hw  *hw = &adapter->hw;
4191 	int              speed;
4192 	ixgbe_link_speed link_caps = 0;
4193 	s32              err;
4194 	bool             negotiate = FALSE;
4195 
4196 	/*
4197 	 * Advertised speed means nothing unless it's copper or
4198 	 * multi-speed fiber
4199 	 */
4200 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4201 	    !(hw->phy.multispeed_fiber))
4202 		return (0);
4203 
4204 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4205 	if (err != IXGBE_SUCCESS)
4206 		return (0);
4207 
4208 	speed =
4209 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4210 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4211 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4212 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4213 
4214 	return speed;
4215 } /* ixgbe_get_advertise */
4216 
4217 /************************************************************************
4218  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4219  *
4220  *   Control values:
4221  *     0/1 - off / on (use default value of 1000)
4222  *
4223  *     Legal timer values are:
4224  *     50,100,250,500,1000,2000,5000,10000
4225  *
4226  *     Turning off interrupt moderation will also turn this off.
4227  ************************************************************************/
4228 static int
4229 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4230 {
4231 	struct adapter *adapter = (struct adapter *)arg1;
4232 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4233 	int            error;
4234 	u16            newval;
4235 
4236 	newval = adapter->dmac;
4237 	error = sysctl_handle_16(oidp, &newval, 0, req);
4238 	if ((error) || (req->newptr == NULL))
4239 		return (error);
4240 
4241 	switch (newval) {
4242 	case 0:
4243 		/* Disabled */
4244 		adapter->dmac = 0;
4245 		break;
4246 	case 1:
4247 		/* Enable and use default */
4248 		adapter->dmac = 1000;
4249 		break;
4250 	case 50:
4251 	case 100:
4252 	case 250:
4253 	case 500:
4254 	case 1000:
4255 	case 2000:
4256 	case 5000:
4257 	case 10000:
4258 		/* Legal values - allow */
4259 		adapter->dmac = newval;
4260 		break;
4261 	default:
4262 		/* Do nothing, illegal value */
4263 		return (EINVAL);
4264 	}
4265 
4266 	/* Re-initialize hardware if it's already running */
4267 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4268 		ifp->if_init(ifp);
4269 
4270 	return (0);
4271 } /* ixgbe_sysctl_dmac */
4272 
4273 #ifdef IXGBE_DEBUG
4274 /************************************************************************
4275  * ixgbe_sysctl_power_state
4276  *
4277  *   Sysctl to test power states
4278  *   Values:
4279  *     0      - set device to D0
4280  *     3      - set device to D3
4281  *     (none) - get current device power state
4282  ************************************************************************/
4283 static int
4284 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4285 {
4286 	struct adapter *adapter = (struct adapter *)arg1;
4287 	device_t       dev = adapter->dev;
4288 	int            curr_ps, new_ps, error = 0;
4289 
4290 	curr_ps = new_ps = pci_get_powerstate(dev);
4291 
4292 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4293 	if ((error) || (req->newptr == NULL))
4294 		return (error);
4295 
4296 	if (new_ps == curr_ps)
4297 		return (0);
4298 
4299 	if (new_ps == 3 && curr_ps == 0)
4300 		error = DEVICE_SUSPEND(dev);
4301 	else if (new_ps == 0 && curr_ps == 3)
4302 		error = DEVICE_RESUME(dev);
4303 	else
4304 		return (EINVAL);
4305 
4306 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4307 
4308 	return (error);
4309 } /* ixgbe_sysctl_power_state */
4310 #endif
4311 
4312 /************************************************************************
4313  * ixgbe_sysctl_wol_enable
4314  *
4315  *   Sysctl to enable/disable the WoL capability,
4316  *   if supported by the adapter.
4317  *
4318  *   Values:
4319  *     0 - disabled
4320  *     1 - enabled
4321  ************************************************************************/
4322 static int
4323 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4324 {
4325 	struct adapter  *adapter = (struct adapter *)arg1;
4326 	struct ixgbe_hw *hw = &adapter->hw;
4327 	int             new_wol_enabled;
4328 	int             error = 0;
4329 
4330 	new_wol_enabled = hw->wol_enabled;
4331 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4332 	if ((error) || (req->newptr == NULL))
4333 		return (error);
4334 	new_wol_enabled = !!(new_wol_enabled);
4335 	if (new_wol_enabled == hw->wol_enabled)
4336 		return (0);
4337 
4338 	if (new_wol_enabled > 0 && !adapter->wol_support)
4339 		return (ENODEV);
4340 	else
4341 		hw->wol_enabled = new_wol_enabled;
4342 
4343 	return (0);
4344 } /* ixgbe_sysctl_wol_enable */
4345 
4346 /************************************************************************
4347  * ixgbe_sysctl_wufc - Wake Up Filter Control
4348  *
4349  *   Sysctl to enable/disable the types of packets that the
4350  *   adapter will wake up on upon receipt.
4351  *   Flags:
4352  *     0x1  - Link Status Change
4353  *     0x2  - Magic Packet
4354  *     0x4  - Direct Exact
4355  *     0x8  - Directed Multicast
4356  *     0x10 - Broadcast
4357  *     0x20 - ARP/IPv4 Request Packet
4358  *     0x40 - Direct IPv4 Packet
4359  *     0x80 - Direct IPv6 Packet
4360  *
4361  *   Settings not listed above will cause the sysctl to return an error.
4362  ************************************************************************/
4363 static int
4364 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4365 {
4366 	struct adapter *adapter = (struct adapter *)arg1;
4367 	int            error = 0;
4368 	u32            new_wufc;
4369 
4370 	new_wufc = adapter->wufc;
4371 
4372 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4373 	if ((error) || (req->newptr == NULL))
4374 		return (error);
4375 	if (new_wufc == adapter->wufc)
4376 		return (0);
4377 
4378 	if (new_wufc & 0xffffff00)
4379 		return (EINVAL);
4380 
4381 	new_wufc &= 0xff;
4382 	new_wufc |= (0xffffff & adapter->wufc);
4383 	adapter->wufc = new_wufc;
4384 
4385 	return (0);
4386 } /* ixgbe_sysctl_wufc */
4387 
4388 #ifdef IXGBE_DEBUG
4389 /************************************************************************
4390  * ixgbe_sysctl_print_rss_config
4391  ************************************************************************/
4392 static int
4393 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4394 {
4395 	struct adapter  *adapter = (struct adapter *)arg1;
4396 	struct ixgbe_hw *hw = &adapter->hw;
4397 	device_t        dev = adapter->dev;
4398 	struct sbuf     *buf;
4399 	int             error = 0, reta_size;
4400 	u32             reg;
4401 
4402 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4403 	if (!buf) {
4404 		device_printf(dev, "Could not allocate sbuf for output.\n");
4405 		return (ENOMEM);
4406 	}
4407 
4408 	// TODO: use sbufs to make a string to print out
4409 	/* Set multiplier for RETA setup and table size based on MAC */
4410 	switch (adapter->hw.mac.type) {
4411 	case ixgbe_mac_X550:
4412 	case ixgbe_mac_X550EM_x:
4413 	case ixgbe_mac_X550EM_a:
4414 		reta_size = 128;
4415 		break;
4416 	default:
4417 		reta_size = 32;
4418 		break;
4419 	}
4420 
4421 	/* Print out the redirection table */
4422 	sbuf_cat(buf, "\n");
4423 	for (int i = 0; i < reta_size; i++) {
4424 		if (i < 32) {
4425 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4426 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4427 		} else {
4428 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4429 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4430 		}
4431 	}
4432 
4433 	// TODO: print more config
4434 
4435 	error = sbuf_finish(buf);
4436 	if (error)
4437 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4438 
4439 	sbuf_delete(buf);
4440 
4441 	return (0);
4442 } /* ixgbe_sysctl_print_rss_config */
4443 #endif /* IXGBE_DEBUG */
4444 
4445 /************************************************************************
4446  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4447  *
4448  *   For X552/X557-AT devices using an external PHY
4449  ************************************************************************/
4450 static int
4451 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4452 {
4453 	struct adapter  *adapter = (struct adapter *)arg1;
4454 	struct ixgbe_hw *hw = &adapter->hw;
4455 	u16             reg;
4456 
4457 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4458 		device_printf(iflib_get_dev(adapter->ctx),
4459 		    "Device has no supported external thermal sensor.\n");
4460 		return (ENODEV);
4461 	}
4462 
4463 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4464 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4465 		device_printf(iflib_get_dev(adapter->ctx),
4466 		    "Error reading from PHY's current temperature register\n");
4467 		return (EAGAIN);
4468 	}
4469 
4470 	/* Shift temp for output */
4471 	reg = reg >> 8;
4472 
4473 	return (sysctl_handle_16(oidp, NULL, reg, req));
4474 } /* ixgbe_sysctl_phy_temp */
4475 
4476 /************************************************************************
4477  * ixgbe_sysctl_phy_overtemp_occurred
4478  *
4479  *   Reports (directly from the PHY) whether the current PHY
4480  *   temperature is over the overtemp threshold.
4481  ************************************************************************/
4482 static int
4483 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4484 {
4485 	struct adapter  *adapter = (struct adapter *)arg1;
4486 	struct ixgbe_hw *hw = &adapter->hw;
4487 	u16             reg;
4488 
4489 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4490 		device_printf(iflib_get_dev(adapter->ctx),
4491 		    "Device has no supported external thermal sensor.\n");
4492 		return (ENODEV);
4493 	}
4494 
4495 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4496 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4497 		device_printf(iflib_get_dev(adapter->ctx),
4498 		    "Error reading from PHY's temperature status register\n");
4499 		return (EAGAIN);
4500 	}
4501 
4502 	/* Get occurrence bit */
4503 	reg = !!(reg & 0x4000);
4504 
4505 	return (sysctl_handle_16(oidp, 0, reg, req));
4506 } /* ixgbe_sysctl_phy_overtemp_occurred */
4507 
4508 /************************************************************************
4509  * ixgbe_sysctl_eee_state
4510  *
4511  *   Sysctl to set EEE power saving feature
4512  *   Values:
4513  *     0      - disable EEE
4514  *     1      - enable EEE
4515  *     (none) - get current device EEE state
4516  ************************************************************************/
4517 static int
4518 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4519 {
4520 	struct adapter *adapter = (struct adapter *)arg1;
4521 	device_t       dev = adapter->dev;
4522 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4523 	int            curr_eee, new_eee, error = 0;
4524 	s32            retval;
4525 
4526 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4527 
4528 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4529 	if ((error) || (req->newptr == NULL))
4530 		return (error);
4531 
4532 	/* Nothing to do */
4533 	if (new_eee == curr_eee)
4534 		return (0);
4535 
4536 	/* Not supported */
4537 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4538 		return (EINVAL);
4539 
4540 	/* Bounds checking */
4541 	if ((new_eee < 0) || (new_eee > 1))
4542 		return (EINVAL);
4543 
4544 	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4545 	if (retval) {
4546 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4547 		return (EINVAL);
4548 	}
4549 
4550 	/* Restart auto-neg */
4551 	ifp->if_init(ifp);
4552 
4553 	device_printf(dev, "New EEE state: %d\n", new_eee);
4554 
4555 	/* Cache new value */
4556 	if (new_eee)
4557 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4558 	else
4559 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4560 
4561 	return (error);
4562 } /* ixgbe_sysctl_eee_state */
4563 
4564 /************************************************************************
4565  * ixgbe_init_device_features
4566  ************************************************************************/
4567 static void
4568 ixgbe_init_device_features(struct adapter *adapter)
4569 {
4570 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4571 	                  | IXGBE_FEATURE_RSS
4572 	                  | IXGBE_FEATURE_MSI
4573 	                  | IXGBE_FEATURE_MSIX
4574 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4575 
4576 	/* Set capabilities first... */
4577 	switch (adapter->hw.mac.type) {
4578 	case ixgbe_mac_82598EB:
4579 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4580 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4581 		break;
4582 	case ixgbe_mac_X540:
4583 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4584 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4585 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4586 		    (adapter->hw.bus.func == 0))
4587 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4588 		break;
4589 	case ixgbe_mac_X550:
4590 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4591 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4592 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4593 		break;
4594 	case ixgbe_mac_X550EM_x:
4595 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4596 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4597 		break;
4598 	case ixgbe_mac_X550EM_a:
4599 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4600 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4601 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4602 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4603 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4604 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4605 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4606 		}
4607 		break;
4608 	case ixgbe_mac_82599EB:
4609 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4610 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4611 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4612 		    (adapter->hw.bus.func == 0))
4613 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4614 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4615 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4616 		break;
4617 	default:
4618 		break;
4619 	}
4620 
4621 	/* Enabled by default... */
4622 	/* Fan failure detection */
4623 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4624 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4625 	/* Netmap */
4626 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4627 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4628 	/* EEE */
4629 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4630 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4631 	/* Thermal Sensor */
4632 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4633 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4634 
4635 	/* Enabled via global sysctl... */
4636 	/* Flow Director */
4637 	if (ixgbe_enable_fdir) {
4638 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4639 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4640 		else
4641 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4642 	}
4643 	/*
4644 	 * Message Signal Interrupts - Extended (MSI-X)
4645 	 * Normal MSI is only enabled if MSI-X calls fail.
4646 	 */
4647 	if (!ixgbe_enable_msix)
4648 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4649 	/* Receive-Side Scaling (RSS) */
4650 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4651 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4652 
4653 	/* Disable features with unmet dependencies... */
4654 	/* No MSI-X */
4655 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4656 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4657 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4658 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4659 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4660 	}
4661 } /* ixgbe_init_device_features */
4662 
4663 /************************************************************************
4664  * ixgbe_check_fan_failure
4665  ************************************************************************/
4666 static void
4667 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4668 {
4669 	u32 mask;
4670 
4671 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4672 	    IXGBE_ESDP_SDP1;
4673 
4674 	if (reg & mask)
4675 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4676 } /* ixgbe_check_fan_failure */
4677