xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 0e2816f50ada7d863e26a630cdda327de449b1da)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by iflib_device_probe() to select devices to attach to.
57  *   The last field is the device description string.
58  *   The last entry must be PVID_END.
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 int ixgbe_intr(void *arg);
143 
144 /************************************************************************
145  * Function prototypes
146  ************************************************************************/
147 #if __FreeBSD_version >= 1100036
148 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
149 #endif
150 
151 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
154 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
155 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
156 
157 static void ixgbe_config_dmac(struct adapter *adapter);
158 static void ixgbe_configure_ivars(struct adapter *adapter);
159 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
160                            s8 type);
161 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static bool ixgbe_sfp_probe(if_ctx_t ctx);
163 
164 static void ixgbe_free_pci_resources(if_ctx_t ctx);
165 
166 static int  ixgbe_msix_link(void *arg);
167 static int  ixgbe_msix_que(void *arg);
168 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
169 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
170 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
171 
172 static int  ixgbe_setup_interface(if_ctx_t ctx);
173 static void ixgbe_init_device_features(struct adapter *adapter);
174 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
175 static void ixgbe_add_media_types(if_ctx_t ctx);
176 static void ixgbe_update_stats_counters(struct adapter *adapter);
177 static void ixgbe_config_link(if_ctx_t ctx);
178 static void ixgbe_get_slot_info(struct adapter *);
179 static void ixgbe_check_wol_support(struct adapter *adapter);
180 static void ixgbe_enable_rx_drop(struct adapter *);
181 static void ixgbe_disable_rx_drop(struct adapter *);
182 
183 static void ixgbe_add_hw_stats(struct adapter *adapter);
184 static int  ixgbe_set_flowcntl(struct adapter *, int);
185 static int  ixgbe_set_advertise(struct adapter *, int);
186 static int  ixgbe_get_advertise(struct adapter *);
187 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
188 static void ixgbe_config_gpie(struct adapter *adapter);
189 static void ixgbe_config_delay_values(struct adapter *adapter);
190 
191 /* Sysctl handlers */
192 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
198 #ifdef IXGBE_DEBUG
199 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
200 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
201 #endif
202 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
209 
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_msf(void *);
212 static void ixgbe_handle_mod(void *);
213 static void ixgbe_handle_phy(void *);
214 
215 /************************************************************************
216  *  FreeBSD Device Interface Entry Points
217  ************************************************************************/
218 static device_method_t ix_methods[] = {
219 	/* Device interface */
220 	DEVMETHOD(device_register, ixgbe_register),
221 	DEVMETHOD(device_probe, iflib_device_probe),
222 	DEVMETHOD(device_attach, iflib_device_attach),
223 	DEVMETHOD(device_detach, iflib_device_detach),
224 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
225 	DEVMETHOD(device_suspend, iflib_device_suspend),
226 	DEVMETHOD(device_resume, iflib_device_resume),
227 #ifdef PCI_IOV
228 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
229 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
230 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
231 #endif /* PCI_IOV */
232 	DEVMETHOD_END
233 };
234 
235 static driver_t ix_driver = {
236 	"ix", ix_methods, sizeof(struct adapter),
237 };
238 
239 devclass_t ix_devclass;
240 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
241 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
242 MODULE_DEPEND(ix, pci, 1, 1, 1);
243 MODULE_DEPEND(ix, ether, 1, 1, 1);
244 MODULE_DEPEND(ix, iflib, 1, 1, 1);
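/*
 * Note: this driver normally builds as the if_ix(4) kernel module; as a
 * sketch, it can be loaded at boot by adding 'if_ix_load="YES"' to
 * /boot/loader.conf, or at runtime with 'kldload if_ix'.
 */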
245 
246 static device_method_t ixgbe_if_methods[] = {
247 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
248 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
249 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
250 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
251 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
252 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
253 	DEVMETHOD(ifdi_init, ixgbe_if_init),
254 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
255 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
256 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
257 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
258 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
259 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
260 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
261 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
262 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
263 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
264 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
265 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
266 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
267 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
268 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
269 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
270 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
271 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
272 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
273 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
274 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
275 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
276 #ifdef PCI_IOV
277 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
278 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
279 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
280 #endif /* PCI_IOV */
281 	DEVMETHOD_END
282 };
283 
284 /*
285  * TUNEABLE PARAMETERS:
286  */
287 
288 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
289     "IXGBE driver parameters");
290 static driver_t ixgbe_if_driver = {
291   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
292 };
293 
294 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
295 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
296     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
297 
298 /* Flow control setting, default to full */
299 static int ixgbe_flow_control = ixgbe_fc_full;
300 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
301     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
302 
303 /* Advertise Speed, default to 0 (auto) */
304 static int ixgbe_advertise_speed = 0;
305 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
306     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
307 
308 /*
309  * Smart speed setting, default to on.
310  * This only works as a compile option right now, as it is set
311  * during attach; set this to 'ixgbe_smart_speed_off' to
312  * disable.
314  */
315 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
316 
317 /*
318  * MSI-X should be the default for best performance,
319  * but this allows it to be forced off for testing.
320  */
321 static int ixgbe_enable_msix = 1;
322 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
323     "Enable MSI-X interrupts");
324 
325 /*
326  * Setting this allows the use of unsupported
327  * SFP+ modules; note that if you do so,
328  * you are on your own :)
329  */
330 static int allow_unsupported_sfp = FALSE;
331 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
332     &allow_unsupported_sfp, 0,
333     "Allow unsupported SFP modules...use at your own risk");
334 
335 /*
336  * Not sure if Flow Director is fully baked,
337  * so we'll default to turning it off.
338  */
339 static int ixgbe_enable_fdir = 0;
340 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
341     "Enable Flow Director");
342 
343 /* Receive-Side Scaling */
344 static int ixgbe_enable_rss = 1;
345 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
346     "Enable Receive-Side Scaling (RSS)");
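/*
 * All of the hw.ix.* knobs above are CTLFLAG_RDTUN, so they must be set as
 * loader tunables rather than changed at runtime.  A minimal
 * /boot/loader.conf sketch (values are illustrative, not recommendations):
 *
 *   hw.ix.max_interrupt_rate=31250
 *   hw.ix.flow_control=3     # 3 == ixgbe_fc_full, the driver default
 *   hw.ix.advertise_speed=0  # 0 == auto
 *   hw.ix.enable_msix=1
 *   hw.ix.unsupported_sfp=0
 *   hw.ix.enable_fdir=0
 *   hw.ix.enable_rss=1
 */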
347 
348 #if 0
349 /* Keep running tab on them for sanity check */
350 static int ixgbe_total_ports;
351 #endif
352 
353 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
354 
355 /*
356  * For Flow Director: this is the rate at which we sample TX packets
357  * for the filter pool; with the default of 20, every 20th packet is probed.
358  *
359  * This feature can be disabled by setting this to 0.
360  */
361 static int atr_sample_rate = 20;
362 
363 extern struct if_txrx ixgbe_txrx;
364 
365 static struct if_shared_ctx ixgbe_sctx_init = {
366 	.isc_magic = IFLIB_MAGIC,
367 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
368 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
369 	.isc_tx_maxsegsize = PAGE_SIZE,
370 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
371 	.isc_tso_maxsegsize = PAGE_SIZE,
372 	.isc_rx_maxsize = PAGE_SIZE*4,
373 	.isc_rx_nsegments = 1,
374 	.isc_rx_maxsegsize = PAGE_SIZE*4,
375 	.isc_nfl = 1,
376 	.isc_ntxqs = 1,
377 	.isc_nrxqs = 1,
378 
379 	.isc_admin_intrcnt = 1,
380 	.isc_vendor_info = ixgbe_vendor_info_array,
381 	.isc_driver_version = ixgbe_driver_version,
382 	.isc_driver = &ixgbe_if_driver,
383 	.isc_flags = IFLIB_TSO_INIT_IP,
384 
385 	.isc_nrxd_min = {MIN_RXD},
386 	.isc_ntxd_min = {MIN_TXD},
387 	.isc_nrxd_max = {MAX_RXD},
388 	.isc_ntxd_max = {MAX_TXD},
389 	.isc_nrxd_default = {DEFAULT_RXD},
390 	.isc_ntxd_default = {DEFAULT_TXD},
391 };
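/*
 * A rough sketch of how the descriptor bounds above are used: iflib clamps
 * the ring sizes to [MIN_TXD, MAX_TXD] / [MIN_RXD, MAX_RXD] and, assuming the
 * stock iflib knobs, they can be overridden per unit via the
 * dev.ix.<unit>.iflib.override_ntxds / override_nrxds tunables.
 */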
392 
393 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
394 
395 /************************************************************************
396  * ixgbe_if_tx_queues_alloc
397  ************************************************************************/
398 static int
399 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
400                          int ntxqs, int ntxqsets)
401 {
402 	struct adapter     *adapter = iflib_get_softc(ctx);
403 	if_softc_ctx_t     scctx = adapter->shared;
404 	struct ix_tx_queue *que;
405 	int                i, j, error;
406 
407 	MPASS(adapter->num_tx_queues > 0);
408 	MPASS(adapter->num_tx_queues == ntxqsets);
409 	MPASS(ntxqs == 1);
410 
411 	/* Allocate queue structure memory */
412 	adapter->tx_queues =
413 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
414 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
415 	if (!adapter->tx_queues) {
416 		device_printf(iflib_get_dev(ctx),
417 		    "Unable to allocate TX ring memory\n");
418 		return (ENOMEM);
419 	}
420 
421 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
422 		struct tx_ring *txr = &que->txr;
423 
424 		/* In case SR-IOV is enabled, align the index properly */
425 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
426 		    i);
427 
428 		txr->adapter = que->adapter = adapter;
429 
430 		/* Allocate report status array */
431 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
432 		if (txr->tx_rsq == NULL) {
433 			error = ENOMEM;
434 			goto fail;
435 		}
436 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
437 			txr->tx_rsq[j] = QIDX_INVALID;
438 		/* get the virtual and physical address of the hardware queues */
439 		txr->tail = IXGBE_TDT(txr->me);
440 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
441 		txr->tx_paddr = paddrs[i];
442 
443 		txr->bytes = 0;
444 		txr->total_packets = 0;
445 
446 		/* Set the rate at which we sample packets */
447 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
448 			txr->atr_sample = atr_sample_rate;
449 
450 	}
451 
452 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
453 	    adapter->num_tx_queues);
454 
455 	return (0);
456 
457 fail:
458 	ixgbe_if_queues_free(ctx);
459 
460 	return (error);
461 } /* ixgbe_if_tx_queues_alloc */
462 
463 /************************************************************************
464  * ixgbe_if_rx_queues_alloc
465  ************************************************************************/
466 static int
467 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
468                          int nrxqs, int nrxqsets)
469 {
470 	struct adapter     *adapter = iflib_get_softc(ctx);
471 	struct ix_rx_queue *que;
472 	int                i;
473 
474 	MPASS(adapter->num_rx_queues > 0);
475 	MPASS(adapter->num_rx_queues == nrxqsets);
476 	MPASS(nrxqs == 1);
477 
478 	/* Allocate queue structure memory */
479 	adapter->rx_queues =
480 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
481 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
482 	if (!adapter->rx_queues) {
483 		device_printf(iflib_get_dev(ctx),
484 		    "Unable to allocate RX ring memory\n");
485 		return (ENOMEM);
486 	}
487 
488 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
489 		struct rx_ring *rxr = &que->rxr;
490 
491 		/* In case SR-IOV is enabled, align the index properly */
492 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
493 		    i);
494 
495 		rxr->adapter = que->adapter = adapter;
496 
497 		/* get the virtual and physical address of the hw queues */
498 		rxr->tail = IXGBE_RDT(rxr->me);
499 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
500 		rxr->rx_paddr = paddrs[i];
501 		rxr->bytes = 0;
502 		rxr->que = que;
503 	}
504 
505 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
506 	    adapter->num_rx_queues);
507 
508 	return (0);
509 } /* ixgbe_if_rx_queues_alloc */
510 
511 /************************************************************************
512  * ixgbe_if_queues_free
513  ************************************************************************/
514 static void
515 ixgbe_if_queues_free(if_ctx_t ctx)
516 {
517 	struct adapter     *adapter = iflib_get_softc(ctx);
518 	struct ix_tx_queue *tx_que = adapter->tx_queues;
519 	struct ix_rx_queue *rx_que = adapter->rx_queues;
520 	int                i;
521 
522 	if (tx_que != NULL) {
523 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
524 			struct tx_ring *txr = &tx_que->txr;
525 			if (txr->tx_rsq == NULL)
526 				break;
527 
528 			free(txr->tx_rsq, M_IXGBE);
529 			txr->tx_rsq = NULL;
530 		}
531 
532 		free(adapter->tx_queues, M_IXGBE);
533 		adapter->tx_queues = NULL;
534 	}
535 	if (rx_que != NULL) {
536 		free(adapter->rx_queues, M_IXGBE);
537 		adapter->rx_queues = NULL;
538 	}
539 } /* ixgbe_if_queues_free */
540 
541 /************************************************************************
542  * ixgbe_initialize_rss_mapping
543  ************************************************************************/
544 static void
545 ixgbe_initialize_rss_mapping(struct adapter *adapter)
546 {
547 	struct ixgbe_hw *hw = &adapter->hw;
548 	u32             reta = 0, mrqc, rss_key[10];
549 	int             queue_id, table_size, index_mult;
550 	int             i, j;
551 	u32             rss_hash_config;
552 
553 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
554 		/* Fetch the configured RSS key */
555 		rss_getkey((uint8_t *)&rss_key);
556 	} else {
557 		/* set up random bits */
558 		arc4rand(&rss_key, sizeof(rss_key), 0);
559 	}
560 
561 	/* Set multiplier for RETA setup and table size based on MAC */
562 	index_mult = 0x1;
563 	table_size = 128;
564 	switch (adapter->hw.mac.type) {
565 	case ixgbe_mac_82598EB:
566 		index_mult = 0x11;
567 		break;
568 	case ixgbe_mac_X550:
569 	case ixgbe_mac_X550EM_x:
570 	case ixgbe_mac_X550EM_a:
571 		table_size = 512;
572 		break;
573 	default:
574 		break;
575 	}
576 
577 	/* Set up the redirection table */
578 	for (i = 0, j = 0; i < table_size; i++, j++) {
579 		if (j == adapter->num_rx_queues)
580 			j = 0;
581 
582 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
583 			/*
584 			 * Fetch the RSS bucket id for the given indirection
585 			 * entry. Cap it at the number of configured buckets
586 			 * (which is num_rx_queues.)
587 			 */
588 			queue_id = rss_get_indirection_to_bucket(i);
589 			queue_id = queue_id % adapter->num_rx_queues;
590 		} else
591 			queue_id = (j * index_mult);
592 
593 		/*
594 		 * The low 8 bits are for hash value (n+0);
595 		 * The next 8 bits are for hash value (n+1), etc.
596 		 */
597 		reta = reta >> 8;
598 		reta = reta | (((uint32_t)queue_id) << 24);
599 		if ((i & 3) == 3) {
600 			if (i < 128)
601 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
602 			else
603 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
604 				    reta);
605 			reta = 0;
606 		}
607 	}
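	/*
	 * Worked example of the packing above (illustrative, assuming four
	 * RX queues and index_mult == 1): entries 0..3 pick queue ids
	 * 0,1,2,3, so after the fourth pass reta == 0x03020100 and that
	 * word is written to RETA(0); entries 4..7 then fill RETA(1), etc.
	 */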
608 
609 	/* Now fill our hash function seeds */
610 	for (i = 0; i < 10; i++)
611 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
612 
613 	/* Perform hash on these packet types */
614 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
615 		rss_hash_config = rss_gethashconfig();
616 	else {
617 		/*
618 		 * Disable UDP - IP fragments aren't currently being handled
619 		 * and so we end up with a mix of 2-tuple and 4-tuple
620 		 * traffic.
621 		 */
622 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
623 		                | RSS_HASHTYPE_RSS_TCP_IPV4
624 		                | RSS_HASHTYPE_RSS_IPV6
625 		                | RSS_HASHTYPE_RSS_TCP_IPV6
626 		                | RSS_HASHTYPE_RSS_IPV6_EX
627 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
628 	}
629 
630 	mrqc = IXGBE_MRQC_RSSEN;
631 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
632 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
633 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
634 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
635 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
636 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
637 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
638 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
639 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
640 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
649 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
650 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
651 } /* ixgbe_initialize_rss_mapping */
652 
653 /************************************************************************
654  * ixgbe_initialize_receive_units - Setup receive registers and features.
655  ************************************************************************/
656 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
657 
658 static void
659 ixgbe_initialize_receive_units(if_ctx_t ctx)
660 {
661 	struct adapter     *adapter = iflib_get_softc(ctx);
662 	if_softc_ctx_t     scctx = adapter->shared;
663 	struct ixgbe_hw    *hw = &adapter->hw;
664 	struct ifnet       *ifp = iflib_get_ifp(ctx);
665 	struct ix_rx_queue *que;
666 	int                i, j;
667 	u32                bufsz, fctrl, srrctl, rxcsum;
668 	u32                hlreg;
669 
670 	/*
671 	 * Make sure receives are disabled while
672 	 * setting up the descriptor ring
673 	 */
674 	ixgbe_disable_rx(hw);
675 
676 	/* Enable broadcasts */
677 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
678 	fctrl |= IXGBE_FCTRL_BAM;
679 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
680 		fctrl |= IXGBE_FCTRL_DPF;
681 		fctrl |= IXGBE_FCTRL_PMCF;
682 	}
683 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
684 
685 	/* Set for Jumbo Frames? */
686 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
687 	if (ifp->if_mtu > ETHERMTU)
688 		hlreg |= IXGBE_HLREG0_JUMBOEN;
689 	else
690 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
691 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
692 
693 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
694 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
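	/*
	 * Illustrative example (assuming the usual 1 KB SRRCTL packet-buffer
	 * granularity): a 2048-byte rx_mbuf_sz rounds up and shifts down to
	 * bufsz == 2, i.e. 2 KB per-descriptor receive buffers are programmed
	 * in the loop below.
	 */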
695 
696 	/* Setup the Base and Length of the Rx Descriptor Ring */
697 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
698 		struct rx_ring *rxr = &que->rxr;
699 		u64            rdba = rxr->rx_paddr;
700 
701 		j = rxr->me;
702 
703 		/* Setup the Base and Length of the Rx Descriptor Ring */
704 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
705 		    (rdba & 0x00000000ffffffffULL));
706 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
707 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
708 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
709 
710 		/* Set up the SRRCTL register */
711 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
712 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
713 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
714 		srrctl |= bufsz;
715 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
716 
717 		/*
718 		 * Set DROP_EN iff we have no flow control and >1 queue.
719 		 * Note that srrctl was cleared shortly before during reset,
720 		 * so we do not need to clear the bit, but do it just in case
721 		 * this code is moved elsewhere.
722 		 */
723 		if (adapter->num_rx_queues > 1 &&
724 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
725 			srrctl |= IXGBE_SRRCTL_DROP_EN;
726 		} else {
727 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
728 		}
729 
730 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
731 
732 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
733 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
734 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
735 
736 		/* Set the driver rx tail address */
737 		rxr->tail =  IXGBE_RDT(rxr->me);
738 	}
739 
740 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
741 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
742 		            | IXGBE_PSRTYPE_UDPHDR
743 		            | IXGBE_PSRTYPE_IPV4HDR
744 		            | IXGBE_PSRTYPE_IPV6HDR;
745 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
746 	}
747 
748 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
749 
750 	ixgbe_initialize_rss_mapping(adapter);
751 
752 	if (adapter->num_rx_queues > 1) {
753 		/* RSS and RX IPP Checksum are mutually exclusive */
754 		rxcsum |= IXGBE_RXCSUM_PCSD;
755 	}
756 
757 	if (ifp->if_capenable & IFCAP_RXCSUM)
758 		rxcsum |= IXGBE_RXCSUM_PCSD;
759 
760 	/* This is useful for calculating UDP/IP fragment checksums */
761 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
762 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
763 
764 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
765 
766 } /* ixgbe_initialize_receive_units */
767 
768 /************************************************************************
769  * ixgbe_initialize_transmit_units - Enable transmit units.
770  ************************************************************************/
771 static void
772 ixgbe_initialize_transmit_units(if_ctx_t ctx)
773 {
774 	struct adapter     *adapter = iflib_get_softc(ctx);
775 	struct ixgbe_hw    *hw = &adapter->hw;
776 	if_softc_ctx_t     scctx = adapter->shared;
777 	struct ix_tx_queue *que;
778 	int i;
779 
780 	/* Setup the Base and Length of the Tx Descriptor Ring */
781 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
782 	    i++, que++) {
783 		struct tx_ring	   *txr = &que->txr;
784 		u64 tdba = txr->tx_paddr;
785 		u32 txctrl = 0;
786 		int j = txr->me;
787 
788 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
789 		    (tdba & 0x00000000ffffffffULL));
790 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
791 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
792 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
793 
794 		/* Setup the HW Tx Head and Tail descriptor pointers */
795 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
796 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
797 
798 		/* Cache the tail address */
799 		txr->tail = IXGBE_TDT(txr->me);
800 
801 		txr->tx_rs_cidx = txr->tx_rs_pidx;
802 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
803 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
804 			txr->tx_rsq[k] = QIDX_INVALID;
805 
806 		/* Disable Head Writeback */
807 		/*
808 		 * Note: for X550 series devices, these registers are actually
809 		 * prefixed with TPH_ instead of DCA_, but the addresses and
810 		 * fields remain the same.
811 		 */
812 		switch (hw->mac.type) {
813 		case ixgbe_mac_82598EB:
814 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
815 			break;
816 		default:
817 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
818 			break;
819 		}
820 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
821 		switch (hw->mac.type) {
822 		case ixgbe_mac_82598EB:
823 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
824 			break;
825 		default:
826 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
827 			break;
828 		}
829 
830 	}
831 
832 	if (hw->mac.type != ixgbe_mac_82598EB) {
833 		u32 dmatxctl, rttdcs;
834 
835 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
836 		dmatxctl |= IXGBE_DMATXCTL_TE;
837 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
838 		/* Disable arbiter to set MTQC */
839 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
840 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
841 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
842 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
843 		    ixgbe_get_mtqc(adapter->iov_mode));
844 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
845 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
846 	}
847 
848 } /* ixgbe_initialize_transmit_units */
849 
850 /************************************************************************
851  * ixgbe_register
852  ************************************************************************/
853 static void *
854 ixgbe_register(device_t dev)
855 {
856 	return (ixgbe_sctx);
857 } /* ixgbe_register */
858 
859 /************************************************************************
860  * ixgbe_if_attach_pre - Device initialization routine, part 1
861  *
862  *   Called when the driver is being loaded.
863  *   Identifies the type of hardware, initializes the hardware,
864  *   and initializes iflib structures.
865  *
866  *   return 0 on success, positive on failure
867  ************************************************************************/
868 static int
869 ixgbe_if_attach_pre(if_ctx_t ctx)
870 {
871 	struct adapter  *adapter;
872 	device_t        dev;
873 	if_softc_ctx_t  scctx;
874 	struct ixgbe_hw *hw;
875 	int             error = 0;
876 	u32             ctrl_ext;
877 
878 	INIT_DEBUGOUT("ixgbe_attach: begin");
879 
880 	/* Allocate, clear, and link in our adapter structure */
881 	dev = iflib_get_dev(ctx);
882 	adapter = iflib_get_softc(ctx);
883 	adapter->hw.back = adapter;
884 	adapter->ctx = ctx;
885 	adapter->dev = dev;
886 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
887 	adapter->media = iflib_get_media(ctx);
888 	hw = &adapter->hw;
889 
890 	/* Determine hardware revision */
891 	hw->vendor_id = pci_get_vendor(dev);
892 	hw->device_id = pci_get_device(dev);
893 	hw->revision_id = pci_get_revid(dev);
894 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
895 	hw->subsystem_device_id = pci_get_subdevice(dev);
896 
897 	/* Do base PCI setup - map BAR0 */
898 	if (ixgbe_allocate_pci_resources(ctx)) {
899 		device_printf(dev, "Allocation of PCI resources failed\n");
900 		return (ENXIO);
901 	}
902 
903 	/* let hardware know driver is loaded */
904 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
905 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
906 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
907 
908 	/*
909 	 * Initialize the shared code
910 	 */
911 	if (ixgbe_init_shared_code(hw) != 0) {
912 		device_printf(dev, "Unable to initialize the shared code\n");
913 		error = ENXIO;
914 		goto err_pci;
915 	}
916 
917 	if (hw->mbx.ops.init_params)
918 		hw->mbx.ops.init_params(hw);
919 
920 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
921 
922 	if (hw->mac.type != ixgbe_mac_82598EB)
923 		hw->phy.smart_speed = ixgbe_smart_speed;
924 
925 	ixgbe_init_device_features(adapter);
926 
927 	/* Enable WoL (if supported) */
928 	ixgbe_check_wol_support(adapter);
929 
930 	/* Verify adapter fan is still functional (if applicable) */
931 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
932 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
933 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
934 	}
935 
936 	/* Ensure SW/FW semaphore is free */
937 	ixgbe_init_swfw_semaphore(hw);
938 
939 	/* Set an initial default flow control value */
940 	hw->fc.requested_mode = ixgbe_flow_control;
941 
942 	hw->phy.reset_if_overtemp = TRUE;
943 	error = ixgbe_reset_hw(hw);
944 	hw->phy.reset_if_overtemp = FALSE;
945 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
946 		/*
947 		 * No optics in this port, set up
948 		 * so the timer routine will probe
949 		 * for later insertion.
950 		 */
951 		adapter->sfp_probe = TRUE;
952 		error = 0;
953 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
954 		device_printf(dev, "Unsupported SFP+ module detected!\n");
955 		error = EIO;
956 		goto err_pci;
957 	} else if (error) {
958 		device_printf(dev, "Hardware initialization failed\n");
959 		error = EIO;
960 		goto err_pci;
961 	}
962 
963 	/* Make sure we have a good EEPROM before we read from it */
964 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
965 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
966 		error = EIO;
967 		goto err_pci;
968 	}
969 
970 	error = ixgbe_start_hw(hw);
971 	switch (error) {
972 	case IXGBE_ERR_EEPROM_VERSION:
973 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
974 		break;
975 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
976 		device_printf(dev, "Unsupported SFP+ Module\n");
977 		error = EIO;
978 		goto err_pci;
979 	case IXGBE_ERR_SFP_NOT_PRESENT:
980 		device_printf(dev, "No SFP+ Module found\n");
981 		/* falls thru */
982 	default:
983 		break;
984 	}
985 
986 	/* Most of the iflib initialization... */
987 
988 	iflib_set_mac(ctx, hw->mac.addr);
989 	switch (adapter->hw.mac.type) {
990 	case ixgbe_mac_X550:
991 	case ixgbe_mac_X550EM_x:
992 	case ixgbe_mac_X550EM_a:
993 		scctx->isc_rss_table_size = 512;
994 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
995 		break;
996 	default:
997 		scctx->isc_rss_table_size = 128;
998 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
999 	}
1000 
1001 	/* Allow legacy interrupts */
1002 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1003 
1004 	scctx->isc_txqsizes[0] =
1005 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1006 	    sizeof(u32), DBA_ALIGN);
1007 	scctx->isc_rxqsizes[0] =
1008 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1009 	    DBA_ALIGN);
1010 
1011 	/* XXX */
1012 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1013 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1014 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1015 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1016 	} else {
1017 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1018 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1019 	}
1020 
1021 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1022 
1023 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1024 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1025 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1026 
1027 	scctx->isc_txrx = &ixgbe_txrx;
1028 
1029 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1030 
1031 	return (0);
1032 
1033 err_pci:
1034 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1035 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1036 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1037 	ixgbe_free_pci_resources(ctx);
1038 
1039 	return (error);
1040 } /* ixgbe_if_attach_pre */
1041 
1042  /*********************************************************************
1043  * ixgbe_if_attach_post - Device initialization routine, part 2
1044  *
1045  *   Called during driver load, but after interrupts and
1046  *   resources have been allocated and configured.
1047  *   Sets up some data structures not relevant to iflib.
1048  *
1049  *   return 0 on success, positive on failure
1050  *********************************************************************/
1051 static int
1052 ixgbe_if_attach_post(if_ctx_t ctx)
1053 {
1054 	device_t dev;
1055 	struct adapter  *adapter;
1056 	struct ixgbe_hw *hw;
1057 	int             error = 0;
1058 
1059 	dev = iflib_get_dev(ctx);
1060 	adapter = iflib_get_softc(ctx);
1061 	hw = &adapter->hw;
1062 
1063 
1064 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1065 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1066 		device_printf(dev, "Device does not support legacy interrupts\n");
1067 		error = ENXIO;
1068 		goto err;
1069 	}
1070 
1071 	/* Allocate multicast array memory. */
1072 	adapter->mta = malloc(sizeof(*adapter->mta) *
1073 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1074 	if (adapter->mta == NULL) {
1075 		device_printf(dev, "Can not allocate multicast setup array\n");
1076 		error = ENOMEM;
1077 		goto err;
1078 	}
1079 
1080 	/* hw.ix defaults init */
1081 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1082 
1083 	/* Enable the optics for 82599 SFP+ fiber */
1084 	ixgbe_enable_tx_laser(hw);
1085 
1086 	/* Enable power to the phy. */
1087 	ixgbe_set_phy_power(hw, TRUE);
1088 
1089 	ixgbe_initialize_iov(adapter);
1090 
1091 	error = ixgbe_setup_interface(ctx);
1092 	if (error) {
1093 		device_printf(dev, "Interface setup failed: %d\n", error);
1094 		goto err;
1095 	}
1096 
1097 	ixgbe_if_update_admin_status(ctx);
1098 
1099 	/* Initialize statistics */
1100 	ixgbe_update_stats_counters(adapter);
1101 	ixgbe_add_hw_stats(adapter);
1102 
1103 	/* Check PCIE slot type/speed/width */
1104 	ixgbe_get_slot_info(adapter);
1105 
1106 	/*
1107 	 * Do time init and sysctl init here, but
1108 	 * only on the first port of a bypass adapter.
1109 	 */
1110 	ixgbe_bypass_init(adapter);
1111 
1112 	/* Set an initial dmac value */
1113 	adapter->dmac = 0;
1114 	/* Set initial advertised speeds (if applicable) */
1115 	adapter->advertise = ixgbe_get_advertise(adapter);
1116 
1117 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1118 		ixgbe_define_iov_schemas(dev, &error);
1119 
1120 	/* Add sysctls */
1121 	ixgbe_add_device_sysctls(ctx);
1122 
1123 	return (0);
1124 err:
1125 	return (error);
1126 } /* ixgbe_if_attach_post */
1127 
1128 /************************************************************************
1129  * ixgbe_check_wol_support
1130  *
1131  *   Checks whether the adapter's ports are capable of
1132  *   Wake On LAN by reading the adapter's NVM.
1133  *
1134  *   Sets each port's hw->wol_enabled value depending
1135  *   on the value read here.
1136  ************************************************************************/
1137 static void
1138 ixgbe_check_wol_support(struct adapter *adapter)
1139 {
1140 	struct ixgbe_hw *hw = &adapter->hw;
1141 	u16             dev_caps = 0;
1142 
1143 	/* Find out WoL support for port */
1144 	adapter->wol_support = hw->wol_enabled = 0;
1145 	ixgbe_get_device_caps(hw, &dev_caps);
1146 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1147 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1148 	     hw->bus.func == 0))
1149 		adapter->wol_support = hw->wol_enabled = 1;
1150 
1151 	/* Save initial wake up filter configuration */
1152 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1153 
1154 	return;
1155 } /* ixgbe_check_wol_support */
1156 
1157 /************************************************************************
1158  * ixgbe_setup_interface
1159  *
1160  *   Setup networking device structure and register an interface.
1161  ************************************************************************/
1162 static int
1163 ixgbe_setup_interface(if_ctx_t ctx)
1164 {
1165 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1166 	struct adapter *adapter = iflib_get_softc(ctx);
1167 
1168 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1169 
1170 	if_setbaudrate(ifp, IF_Gbps(10));
1171 
1172 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1173 
1174 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1175 
1176 	ixgbe_add_media_types(ctx);
1177 
1178 	/* Autoselect media by default */
1179 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1180 
1181 	return (0);
1182 } /* ixgbe_setup_interface */
1183 
1184 /************************************************************************
1185  * ixgbe_if_get_counter
1186  ************************************************************************/
1187 static uint64_t
1188 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1189 {
1190 	struct adapter *adapter = iflib_get_softc(ctx);
1191 	if_t           ifp = iflib_get_ifp(ctx);
1192 
1193 	switch (cnt) {
1194 	case IFCOUNTER_IPACKETS:
1195 		return (adapter->ipackets);
1196 	case IFCOUNTER_OPACKETS:
1197 		return (adapter->opackets);
1198 	case IFCOUNTER_IBYTES:
1199 		return (adapter->ibytes);
1200 	case IFCOUNTER_OBYTES:
1201 		return (adapter->obytes);
1202 	case IFCOUNTER_IMCASTS:
1203 		return (adapter->imcasts);
1204 	case IFCOUNTER_OMCASTS:
1205 		return (adapter->omcasts);
1206 	case IFCOUNTER_COLLISIONS:
1207 		return (0);
1208 	case IFCOUNTER_IQDROPS:
1209 		return (adapter->iqdrops);
1210 	case IFCOUNTER_OQDROPS:
1211 		return (0);
1212 	case IFCOUNTER_IERRORS:
1213 		return (adapter->ierrors);
1214 	default:
1215 		return (if_get_counter_default(ifp, cnt));
1216 	}
1217 } /* ixgbe_if_get_counter */
1218 
1219 /************************************************************************
1220  * ixgbe_if_i2c_req
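 *
 *   Reads bytes from an SFP module over I2C on behalf of iflib's SIOCGI2C
 *   handling (e.g. the module EEPROM dump shown by "ifconfig -v").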
1221  ************************************************************************/
1222 static int
1223 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1224 {
1225 	struct adapter		*adapter = iflib_get_softc(ctx);
1226 	struct ixgbe_hw 	*hw = &adapter->hw;
1227 	int 			i;
1228 
1229 
1230 	if (hw->phy.ops.read_i2c_byte == NULL)
1231 		return (ENXIO);
1232 	for (i = 0; i < req->len; i++)
1233 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1234 		    req->dev_addr, &req->data[i]);
1235 	return (0);
1236 } /* ixgbe_if_i2c_req */
1237 
1238 /************************************************************************
1239  * ixgbe_add_media_types
1240  ************************************************************************/
1241 static void
1242 ixgbe_add_media_types(if_ctx_t ctx)
1243 {
1244 	struct adapter  *adapter = iflib_get_softc(ctx);
1245 	struct ixgbe_hw *hw = &adapter->hw;
1246 	device_t        dev = iflib_get_dev(ctx);
1247 	u64             layer;
1248 
1249 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1250 
1251 	/* Media types with matching FreeBSD media defines */
1252 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1253 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1254 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1255 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1256 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1257 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1258 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1259 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1260 
1261 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1262 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1263 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1264 		    NULL);
1265 
1266 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1267 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1268 		if (hw->phy.multispeed_fiber)
1269 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1270 			    NULL);
1271 	}
1272 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1273 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1274 		if (hw->phy.multispeed_fiber)
1275 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1276 			    NULL);
1277 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1278 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1279 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1280 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1281 
1282 #ifdef IFM_ETH_XTYPE
1283 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1284 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1285 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1286 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1287 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1288 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1289 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1290 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1291 #else
1292 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1293 		device_printf(dev, "Media supported: 10GbaseKR\n");
1294 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1295 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1296 	}
1297 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1298 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1299 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1300 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1301 	}
1302 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1303 		device_printf(dev, "Media supported: 1000baseKX\n");
1304 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1305 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1306 	}
1307 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1308 		device_printf(dev, "Media supported: 2500baseKX\n");
1309 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1310 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1311 	}
1312 #endif
1313 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1314 		device_printf(dev, "Media supported: 1000baseBX\n");
1315 
1316 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1317 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1318 		    0, NULL);
1319 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1320 	}
1321 
1322 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1323 } /* ixgbe_add_media_types */
1324 
1325 /************************************************************************
1326  * ixgbe_is_sfp
1327  ************************************************************************/
1328 static inline bool
1329 ixgbe_is_sfp(struct ixgbe_hw *hw)
1330 {
1331 	switch (hw->mac.type) {
1332 	case ixgbe_mac_82598EB:
1333 		if (hw->phy.type == ixgbe_phy_nl)
1334 			return (TRUE);
1335 		return (FALSE);
1336 	case ixgbe_mac_82599EB:
1337 		switch (hw->mac.ops.get_media_type(hw)) {
1338 		case ixgbe_media_type_fiber:
1339 		case ixgbe_media_type_fiber_qsfp:
1340 			return (TRUE);
1341 		default:
1342 			return (FALSE);
1343 		}
1344 	case ixgbe_mac_X550EM_x:
1345 	case ixgbe_mac_X550EM_a:
1346 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1347 			return (TRUE);
1348 		return (FALSE);
1349 	default:
1350 		return (FALSE);
1351 	}
1352 } /* ixgbe_is_sfp */
1353 
1354 /************************************************************************
1355  * ixgbe_config_link
1356  ************************************************************************/
1357 static void
1358 ixgbe_config_link(if_ctx_t ctx)
1359 {
1360 	struct adapter  *adapter = iflib_get_softc(ctx);
1361 	struct ixgbe_hw *hw = &adapter->hw;
1362 	u32             autoneg, err = 0;
1363 	bool            sfp, negotiate;
1364 
1365 	sfp = ixgbe_is_sfp(hw);
1366 
1367 	if (sfp) {
1368 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1369 		iflib_admin_intr_deferred(ctx);
1370 	} else {
1371 		if (hw->mac.ops.check_link)
1372 			err = ixgbe_check_link(hw, &adapter->link_speed,
1373 			    &adapter->link_up, FALSE);
1374 		if (err)
1375 			return;
1376 		autoneg = hw->phy.autoneg_advertised;
1377 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1378 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1379 			    &negotiate);
1380 		if (err)
1381 			return;
1382 		if (hw->mac.ops.setup_link)
1383 			err = hw->mac.ops.setup_link(hw, autoneg,
1384 			    adapter->link_up);
1385 	}
1386 } /* ixgbe_config_link */
1387 
1388 /************************************************************************
1389  * ixgbe_update_stats_counters - Update board statistics counters.
1390  ************************************************************************/
1391 static void
1392 ixgbe_update_stats_counters(struct adapter *adapter)
1393 {
1394 	struct ixgbe_hw       *hw = &adapter->hw;
1395 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1396 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1397 	u32                   lxoffrxc;
1398 	u64                   total_missed_rx = 0;
1399 
1400 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1401 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1402 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1403 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1404 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1405 
1406 	for (int i = 0; i < 16; i++) {
1407 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1408 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1409 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1410 	}
1411 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1412 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1413 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1414 
1415 	/* Hardware workaround, gprc counts missed packets */
1416 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1417 	stats->gprc -= missed_rx;
1418 
1419 	if (hw->mac.type != ixgbe_mac_82598EB) {
1420 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1421 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1422 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1423 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1424 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1425 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1426 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1427 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1428 		stats->lxoffrxc += lxoffrxc;
1429 	} else {
1430 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1431 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1432 		stats->lxoffrxc += lxoffrxc;
1433 		/* 82598 only has a counter in the high register */
1434 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1435 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1436 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1437 	}
1438 
1439 	/*
1440 	 * For watchdog management we need to know if we have been paused
1441 	 * during the last interval, so capture that here.
1442 	 */
1443 	if (lxoffrxc)
1444 		adapter->shared->isc_pause_frames = 1;
1445 
1446 	/*
1447 	 * Workaround: mprc hardware is incorrectly counting
1448 	 * broadcasts, so for now we subtract those.
1449 	 */
1450 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1451 	stats->bprc += bprc;
1452 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1453 	if (hw->mac.type == ixgbe_mac_82598EB)
1454 		stats->mprc -= bprc;
1455 
1456 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1457 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1458 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1459 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1460 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1461 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1462 
1463 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1464 	stats->lxontxc += lxon;
1465 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1466 	stats->lxofftxc += lxoff;
1467 	total = lxon + lxoff;
1468 
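	/*
	 * The general TX counters include the XON/XOFF pause frames
	 * counted above, so back those out to leave only data frames.
	 */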
1469 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1470 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1471 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1472 	stats->gptc -= total;
1473 	stats->mptc -= total;
1474 	stats->ptc64 -= total;
1475 	stats->gotc -= total * ETHER_MIN_LEN;
1476 
1477 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1478 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1479 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1480 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1481 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1482 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1483 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1484 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1485 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1486 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1487 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1488 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1489 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1490 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1491 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1492 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1493 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1494 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1495 	/* Only read FCoE counters on 82599 and newer */
1496 	if (hw->mac.type != ixgbe_mac_82598EB) {
1497 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1498 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1499 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1500 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1501 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1502 	}
1503 
1504 	/* Fill out the OS statistics structure */
1505 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1506 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1507 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1508 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1509 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1510 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1511 	IXGBE_SET_COLLISIONS(adapter, 0);
1512 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1513 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1514 } /* ixgbe_update_stats_counters */
1515 
1516 /************************************************************************
1517  * ixgbe_add_hw_stats
1518  *
1519  *   Add sysctl variables, one per statistic, to the system.
1520  ************************************************************************/
1521 static void
1522 ixgbe_add_hw_stats(struct adapter *adapter)
1523 {
1524 	device_t               dev = iflib_get_dev(adapter->ctx);
1525 	struct ix_rx_queue     *rx_que;
1526 	struct ix_tx_queue     *tx_que;
1527 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1528 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1529 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1530 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1531 	struct sysctl_oid      *stat_node, *queue_node;
1532 	struct sysctl_oid_list *stat_list, *queue_list;
1533 	int                    i;
1534 
1535 #define QUEUE_NAME_LEN 32
1536 	char                   namebuf[QUEUE_NAME_LEN];
1537 
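	/*
	 * Everything added here appears under the device's sysctl tree,
	 * e.g. dev.ix.<unit>.queue0.tx_packets or
	 * dev.ix.<unit>.mac_stats.crc_errs.
	 */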
1538 	/* Driver Statistics */
1539 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1540 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1541 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1542 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1543 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1544 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1545 
1546 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1547 		struct tx_ring *txr = &tx_que->txr;
1548 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1549 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1550 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1551 		queue_list = SYSCTL_CHILDREN(queue_node);
1552 
1553 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1554 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1555 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1556 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1557 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
1558 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1559 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1560 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1561 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1562 		    CTLFLAG_RD, &txr->total_packets,
1563 		    "Queue Packets Transmitted");
1564 	}
1565 
1566 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1567 		struct rx_ring *rxr = &rx_que->rxr;
1568 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1569 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1570 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1571 		queue_list = SYSCTL_CHILDREN(queue_node);
1572 
1573 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1574 		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1575 		    &adapter->rx_queues[i], 0,
1576 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1577 		    "Interrupt Rate");
1578 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1579 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1580 		    "irqs on this queue");
1581 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1582 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1583 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1584 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1585 		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
1586 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1587 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1588 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1589 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1590 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1591 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1592 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1593 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1594 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1595 	}
1596 
1597 	/* MAC stats get their own sub node */
1598 
1599 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1600 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1601 	stat_list = SYSCTL_CHILDREN(stat_node);
1602 
1603 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1604 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1605 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1606 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1607 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1608 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1609 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1610 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1611 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1612 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1613 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1614 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1615 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1616 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1617 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1618 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1619 
1620 	/* Flow Control stats */
1621 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1622 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1623 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1624 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1625 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1626 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1627 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1628 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1629 
1630 	/* Packet Reception Stats */
1631 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1632 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1633 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1634 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1635 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1636 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1637 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1638 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1639 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1640 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1641 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1642 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1643 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1644 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1645 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1646 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1647 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1648 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1649 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1650 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1651 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1652 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1654 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1656 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1657 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1658 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1660 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1662 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1664 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1665 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1666 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1667 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1668 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1669 
1670 	/* Packet Transmission Stats */
1671 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1672 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1674 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1676 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1678 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1680 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1681 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1682 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1683 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1684 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1685 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1686 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1687 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1688 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1689 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1690 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1691 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1692 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1693 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1694 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1695 } /* ixgbe_add_hw_stats */
1696 
1697 /************************************************************************
1698  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1699  *
1700  *   Retrieves the TDH value from the hardware
1701  ************************************************************************/
1702 static int
1703 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1704 {
1705 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1706 	int            error;
1707 	unsigned int   val;
1708 
1709 	if (!txr)
1710 		return (0);
1711 
1712 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1713 	error = sysctl_handle_int(oidp, &val, 0, req);
1714 	if (error || !req->newptr)
1715 		return error;
1716 
1717 	return (0);
1718 } /* ixgbe_sysctl_tdh_handler */
1719 
1720 /************************************************************************
1721  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1722  *
1723  *   Retrieves the TDT value from the hardware
1724  ************************************************************************/
1725 static int
1726 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1727 {
1728 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1729 	int            error;
1730 	unsigned int   val;
1731 
1732 	if (!txr)
1733 		return (0);
1734 
1735 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1736 	error = sysctl_handle_int(oidp, &val, 0, req);
1737 	if (error || !req->newptr)
1738 		return error;
1739 
1740 	return (0);
1741 } /* ixgbe_sysctl_tdt_handler */
1742 
1743 /************************************************************************
1744  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1745  *
1746  *   Retrieves the RDH value from the hardware
1747  ************************************************************************/
1748 static int
1749 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1750 {
1751 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1752 	int            error;
1753 	unsigned int   val;
1754 
1755 	if (!rxr)
1756 		return (0);
1757 
1758 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1759 	error = sysctl_handle_int(oidp, &val, 0, req);
1760 	if (error || !req->newptr)
1761 		return error;
1762 
1763 	return (0);
1764 } /* ixgbe_sysctl_rdh_handler */
1765 
1766 /************************************************************************
1767  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1768  *
1769  *   Retrieves the RDT value from the hardware
1770  ************************************************************************/
1771 static int
1772 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1773 {
1774 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1775 	int            error;
1776 	unsigned int   val;
1777 
1778 	if (!rxr)
1779 		return (0);
1780 
1781 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1782 	error = sysctl_handle_int(oidp, &val, 0, req);
1783 	if (error || !req->newptr)
1784 		return error;
1785 
1786 	return (0);
1787 } /* ixgbe_sysctl_rdt_handler */
1788 
1789 /************************************************************************
1790  * ixgbe_if_vlan_register
1791  *
1792  *   Run via the VLAN config EVENT; it lets us use the
1793  *   HW filter table since we are given the VLAN id. This
1794  *   only creates the entry in the soft version of the
1795  *   VFTA; init repopulates the real table.
1796  ************************************************************************/
1797 static void
1798 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1799 {
1800 	struct adapter *adapter = iflib_get_softc(ctx);
1801 	u16            index, bit;
1802 
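	/*
	 * The VFTA is 128 32-bit words: bits 11:5 of the VLAN id
	 * select the word, bits 4:0 select the bit within it.
	 */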
1803 	index = (vtag >> 5) & 0x7F;
1804 	bit = vtag & 0x1F;
1805 	adapter->shadow_vfta[index] |= (1 << bit);
1806 	++adapter->num_vlans;
1807 	ixgbe_setup_vlan_hw_support(ctx);
1808 } /* ixgbe_if_vlan_register */
1809 
1810 /************************************************************************
1811  * ixgbe_if_vlan_unregister
1812  *
1813  *   Run via the VLAN unconfig EVENT; remove our entry from the soft VFTA.
1814  ************************************************************************/
1815 static void
1816 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1817 {
1818 	struct adapter *adapter = iflib_get_softc(ctx);
1819 	u16            index, bit;
1820 
1821 	index = (vtag >> 5) & 0x7F;
1822 	bit = vtag & 0x1F;
1823 	adapter->shadow_vfta[index] &= ~(1 << bit);
1824 	--adapter->num_vlans;
1825 	/* Re-init to load the changes */
1826 	ixgbe_setup_vlan_hw_support(ctx);
1827 } /* ixgbe_if_vlan_unregister */
1828 
1829 /************************************************************************
1830  * ixgbe_setup_vlan_hw_support
1831  ************************************************************************/
1832 static void
1833 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1834 {
1835 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1836 	struct adapter  *adapter = iflib_get_softc(ctx);
1837 	struct ixgbe_hw *hw = &adapter->hw;
1838 	struct rx_ring  *rxr;
1839 	int             i;
1840 	u32             ctrl;
1841 
1842 
1843 	/*
1844 	 * When this is called from a (re)init, a soft reset has
1845 	 * already cleared the VFTA and other VLAN state; if no
1846 	 * VLANs have been registered there is nothing to do, so
1847 	 * just return.
1848 	 */
1849 	if (adapter->num_vlans == 0)
1850 		return;
1851 
1852 	/* Setup the queues for vlans */
1853 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1854 		for (i = 0; i < adapter->num_rx_queues; i++) {
1855 			rxr = &adapter->rx_queues[i].rxr;
1856 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1857 			/* On 82599 and newer, the VLAN enable is per-queue in RXDCTL */
1858 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1859 				ctrl |= IXGBE_RXDCTL_VME;
1860 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1861 			}
1862 			rxr->vtag_strip = TRUE;
1863 		}
1864 	}
1865 
1866 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1867 		return;
1868 	/*
1869 	 * A soft reset zero's out the VFTA, so
1870 	 * A soft reset zeroes out the VFTA, so
1871 	 */
1872 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1873 		if (adapter->shadow_vfta[i] != 0)
1874 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1875 			    adapter->shadow_vfta[i]);
1876 
1877 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1878 	/* Enable the Filter Table if enabled */
1879 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1880 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1881 		ctrl |= IXGBE_VLNCTRL_VFE;
1882 	}
1883 	if (hw->mac.type == ixgbe_mac_82598EB)
1884 		ctrl |= IXGBE_VLNCTRL_VME;
1885 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1886 } /* ixgbe_setup_vlan_hw_support */
1887 
1888 /************************************************************************
1889  * ixgbe_get_slot_info
1890  *
1891  *   Get the width and transaction speed of
1892  *   the slot this adapter is plugged into.
1893  ************************************************************************/
1894 static void
1895 ixgbe_get_slot_info(struct adapter *adapter)
1896 {
1897 	device_t        dev = iflib_get_dev(adapter->ctx);
1898 	struct ixgbe_hw *hw = &adapter->hw;
1899 	int             bus_info_valid = TRUE;
1900 	u32             offset;
1901 	u16             link;
1902 
1903 	/* Some devices are behind an internal bridge */
1904 	switch (hw->device_id) {
1905 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1906 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1907 		goto get_parent_info;
1908 	default:
1909 		break;
1910 	}
1911 
1912 	ixgbe_get_bus_info(hw);
1913 
1914 	/*
1915 	 * Some devices don't use PCI-E; rather than display
1916 	 * "Unknown" for their bus speed and width, skip the display.
1917 	 */
1918 	switch (hw->mac.type) {
1919 	case ixgbe_mac_X550EM_x:
1920 	case ixgbe_mac_X550EM_a:
1921 		return;
1922 	default:
1923 		goto display;
1924 	}
1925 
1926 get_parent_info:
1927 	/*
1928 	 * For the Quad port adapter we need to parse back
1929 	 * up the PCI tree to find the speed of the expansion
1930 	 * slot into which this adapter is plugged. A bit more work.
1931 	 */
1932 	dev = device_get_parent(device_get_parent(dev));
1933 #ifdef IXGBE_DEBUG
1934 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1935 	    pci_get_slot(dev), pci_get_function(dev));
1936 #endif
1937 	dev = device_get_parent(device_get_parent(dev));
1938 #ifdef IXGBE_DEBUG
1939 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1940 	    pci_get_slot(dev), pci_get_function(dev));
1941 #endif
1942 	/* Now get the PCI Express Capabilities offset */
1943 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1944 		/*
1945 		 * Hmm...can't get PCI-Express capabilities.
1946 		 * Falling back to default method.
1947 		 */
1948 		bus_info_valid = FALSE;
1949 		ixgbe_get_bus_info(hw);
1950 		goto display;
1951 	}
1952 	/* ...and read the Link Status Register */
1953 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1954 	ixgbe_set_pci_config_data_generic(hw, link);
1955 
1956 display:
1957 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1958 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1959 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1960 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1961 	     "Unknown"),
1962 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1963 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1964 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1965 	     "Unknown"));
1966 
1967 	if (bus_info_valid) {
1968 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1969 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1970 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1971 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1972 			device_printf(dev, "For optimal performance an x8 PCIE or x4 PCIE Gen2 slot is required.\n");
1973 		}
1974 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1975 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1976 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1977 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1978 			device_printf(dev, "For optimal performance an x8 PCIE Gen3 slot is required.\n");
1979 		}
1980 	} else
1981 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1982 
1983 	return;
1984 } /* ixgbe_get_slot_info */
1985 
1986 /************************************************************************
1987  * ixgbe_if_msix_intr_assign
1988  *
1989  *   Setup MSI-X Interrupt resources and handlers
1990  ************************************************************************/
1991 static int
1992 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1993 {
1994 	struct adapter     *adapter = iflib_get_softc(ctx);
1995 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1996 	struct ix_tx_queue *tx_que;
1997 	int                error, rid, vector = 0;
1998 	int                cpu_id = 0;
1999 	char               buf[16];
2000 
2001 	/* MSI-X resource IDs start at 1; the admin (link) vector follows the last queue vector */
2002 	rid = vector + 1;
2003 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2004 		rid = vector + 1;
2005 
2006 		snprintf(buf, sizeof(buf), "rxq%d", i);
2007 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2008 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2009 
2010 		if (error) {
2011 			device_printf(iflib_get_dev(ctx),
2012 			    "Failed to allocate que int %d err: %d", i, error);
2013 			adapter->num_rx_queues = i + 1;
2014 			goto fail;
2015 		}
2016 
2017 		rx_que->msix = vector;
2018 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2019 			/*
2020 			 * The queue ID is used as the RSS layer bucket ID.
2021 			 * We look up the queue ID -> RSS CPU ID and select
2022 			 * that.
2023 			 */
2024 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2025 		} else {
2026 			/*
2027 			 * Bind the MSI-X vector, and thus the
2028 			 * rings to the corresponding cpu.
2029 			 *
2030 			 * This just happens to match the default RSS
2031 			 * round-robin bucket -> queue -> CPU allocation.
2032 			 */
2033 			if (adapter->num_rx_queues > 1)
2034 				cpu_id = i;
2035 		}
2036 
2037 	}
2038 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2039 		snprintf(buf, sizeof(buf), "txq%d", i);
2040 		tx_que = &adapter->tx_queues[i];
2041 		tx_que->msix = i % adapter->num_rx_queues;
2042 		iflib_softirq_alloc_generic(ctx,
2043 		    &adapter->rx_queues[tx_que->msix].que_irq,
2044 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2045 	}
2046 	rid = vector + 1;
2047 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2048 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2049 	if (error) {
2050 		device_printf(iflib_get_dev(ctx),
2051 		    "Failed to register admin handler");
2052 		return (error);
2053 	}
2054 
2055 	adapter->vector = vector;
2056 
2057 	return (0);
2058 fail:
2059 	iflib_irq_free(ctx, &adapter->irq);
2060 	rx_que = adapter->rx_queues;
2061 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2062 		iflib_irq_free(ctx, &rx_que->que_irq);
2063 
2064 	return (error);
2065 } /* ixgbe_if_msix_intr_assign */
2066 
2067 /*********************************************************************
2068  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2069  **********************************************************************/
2070 static int
2071 ixgbe_msix_que(void *arg)
2072 {
2073 	struct ix_rx_queue *que = arg;
2074 	struct adapter     *adapter = que->adapter;
2075 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2076 
2077 	/* Protect against spurious interrupts */
2078 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2079 		return (FILTER_HANDLED);
2080 
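	/*
	 * Mask this queue's vector; iflib is expected to re-enable it
	 * via the rx-queue interrupt enable method once the deferred
	 * RX/TX work has been processed.
	 */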
2081 	ixgbe_disable_queue(adapter, que->msix);
2082 	++que->irqs;
2083 
2084 	return (FILTER_SCHEDULE_THREAD);
2085 } /* ixgbe_msix_que */
2086 
2087 /************************************************************************
2088  * ixgbe_if_media_status - Media Ioctl callback
2089  *
2090  *   Called whenever the user queries the status of
2091  *   the interface using ifconfig.
2092  ************************************************************************/
2093 static void
2094 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2095 {
2096 	struct adapter  *adapter = iflib_get_softc(ctx);
2097 	struct ixgbe_hw *hw = &adapter->hw;
2098 	int             layer;
2099 
2100 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2101 
2102 	ifmr->ifm_status = IFM_AVALID;
2103 	ifmr->ifm_active = IFM_ETHER;
2104 
2105 	if (!adapter->link_active)
2106 		return;
2107 
2108 	ifmr->ifm_status |= IFM_ACTIVE;
2109 	layer = adapter->phy_layer;
2110 
2111 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2112 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2113 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2114 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2115 		switch (adapter->link_speed) {
2116 		case IXGBE_LINK_SPEED_10GB_FULL:
2117 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2118 			break;
2119 		case IXGBE_LINK_SPEED_1GB_FULL:
2120 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2121 			break;
2122 		case IXGBE_LINK_SPEED_100_FULL:
2123 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2124 			break;
2125 		case IXGBE_LINK_SPEED_10_FULL:
2126 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2127 			break;
2128 		}
2129 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2130 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2131 		switch (adapter->link_speed) {
2132 		case IXGBE_LINK_SPEED_10GB_FULL:
2133 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2134 			break;
2135 		}
2136 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2137 		switch (adapter->link_speed) {
2138 		case IXGBE_LINK_SPEED_10GB_FULL:
2139 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2140 			break;
2141 		case IXGBE_LINK_SPEED_1GB_FULL:
2142 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2143 			break;
2144 		}
2145 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2146 		switch (adapter->link_speed) {
2147 		case IXGBE_LINK_SPEED_10GB_FULL:
2148 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2149 			break;
2150 		case IXGBE_LINK_SPEED_1GB_FULL:
2151 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2152 			break;
2153 		}
2154 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2155 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2156 		switch (adapter->link_speed) {
2157 		case IXGBE_LINK_SPEED_10GB_FULL:
2158 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2159 			break;
2160 		case IXGBE_LINK_SPEED_1GB_FULL:
2161 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2162 			break;
2163 		}
2164 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2165 		switch (adapter->link_speed) {
2166 		case IXGBE_LINK_SPEED_10GB_FULL:
2167 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2168 			break;
2169 		}
2170 	/*
2171 	 * XXX: These need to use the proper media types once
2172 	 * they're added.
2173 	 */
2174 #ifndef IFM_ETH_XTYPE
2175 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2176 		switch (adapter->link_speed) {
2177 		case IXGBE_LINK_SPEED_10GB_FULL:
2178 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2179 			break;
2180 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2181 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2182 			break;
2183 		case IXGBE_LINK_SPEED_1GB_FULL:
2184 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2185 			break;
2186 		}
2187 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2188 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2189 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2190 		switch (adapter->link_speed) {
2191 		case IXGBE_LINK_SPEED_10GB_FULL:
2192 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2193 			break;
2194 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2195 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2196 			break;
2197 		case IXGBE_LINK_SPEED_1GB_FULL:
2198 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2199 			break;
2200 		}
2201 #else
2202 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2203 		switch (adapter->link_speed) {
2204 		case IXGBE_LINK_SPEED_10GB_FULL:
2205 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2206 			break;
2207 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2208 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2209 			break;
2210 		case IXGBE_LINK_SPEED_1GB_FULL:
2211 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2212 			break;
2213 		}
2214 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2215 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2216 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2217 		switch (adapter->link_speed) {
2218 		case IXGBE_LINK_SPEED_10GB_FULL:
2219 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2220 			break;
2221 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2222 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2223 			break;
2224 		case IXGBE_LINK_SPEED_1GB_FULL:
2225 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2226 			break;
2227 		}
2228 #endif
2229 
2230 	/* If nothing is recognized... */
2231 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2232 		ifmr->ifm_active |= IFM_UNKNOWN;
2233 
2234 	/* Display current flow control setting used on link */
2235 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2236 	    hw->fc.current_mode == ixgbe_fc_full)
2237 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2238 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2239 	    hw->fc.current_mode == ixgbe_fc_full)
2240 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2241 } /* ixgbe_if_media_status */
2242 
2243 /************************************************************************
2244  * ixgbe_if_media_change - Media Ioctl callback
2245  *
2246  *   Called when the user changes speed/duplex using
2247  *   the media/mediaopt options with ifconfig.
2248  ************************************************************************/
2249 static int
2250 ixgbe_if_media_change(if_ctx_t ctx)
2251 {
2252 	struct adapter   *adapter = iflib_get_softc(ctx);
2253 	struct ifmedia   *ifm = iflib_get_media(ctx);
2254 	struct ixgbe_hw  *hw = &adapter->hw;
2255 	ixgbe_link_speed speed = 0;
2256 
2257 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2258 
2259 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2260 		return (EINVAL);
2261 
2262 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2263 		return (EPERM);
2264 
2265 	/*
2266 	 * We don't actually need to check against the supported
2267 	 * media types of the adapter; ifmedia will take care of
2268 	 * that for us.
2269 	 */
2270 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2271 	case IFM_AUTO:
2272 	case IFM_10G_T:
2273 		speed |= IXGBE_LINK_SPEED_100_FULL;
2274 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2275 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2276 		break;
2277 	case IFM_10G_LRM:
2278 	case IFM_10G_LR:
2279 #ifndef IFM_ETH_XTYPE
2280 	case IFM_10G_SR: /* KR, too */
2281 	case IFM_10G_CX4: /* KX4 */
2282 #else
2283 	case IFM_10G_KR:
2284 	case IFM_10G_KX4:
2285 #endif
2286 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2287 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2288 		break;
2289 #ifndef IFM_ETH_XTYPE
2290 	case IFM_1000_CX: /* KX */
2291 #else
2292 	case IFM_1000_KX:
2293 #endif
2294 	case IFM_1000_LX:
2295 	case IFM_1000_SX:
2296 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2297 		break;
2298 	case IFM_1000_T:
2299 		speed |= IXGBE_LINK_SPEED_100_FULL;
2300 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2301 		break;
2302 	case IFM_10G_TWINAX:
2303 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2304 		break;
2305 	case IFM_100_TX:
2306 		speed |= IXGBE_LINK_SPEED_100_FULL;
2307 		break;
2308 	case IFM_10_T:
2309 		speed |= IXGBE_LINK_SPEED_10_FULL;
2310 		break;
2311 	default:
2312 		goto invalid;
2313 	}
2314 
2315 	hw->mac.autotry_restart = TRUE;
2316 	hw->mac.ops.setup_link(hw, speed, TRUE);
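	/*
	 * Record the requested speeds in the same bitmask format used
	 * by the advertise_speed sysctl: 0x1 = 100M, 0x2 = 1G,
	 * 0x4 = 10G, 0x8 = 10M.
	 */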
2317 	adapter->advertise =
2318 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2319 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2320 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2321 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2322 
2323 	return (0);
2324 
2325 invalid:
2326 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2327 
2328 	return (EINVAL);
2329 } /* ixgbe_if_media_change */
2330 
2331 /************************************************************************
2332  * ixgbe_if_promisc_set
2333  ************************************************************************/
2334 static int
2335 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2336 {
2337 	struct adapter *adapter = iflib_get_softc(ctx);
2338 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2339 	u32            rctl;
2340 	int            mcnt = 0;
2341 
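	/*
	 * FCTRL.UPE enables unicast promiscuous mode and FCTRL.MPE
	 * multicast promiscuous mode; start with UPE cleared and only
	 * set the bits the current interface flags require.
	 */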
2342 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2343 	rctl &= (~IXGBE_FCTRL_UPE);
2344 	if (ifp->if_flags & IFF_ALLMULTI)
2345 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2346 	else {
2347 		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
2348 	}
2349 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2350 		rctl &= (~IXGBE_FCTRL_MPE);
2351 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2352 
2353 	if (ifp->if_flags & IFF_PROMISC) {
2354 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2355 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2356 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2357 		rctl |= IXGBE_FCTRL_MPE;
2358 		rctl &= ~IXGBE_FCTRL_UPE;
2359 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2360 	}
2361 	return (0);
2362 } /* ixgbe_if_promisc_set */
2363 
2364 /************************************************************************
2365  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2366  ************************************************************************/
2367 static int
2368 ixgbe_msix_link(void *arg)
2369 {
2370 	struct adapter  *adapter = arg;
2371 	struct ixgbe_hw *hw = &adapter->hw;
2372 	u32             eicr, eicr_mask;
2373 	s32             retval;
2374 
2375 	++adapter->link_irq;
2376 
2377 	/* Pause other interrupts */
2378 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2379 
2380 	/* First get the cause */
2381 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2382 	/* Be sure the queue bits are not cleared */
2383 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2384 	/* Clear interrupt with write */
2385 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2386 
2387 	/* Link status change */
2388 	if (eicr & IXGBE_EICR_LSC) {
2389 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2390 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2391 	}
2392 
2393 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2394 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2395 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2396 			/* This is probably overkill :) */
2397 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2398 				return (FILTER_HANDLED);
2399 			/* Disable the interrupt */
2400 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2401 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2402 		} else
2403 			if (eicr & IXGBE_EICR_ECC) {
2404 				device_printf(iflib_get_dev(adapter->ctx),
2405 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2406 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2407 			}
2408 
2409 		/* Check for over temp condition */
2410 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2411 			switch (adapter->hw.mac.type) {
2412 			case ixgbe_mac_X550EM_a:
2413 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2414 					break;
2415 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2416 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2417 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2418 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2419 				retval = hw->phy.ops.check_overtemp(hw);
2420 				if (retval != IXGBE_ERR_OVERTEMP)
2421 					break;
2422 				device_printf(iflib_get_dev(adapter->ctx),
2423 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2424 				device_printf(iflib_get_dev(adapter->ctx),
2425 				    "System shutdown required!\n");
2426 				break;
2427 			default:
2428 				if (!(eicr & IXGBE_EICR_TS))
2429 					break;
2430 				retval = hw->phy.ops.check_overtemp(hw);
2431 				if (retval != IXGBE_ERR_OVERTEMP)
2432 					break;
2433 				device_printf(iflib_get_dev(adapter->ctx),
2434 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2435 				device_printf(iflib_get_dev(adapter->ctx),
2436 				    "System shutdown required!\n");
2437 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2438 				break;
2439 			}
2440 		}
2441 
2442 		/* Check for VF message */
2443 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2444 		    (eicr & IXGBE_EICR_MAILBOX))
2445 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2446 	}
2447 
2448 	if (ixgbe_is_sfp(hw)) {
2449 		/* Pluggable optics-related interrupt */
2450 		if (hw->mac.type >= ixgbe_mac_X540)
2451 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2452 		else
2453 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2454 
2455 		if (eicr & eicr_mask) {
2456 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2457 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2458 		}
2459 
2460 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2461 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2462 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2463 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2464 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2465 		}
2466 	}
2467 
2468 	/* Check for fan failure */
2469 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2470 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2471 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2472 	}
2473 
2474 	/* External PHY interrupt */
2475 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2476 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2477 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2478 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2479 	}
2480 
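	/* Only wake the admin task if any work was requested above. */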
2481 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2482 } /* ixgbe_msix_link */
2483 
2484 /************************************************************************
2485  * ixgbe_sysctl_interrupt_rate_handler
2486  ************************************************************************/
2487 static int
2488 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2489 {
2490 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2491 	int                error;
2492 	unsigned int       reg, usec, rate;
2493 
2494 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
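	/*
	 * The EITR interval field occupies bits 3-11 and is in units of
	 * 2 usec, so the current rate is 1000000 / (2 * interval), i.e.
	 * 500000 / interval as computed below.
	 */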
2495 	usec = ((reg & 0x0FF8) >> 3);
2496 	if (usec > 0)
2497 		rate = 500000 / usec;
2498 	else
2499 		rate = 0;
2500 	error = sysctl_handle_int(oidp, &rate, 0, req);
2501 	if (error || !req->newptr)
2502 		return error;
2503 	reg &= ~0xfff; /* default, no limitation */
2504 	ixgbe_max_interrupt_rate = 0;
2505 	if (rate > 0 && rate < 500000) {
2506 		if (rate < 1000)
2507 			rate = 1000;
2508 		ixgbe_max_interrupt_rate = rate;
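		/*
		 * (500000 / rate) is the new interval in 2 usec units;
		 * multiplying by 8 (the bit-3 field offset) gives
		 * 4000000 / rate.
		 */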
2509 		reg |= ((4000000/rate) & 0xff8);
2510 	}
2511 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2512 
2513 	return (0);
2514 } /* ixgbe_sysctl_interrupt_rate_handler */
2515 
2516 /************************************************************************
2517  * ixgbe_add_device_sysctls
2518  ************************************************************************/
2519 static void
2520 ixgbe_add_device_sysctls(if_ctx_t ctx)
2521 {
2522 	struct adapter         *adapter = iflib_get_softc(ctx);
2523 	device_t               dev = iflib_get_dev(ctx);
2524 	struct ixgbe_hw        *hw = &adapter->hw;
2525 	struct sysctl_oid_list *child;
2526 	struct sysctl_ctx_list *ctx_list;
2527 
2528 	ctx_list = device_get_sysctl_ctx(dev);
2529 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2530 
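	/*
	 * Everything added below hangs off the device's sysctl tree and
	 * shows up as, e.g., dev.ix.<unit>.fc and
	 * dev.ix.<unit>.advertise_speed.
	 */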
2531 	/* Sysctls for all devices */
2532 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2533 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2534 	    adapter, 0, ixgbe_sysctl_flowcntl, "I",
2535 	    IXGBE_SYSCTL_DESC_SET_FC);
2536 
2537 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2538 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2539 	    adapter, 0, ixgbe_sysctl_advertise, "I",
2540 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2541 
2542 #ifdef IXGBE_DEBUG
2543 	/* testing sysctls (for all devices) */
2544 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2545 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2546 	    adapter, 0, ixgbe_sysctl_power_state,
2547 	    "I", "PCI Power State");
2548 
2549 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2550 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2551 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2552 #endif
2553 	/* for X550 series devices */
2554 	if (hw->mac.type >= ixgbe_mac_X550)
2555 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2556 		    CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2557 		    adapter, 0, ixgbe_sysctl_dmac,
2558 		    "I", "DMA Coalesce");
2559 
2560 	/* for WoL-capable devices */
2561 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2562 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2563 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2564 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2565 
2566 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2567 		    CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2568 		    adapter, 0, ixgbe_sysctl_wufc,
2569 		    "I", "Enable/Disable Wake Up Filters");
2570 	}
2571 
2572 	/* for X552/X557-AT devices */
2573 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2574 		struct sysctl_oid *phy_node;
2575 		struct sysctl_oid_list *phy_list;
2576 
2577 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2578 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2579 		phy_list = SYSCTL_CHILDREN(phy_node);
2580 
2581 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2582 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2583 		    adapter, 0, ixgbe_sysctl_phy_temp,
2584 		    "I", "Current External PHY Temperature (Celsius)");
2585 
2586 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2587 		    "overtemp_occurred",
2588 		    CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
2589 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2590 		    "External PHY High Temperature Event Occurred");
2591 	}
2592 
2593 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2594 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2595 		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
2596 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2597 	}
2598 } /* ixgbe_add_device_sysctls */
2599 
2600 /************************************************************************
2601  * ixgbe_allocate_pci_resources
2602  ************************************************************************/
2603 static int
2604 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2605 {
2606 	struct adapter *adapter = iflib_get_softc(ctx);
2607 	device_t        dev = iflib_get_dev(ctx);
2608 	int             rid;
2609 
2610 	rid = PCIR_BAR(0);
2611 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2612 	    RF_ACTIVE);
2613 
2614 	if (!(adapter->pci_mem)) {
2615 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2616 		return (ENXIO);
2617 	}
2618 
2619 	/* Save bus_space values for READ/WRITE_REG macros */
2620 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2621 	adapter->osdep.mem_bus_space_handle =
2622 	    rman_get_bushandle(adapter->pci_mem);
2623 	/* Set hw values for shared code */
2624 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2625 
2626 	return (0);
2627 } /* ixgbe_allocate_pci_resources */
2628 
2629 /************************************************************************
2630  * ixgbe_detach - Device removal routine
2631  *
2632  *   Called when the driver is being removed.
2633  *   Stops the adapter and deallocates all the resources
2634  *   that were allocated for driver operation.
2635  *
2636  *   return 0 on success, positive on failure
2637  ************************************************************************/
2638 static int
2639 ixgbe_if_detach(if_ctx_t ctx)
2640 {
2641 	struct adapter *adapter = iflib_get_softc(ctx);
2642 	device_t       dev = iflib_get_dev(ctx);
2643 	u32            ctrl_ext;
2644 
2645 	INIT_DEBUGOUT("ixgbe_detach: begin");
2646 
2647 	if (ixgbe_pci_iov_detach(dev) != 0) {
2648 		device_printf(dev, "SR-IOV in use; detach first.\n");
2649 		return (EBUSY);
2650 	}
2651 
2652 	ixgbe_setup_low_power_mode(ctx);
2653 
2654 	/* let hardware know driver is unloading */
2655 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2656 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2657 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2658 
2659 	ixgbe_free_pci_resources(ctx);
2660 	free(adapter->mta, M_IXGBE);
2661 
2662 	return (0);
2663 } /* ixgbe_if_detach */
2664 
2665 /************************************************************************
2666  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2667  *
2668  *   Prepare the adapter/port for LPLU and/or WoL
2669  ************************************************************************/
2670 static int
2671 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2672 {
2673 	struct adapter  *adapter = iflib_get_softc(ctx);
2674 	struct ixgbe_hw *hw = &adapter->hw;
2675 	device_t        dev = iflib_get_dev(ctx);
2676 	s32             error = 0;
2677 
2678 	if (!hw->wol_enabled)
2679 		ixgbe_set_phy_power(hw, FALSE);
2680 
2681 	/* Limit power management flow to X550EM baseT */
2682 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2683 	    hw->phy.ops.enter_lplu) {
2684 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2685 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2686 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2687 
2688 		/*
2689 		 * Clear Wake Up Status register to prevent any previous wakeup
2690 		 * events from waking us up immediately after we suspend.
2691 		 */
2692 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2693 
2694 		/*
2695 		 * Program the Wakeup Filter Control register with user filter
2696 		 * settings
2697 		 */
2698 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2699 
2700 		/* Enable wakeups and power management in Wakeup Control */
2701 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2702 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2703 
2704 		/* X550EM baseT adapters need a special LPLU flow */
2705 		hw->phy.reset_disable = TRUE;
2706 		ixgbe_if_stop(ctx);
2707 		error = hw->phy.ops.enter_lplu(hw);
2708 		if (error)
2709 			device_printf(dev, "Error entering LPLU: %d\n", error);
2710 		hw->phy.reset_disable = FALSE;
2711 	} else {
2712 		/* Just stop for other adapters */
2713 		ixgbe_if_stop(ctx);
2714 	}
2715 
2716 	return error;
2717 } /* ixgbe_setup_low_power_mode */
2718 
2719 /************************************************************************
2720  * ixgbe_shutdown - Shutdown entry point
2721  ************************************************************************/
2722 static int
2723 ixgbe_if_shutdown(if_ctx_t ctx)
2724 {
2725 	int error = 0;
2726 
2727 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2728 
2729 	error = ixgbe_setup_low_power_mode(ctx);
2730 
2731 	return (error);
2732 } /* ixgbe_if_shutdown */
2733 
2734 /************************************************************************
2735  * ixgbe_suspend
2736  *
2737  *   From D0 to D3
2738  ************************************************************************/
2739 static int
2740 ixgbe_if_suspend(if_ctx_t ctx)
2741 {
2742 	int error = 0;
2743 
2744 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2745 
2746 	error = ixgbe_setup_low_power_mode(ctx);
2747 
2748 	return (error);
2749 } /* ixgbe_if_suspend */
2750 
2751 /************************************************************************
2752  * ixgbe_resume
2753  *
2754  *   From D3 to D0
2755  ************************************************************************/
2756 static int
2757 ixgbe_if_resume(if_ctx_t ctx)
2758 {
2759 	struct adapter  *adapter = iflib_get_softc(ctx);
2760 	device_t        dev = iflib_get_dev(ctx);
2761 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2762 	struct ixgbe_hw *hw = &adapter->hw;
2763 	u32             wus;
2764 
2765 	INIT_DEBUGOUT("ixgbe_resume: begin");
2766 
2767 	/* Read & clear WUS register */
2768 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2769 	if (wus)
2770 		device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
2772 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2773 	/* And clear WUFC until next low-power transition */
2774 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2775 
2776 	/*
2777 	 * Required after D3->D0 transition;
2778 	 * will re-advertise all previous advertised speeds
2779 	 */
2780 	if (ifp->if_flags & IFF_UP)
2781 		ixgbe_if_init(ctx);
2782 
2783 	return (0);
2784 } /* ixgbe_if_resume */
2785 
2786 /************************************************************************
2787  * ixgbe_if_mtu_set - Ioctl mtu entry point
2788  *
2789  *   Return 0 on success, EINVAL on failure
2790  ************************************************************************/
2791 static int
2792 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2793 {
2794 	struct adapter *adapter = iflib_get_softc(ctx);
2795 	int error = 0;
2796 
2797 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2798 
2799 	if (mtu > IXGBE_MAX_MTU) {
2800 		error = EINVAL;
2801 	} else {
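		/* Track the full frame size: the requested MTU plus the
		 * L2 header overhead (IXGBE_MTU_HDR). */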
2802 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2803 	}
2804 
2805 	return error;
2806 } /* ixgbe_if_mtu_set */
2807 
2808 /************************************************************************
2809  * ixgbe_if_crcstrip_set
2810  ************************************************************************/
2811 static void
2812 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2813 {
2814 	struct adapter *sc = iflib_get_softc(ctx);
2815 	struct ixgbe_hw *hw = &sc->hw;
2816 	/* crc stripping is set in two places:
2817 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2818 	 * IXGBE_RDRXCTL (set by the original driver in
2819 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2820 	 *	We disable the setting when netmap is compiled in).
2821 	 * We update the values here, but also in ixgbe.c because
2822 	 * init_locked sometimes is called outside our control.
2823 	 */
2824 	uint32_t hl, rxc;
2825 
2826 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2827 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2828 #ifdef NETMAP
2829 	if (netmap_verbose)
2830 		D("%s read  HLREG 0x%x rxc 0x%x",
2831 			onoff ? "enter" : "exit", hl, rxc);
2832 #endif
2833 	/* hw requirements ... */
2834 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2835 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2836 	if (onoff && !crcstrip) {
2837 		/* keep the crc. Fast rx */
2838 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2839 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2840 	} else {
2841 		/* reset default mode */
2842 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2843 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2844 	}
2845 #ifdef NETMAP
2846 	if (netmap_verbose)
2847 		D("%s write HLREG 0x%x rxc 0x%x",
2848 			onoff ? "enter" : "exit", hl, rxc);
2849 #endif
2850 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2851 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2852 } /* ixgbe_if_crcstrip_set */
2853 
2854 /*********************************************************************
2855  * ixgbe_if_init - Init entry point
2856  *
2857  *   Used in two ways: by the stack as an init entry point in
2858  *   the network interface structure, and by the driver as a
2859  *   hw/sw initialization routine to bring the hardware and
2860  *   software to a consistent state.
2863  **********************************************************************/
2864 void
2865 ixgbe_if_init(if_ctx_t ctx)
2866 {
2867 	struct adapter     *adapter = iflib_get_softc(ctx);
2868 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2869 	device_t           dev = iflib_get_dev(ctx);
2870 	struct ixgbe_hw *hw = &adapter->hw;
2871 	struct ix_rx_queue *rx_que;
2872 	struct ix_tx_queue *tx_que;
2873 	u32             txdctl, mhadd;
2874 	u32             rxdctl, rxctrl;
2875 	u32             ctrl_ext;
2876 
2877 	int             i, j, err;
2878 
2879 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2880 
2881 	/* Queue indices may change with IOV mode */
2882 	ixgbe_align_all_queue_indices(adapter);
2883 
2884 	/* reprogram the RAR[0] in case user changed it. */
2885 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2886 
2887 	/* Get the latest MAC address; the user may have set an LAA */
2888 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2889 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2890 	hw->addr_ctrl.rar_used_count = 1;
2891 
2892 	ixgbe_init_hw(hw);
2893 
2894 	ixgbe_initialize_iov(adapter);
2895 
2896 	ixgbe_initialize_transmit_units(ctx);
2897 
2898 	/* Setup Multicast table */
2899 	ixgbe_if_multi_set(ctx);
2900 
2901 	/* Determine the correct mbuf pool, based on frame size */
2902 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2903 
2904 	/* Configure RX settings */
2905 	ixgbe_initialize_receive_units(ctx);
2906 
2907 	/*
2908 	 * Initialize variable holding task enqueue requests
2909 	 * from MSI-X interrupts
2910 	 */
2911 	adapter->task_requests = 0;
2912 
2913 	/* Enable SDP & MSI-X interrupts based on adapter */
2914 	ixgbe_config_gpie(adapter);
2915 
2916 	/* Set MTU size */
2917 	if (ifp->if_mtu > ETHERMTU) {
2918 		/* aka IXGBE_MAXFRS on 82599 and newer */
2919 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2920 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2921 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2922 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2923 	}
2924 
2925 	/* Now enable all the queues */
2926 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2927 		struct tx_ring *txr = &tx_que->txr;
2928 
2929 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2930 		txdctl |= IXGBE_TXDCTL_ENABLE;
2931 		/* Set WTHRESH to 8, burst writeback */
2932 		txdctl |= (8 << 16);
2933 		/*
2934 		 * When the internal queue falls below PTHRESH (32),
2935 		 * start prefetching as long as there are at least
2936 		 * HTHRESH (1) buffers ready. The values are taken
2937 		 * from the Intel linux driver 3.8.21.
2938 		 * Prefetching enables tx line rate even with 1 queue.
2939 		 */
2940 		txdctl |= (32 << 0) | (1 << 8);
2941 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2942 	}
2943 
2944 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2945 		struct rx_ring *rxr = &rx_que->rxr;
2946 
2947 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2948 		if (hw->mac.type == ixgbe_mac_82598EB) {
2949 			/*
2950 			 * PTHRESH = 32 (0x20)
2951 			 * HTHRESH = 4
2952 			 * WTHRESH = 8
2953 			 */
2954 			rxdctl &= ~0x3FFFFF;
2955 			rxdctl |= 0x080420;
2956 		}
2957 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2958 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2959 		for (j = 0; j < 10; j++) {
2960 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2961 			    IXGBE_RXDCTL_ENABLE)
2962 				break;
2963 			else
2964 				msec_delay(1);
2965 		}
2966 		wmb();
2967 	}
2968 
2969 	/* Enable Receive engine */
2970 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2971 	if (hw->mac.type == ixgbe_mac_82598EB)
2972 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2973 	rxctrl |= IXGBE_RXCTRL_RXEN;
2974 	ixgbe_enable_rx_dma(hw, rxctrl);
2975 
2976 	/* Set up MSI/MSI-X routing */
2977 	if (ixgbe_enable_msix)  {
2978 		ixgbe_configure_ivars(adapter);
2979 		/* Set up auto-mask */
2980 		if (hw->mac.type == ixgbe_mac_82598EB)
2981 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2982 		else {
2983 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2984 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2985 		}
2986 	} else {  /* Simple settings for Legacy/MSI */
2987 		ixgbe_set_ivar(adapter, 0, 0, 0);
2988 		ixgbe_set_ivar(adapter, 0, 0, 1);
2989 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2990 	}
2991 
2992 	ixgbe_init_fdir(adapter);
2993 
2994 	/*
2995 	 * Check on any SFP devices that
2996 	 * need to be kick-started
2997 	 */
2998 	if (hw->phy.type == ixgbe_phy_none) {
2999 		err = hw->phy.ops.identify(hw);
3000 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3001 			device_printf(dev,
3002 			    "Unsupported SFP+ module type was detected.\n");
3003 			return;
3004 		}
3005 	}
3006 
3007 	/* Set moderation on the Link interrupt */
3008 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3009 
3010 	/* Enable power to the phy. */
3011 	ixgbe_set_phy_power(hw, TRUE);
3012 
3013 	/* Config/Enable Link */
3014 	ixgbe_config_link(ctx);
3015 
3016 	/* Hardware Packet Buffer & Flow Control setup */
3017 	ixgbe_config_delay_values(adapter);
3018 
3019 	/* Initialize the FC settings */
3020 	ixgbe_start_hw(hw);
3021 
3022 	/* Set up VLAN support and filter */
3023 	ixgbe_setup_vlan_hw_support(ctx);
3024 
3025 	/* Setup DMA Coalescing */
3026 	ixgbe_config_dmac(adapter);
3027 
3028 	/* And now turn on interrupts */
3029 	ixgbe_if_enable_intr(ctx);
3030 
3031 	/* Enable the use of the MBX by the VF's */
3032 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3033 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3034 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3035 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3036 	}
3037 
3038 } /* ixgbe_if_init */
3039 
3040 /************************************************************************
3041  * ixgbe_set_ivar
3042  *
3043  *   Setup the correct IVAR register for a particular MSI-X interrupt
3044  *     (yes this is all very magic and confusing :)
3045  *    - entry is the register array entry
3046  *    - vector is the MSI-X vector for this queue
3047  *    - type is RX/TX/MISC
3048  ************************************************************************/
3049 static void
3050 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3051 {
3052 	struct ixgbe_hw *hw = &adapter->hw;
3053 	u32 ivar, index;
3054 
3055 	vector |= IXGBE_IVAR_ALLOC_VAL;
3056 
3057 	switch (hw->mac.type) {
3058 	case ixgbe_mac_82598EB:
3059 		if (type == -1)
3060 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3061 		else
3062 			entry += (type * 64);
3063 		index = (entry >> 2) & 0x1F;
3064 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3065 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3066 		ivar |= (vector << (8 * (entry & 0x3)));
3067 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3068 		break;
3069 	case ixgbe_mac_82599EB:
3070 	case ixgbe_mac_X540:
3071 	case ixgbe_mac_X550:
3072 	case ixgbe_mac_X550EM_x:
3073 	case ixgbe_mac_X550EM_a:
3074 		if (type == -1) { /* MISC IVAR */
3075 			index = (entry & 1) * 8;
3076 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3077 			ivar &= ~(0xFF << index);
3078 			ivar |= (vector << index);
3079 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3080 		} else {          /* RX/TX IVARS */
3081 			index = (16 * (entry & 1)) + (8 * type);
3082 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3083 			ivar &= ~(0xFF << index);
3084 			ivar |= (vector << index);
3085 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3086 		}
3087 	default:
3088 		break;
3089 	}
3090 } /* ixgbe_set_ivar */
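
/*
 * Illustrative sketch (not part of the driver; kept out of the build with
 * #if 0): how the RX/TX IVAR byte lanes above are selected on 82599-class
 * MACs.  Two queue entries share one 32-bit IVAR register; e.g. RX entry 5
 * (type 0) lands in IVAR(2) bits 23:16, and the matching TX entry (type 1)
 * lands in IVAR(2) bits 31:24.  The MISC (type -1) case uses IVAR_MISC and
 * is not covered here.
 */
#if 0
static u32
ixgbe_example_ivar_lane(u8 entry, s8 type)
{
	u32 reg = entry >> 1;                         /* register index */
	u32 shift = (16 * (entry & 1)) + (8 * type);  /* byte lane: 0/8/16/24 */

	return ((reg << 8) | shift);  /* packed only for illustration */
}
#endif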
3091 
3092 /************************************************************************
3093  * ixgbe_configure_ivars
3094  ************************************************************************/
3095 static void
3096 ixgbe_configure_ivars(struct adapter *adapter)
3097 {
3098 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3099 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3100 	u32                newitr;
3101 
3102 	if (ixgbe_max_interrupt_rate > 0)
3103 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3104 	else {
3105 		/*
3106 		 * Disable DMA coalescing if interrupt moderation is
3107 		 * disabled.
3108 		 */
3109 		adapter->dmac = 0;
3110 		newitr = 0;
3111 	}
3112 
3113 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3114 		struct rx_ring *rxr = &rx_que->rxr;
3115 
3116 		/* First the RX queue entry */
3117 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3118 
3119 		/* Set an Initial EITR value */
3120 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3121 	}
3122 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3123 		struct tx_ring *txr = &tx_que->txr;
3124 
3125 		/* ... and the TX */
3126 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3127 	}
3128 	/* For the Link interrupt */
3129 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3130 } /* ixgbe_configure_ivars */
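
/*
 * Illustrative sketch (not compiled): where the "4000000 / rate" and the
 * 0x0FF8 mask above come from.  Assuming the EITR interval field sits in
 * bits 11:3 and counts 2 usec units (as on 82599-class MACs), a requested
 * rate of 8000 interrupts/sec gives an interval of 125 usec and a register
 * value of 0x1F0 -- the same result as (4000000 / 8000) & 0x0FF8.
 */
#if 0
static u32
ixgbe_example_eitr(u32 irq_rate)
{
	u32 interval_usec = 1000000 / irq_rate;  /* time between interrupts */
	u32 reg = (interval_usec / 2) << 3;      /* 2 usec units in bits 11:3 */

	return (reg & 0x0FF8);                   /* same mask as above */
}
#endif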
3131 
3132 /************************************************************************
3133  * ixgbe_config_gpie
3134  ************************************************************************/
3135 static void
3136 ixgbe_config_gpie(struct adapter *adapter)
3137 {
3138 	struct ixgbe_hw *hw = &adapter->hw;
3139 	u32             gpie;
3140 
3141 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3142 
3143 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3144 		/* Enable Enhanced MSI-X mode */
3145 		gpie |= IXGBE_GPIE_MSIX_MODE
3146 		     |  IXGBE_GPIE_EIAME
3147 		     |  IXGBE_GPIE_PBA_SUPPORT
3148 		     |  IXGBE_GPIE_OCD;
3149 	}
3150 
3151 	/* Fan Failure Interrupt */
3152 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3153 		gpie |= IXGBE_SDP1_GPIEN;
3154 
3155 	/* Thermal Sensor Interrupt */
3156 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3157 		gpie |= IXGBE_SDP0_GPIEN_X540;
3158 
3159 	/* Link detection */
3160 	switch (hw->mac.type) {
3161 	case ixgbe_mac_82599EB:
3162 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3163 		break;
3164 	case ixgbe_mac_X550EM_x:
3165 	case ixgbe_mac_X550EM_a:
3166 		gpie |= IXGBE_SDP0_GPIEN_X540;
3167 		break;
3168 	default:
3169 		break;
3170 	}
3171 
3172 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3173 
3174 } /* ixgbe_config_gpie */
3175 
3176 /************************************************************************
3177  * ixgbe_config_delay_values
3178  *
3179  *   Requires adapter->max_frame_size to be set.
3180  ************************************************************************/
3181 static void
3182 ixgbe_config_delay_values(struct adapter *adapter)
3183 {
3184 	struct ixgbe_hw *hw = &adapter->hw;
3185 	u32             rxpb, frame, size, tmp;
3186 
3187 	frame = adapter->max_frame_size;
3188 
3189 	/* Calculate High Water */
3190 	switch (hw->mac.type) {
3191 	case ixgbe_mac_X540:
3192 	case ixgbe_mac_X550:
3193 	case ixgbe_mac_X550EM_x:
3194 	case ixgbe_mac_X550EM_a:
3195 		tmp = IXGBE_DV_X540(frame, frame);
3196 		break;
3197 	default:
3198 		tmp = IXGBE_DV(frame, frame);
3199 		break;
3200 	}
3201 	size = IXGBE_BT2KB(tmp);
3202 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3203 	hw->fc.high_water[0] = rxpb - size;
3204 
3205 	/* Now calculate Low Water */
3206 	switch (hw->mac.type) {
3207 	case ixgbe_mac_X540:
3208 	case ixgbe_mac_X550:
3209 	case ixgbe_mac_X550EM_x:
3210 	case ixgbe_mac_X550EM_a:
3211 		tmp = IXGBE_LOW_DV_X540(frame);
3212 		break;
3213 	default:
3214 		tmp = IXGBE_LOW_DV(frame);
3215 		break;
3216 	}
3217 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3218 
3219 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3220 	hw->fc.send_xon = TRUE;
3221 } /* ixgbe_config_delay_values */
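
/*
 * Units note for the calculation above: the IXGBE_DV()/IXGBE_LOW_DV()
 * macros return a worst-case delay in bits, IXGBE_BT2KB() converts that
 * to kilobytes, and RXPBSIZE is effectively a byte count (hence the
 * >> 10), so high_water ends up being the kilobytes of RX packet buffer
 * left over after the worst-case delay has been accounted for.
 */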
3222 
3223 /************************************************************************
3224  * ixgbe_if_multi_set - Multicast Update
3225  *
3226  *   Called whenever multicast address list is updated.
3227  ************************************************************************/
3228 static u_int
3229 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count)
3230 {
3231 	struct adapter *adapter = arg;
3232 	struct ixgbe_mc_addr *mta = adapter->mta;
3233 
3234 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3235 		return (0);
3236 	bcopy(LLADDR(sdl), mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3237 	mta[count].vmdq = adapter->pool;
3238 
3239 	return (1);
3240 } /* ixgbe_mc_filter_apply */
3241 
3242 static void
3243 ixgbe_if_multi_set(if_ctx_t ctx)
3244 {
3245 	struct adapter       *adapter = iflib_get_softc(ctx);
3246 	struct ixgbe_mc_addr *mta;
3247 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3248 	u8                   *update_ptr;
3249 	u32                  fctrl;
3250 	u_int		     mcnt;
3251 
3252 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3253 
3254 	mta = adapter->mta;
3255 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3256 
3257 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3258 	    adapter);
3259 
3260 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3261 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3262 	if (ifp->if_flags & IFF_PROMISC)
3263 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3264 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3265 	    ifp->if_flags & IFF_ALLMULTI) {
3266 		fctrl |= IXGBE_FCTRL_MPE;
3267 		fctrl &= ~IXGBE_FCTRL_UPE;
3268 	} else
3269 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3270 
3271 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3272 
3273 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3274 		update_ptr = (u8 *)mta;
3275 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3276 		    ixgbe_mc_array_itr, TRUE);
3277 	}
3278 
3279 } /* ixgbe_if_multi_set */
3280 
3281 /************************************************************************
3282  * ixgbe_mc_array_itr
3283  *
3284  *   An iterator function needed by the multicast shared code.
3285  *   It feeds the shared code routine the addresses in the
3286  *   array of ixgbe_set_multi() one by one.
3287  ************************************************************************/
3288 static u8 *
3289 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3290 {
3291 	struct ixgbe_mc_addr *mta;
3292 
3293 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3294 	*vmdq = mta->vmdq;
3295 
3296 	*update_ptr = (u8*)(mta + 1);
3297 
3298 	return (mta->addr);
3299 } /* ixgbe_mc_array_itr */
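
/*
 * Illustrative sketch (not compiled): roughly how the shared code is
 * expected to consume the iterator above -- one call per address, with
 * each call advancing update_ptr to the next mta entry.  This is only a
 * sketch, not the actual ixgbe_update_mc_addr_list() body.
 */
#if 0
static void
ixgbe_example_mc_walk(struct ixgbe_hw *hw, u8 *update_ptr, u32 count,
    u8 *(*next_addr)(struct ixgbe_hw *, u8 **, u32 *))
{
	u32 vmdq, i;

	for (i = 0; i < count; i++) {
		u8 *addr = next_addr(hw, &update_ptr, &vmdq);

		/* ... program addr/vmdq into the hardware filter here ... */
		(void)addr;
	}
}
#endif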
3300 
3301 /************************************************************************
3302  * ixgbe_if_timer - Timer routine
3303  *
3304  *   Checks for pluggable optics, refreshes the link state,
3305  *   and fires off the admin task.
3306  ************************************************************************/
3307 static void
3308 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3309 {
3310 	struct adapter *adapter = iflib_get_softc(ctx);
3311 
3312 	if (qid != 0)
3313 		return;
3314 
3315 	/* Check for pluggable optics */
3316 	if (adapter->sfp_probe)
3317 		if (!ixgbe_sfp_probe(ctx))
3318 			return; /* Nothing to do */
3319 
3320 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3321 	    &adapter->link_up, 0);
3322 
3323 	/* Fire off the adminq task */
3324 	iflib_admin_intr_deferred(ctx);
3325 
3326 } /* ixgbe_if_timer */
3327 
3328 /************************************************************************
3329  * ixgbe_sfp_probe
3330  *
3331  *   Determine if a port had optics inserted.
3332  ************************************************************************/
3333 static bool
3334 ixgbe_sfp_probe(if_ctx_t ctx)
3335 {
3336 	struct adapter  *adapter = iflib_get_softc(ctx);
3337 	struct ixgbe_hw *hw = &adapter->hw;
3338 	device_t        dev = iflib_get_dev(ctx);
3339 	bool            result = FALSE;
3340 
3341 	if ((hw->phy.type == ixgbe_phy_nl) &&
3342 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3343 		s32 ret = hw->phy.ops.identify_sfp(hw);
3344 		if (ret)
3345 			goto out;
3346 		ret = hw->phy.ops.reset(hw);
3347 		adapter->sfp_probe = FALSE;
3348 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3349 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3350 			device_printf(dev,
3351 			    "Reload driver with supported module.\n");
3352 			goto out;
3353 		} else
3354 			device_printf(dev, "SFP+ module detected!\n");
3355 		/* We now have supported optics */
3356 		result = TRUE;
3357 	}
3358 out:
3359 
3360 	return (result);
3361 } /* ixgbe_sfp_probe */
3362 
3363 /************************************************************************
3364  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3365  ************************************************************************/
3366 static void
3367 ixgbe_handle_mod(void *context)
3368 {
3369 	if_ctx_t        ctx = context;
3370 	struct adapter  *adapter = iflib_get_softc(ctx);
3371 	struct ixgbe_hw *hw = &adapter->hw;
3372 	device_t        dev = iflib_get_dev(ctx);
3373 	u32             err, cage_full = 0;
3374 
3375 	if (adapter->hw.need_crosstalk_fix) {
3376 		switch (hw->mac.type) {
3377 		case ixgbe_mac_82599EB:
3378 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3379 			    IXGBE_ESDP_SDP2;
3380 			break;
3381 		case ixgbe_mac_X550EM_x:
3382 		case ixgbe_mac_X550EM_a:
3383 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3384 			    IXGBE_ESDP_SDP0;
3385 			break;
3386 		default:
3387 			break;
3388 		}
3389 
3390 		if (!cage_full)
3391 			goto handle_mod_out;
3392 	}
3393 
3394 	err = hw->phy.ops.identify_sfp(hw);
3395 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3396 		device_printf(dev,
3397 		    "Unsupported SFP+ module type was detected.\n");
3398 		goto handle_mod_out;
3399 	}
3400 
3401 	if (hw->mac.type == ixgbe_mac_82598EB)
3402 		err = hw->phy.ops.reset(hw);
3403 	else
3404 		err = hw->mac.ops.setup_sfp(hw);
3405 
3406 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3407 		device_printf(dev,
3408 		    "Setup failure - unsupported SFP+ module type.\n");
3409 		goto handle_mod_out;
3410 	}
3411 	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3412 	return;
3413 
3414 handle_mod_out:
3415 	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3416 } /* ixgbe_handle_mod */
3417 
3418 
3419 /************************************************************************
3420  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3421  ************************************************************************/
3422 static void
3423 ixgbe_handle_msf(void *context)
3424 {
3425 	if_ctx_t        ctx = context;
3426 	struct adapter  *adapter = iflib_get_softc(ctx);
3427 	struct ixgbe_hw *hw = &adapter->hw;
3428 	u32             autoneg;
3429 	bool            negotiate;
3430 
3431 	/* ixgbe_get_supported_physical_layer() will call hw->phy.ops.identify_sfp() */
3432 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3433 
3434 	autoneg = hw->phy.autoneg_advertised;
3435 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3436 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3437 	if (hw->mac.ops.setup_link)
3438 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3439 
3440 	/* Adjust media types shown in ifconfig */
3441 	ifmedia_removeall(adapter->media);
3442 	ixgbe_add_media_types(adapter->ctx);
3443 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3444 } /* ixgbe_handle_msf */
3445 
3446 /************************************************************************
3447  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3448  ************************************************************************/
3449 static void
3450 ixgbe_handle_phy(void *context)
3451 {
3452 	if_ctx_t        ctx = context;
3453 	struct adapter  *adapter = iflib_get_softc(ctx);
3454 	struct ixgbe_hw *hw = &adapter->hw;
3455 	int             error;
3456 
3457 	error = hw->phy.ops.handle_lasi(hw);
3458 	if (error == IXGBE_ERR_OVERTEMP)
3459 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3460 	else if (error)
3461 		device_printf(adapter->dev,
3462 		    "Error handling LASI interrupt: %d\n", error);
3463 } /* ixgbe_handle_phy */
3464 
3465 /************************************************************************
3466  * ixgbe_if_stop - Stop the hardware
3467  *
3468  *   Disables all traffic on the adapter by issuing a
3469  *   global reset on the MAC and deallocates TX/RX buffers.
3470  ************************************************************************/
3471 static void
3472 ixgbe_if_stop(if_ctx_t ctx)
3473 {
3474 	struct adapter  *adapter = iflib_get_softc(ctx);
3475 	struct ixgbe_hw *hw = &adapter->hw;
3476 
3477 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3478 
3479 	ixgbe_reset_hw(hw);
3480 	hw->adapter_stopped = FALSE;
3481 	ixgbe_stop_adapter(hw);
3482 	if (hw->mac.type == ixgbe_mac_82599EB)
3483 		ixgbe_stop_mac_link_on_d3_82599(hw);
3484 	/* Turn off the laser - noop with no optics */
3485 	ixgbe_disable_tx_laser(hw);
3486 
3487 	/* Update the stack */
3488 	adapter->link_up = FALSE;
3489 	ixgbe_if_update_admin_status(ctx);
3490 
3491 	/* reprogram the RAR[0] in case user changed it. */
3492 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3493 
3494 	return;
3495 } /* ixgbe_if_stop */
3496 
3497 /************************************************************************
3498  * ixgbe_if_update_admin_status - Update OS on link state
3499  *
3500  * Note: Only updates the OS on the cached link state.
3501  *       The real check of the hardware only happens with
3502  *       a link interrupt.
3503  ************************************************************************/
3504 static void
3505 ixgbe_if_update_admin_status(if_ctx_t ctx)
3506 {
3507 	struct adapter *adapter = iflib_get_softc(ctx);
3508 	device_t       dev = iflib_get_dev(ctx);
3509 
3510 	if (adapter->link_up) {
3511 		if (adapter->link_active == FALSE) {
3512 			if (bootverbose)
3513 				device_printf(dev, "Link is up %d Gbps %s\n",
3514 				    ((adapter->link_speed == 128) ? 10 : 1),
3515 				    "Full Duplex");
3516 			adapter->link_active = TRUE;
3517 			/* Update any Flow Control changes */
3518 			ixgbe_fc_enable(&adapter->hw);
3519 			/* Update DMA coalescing config */
3520 			ixgbe_config_dmac(adapter);
3521 			/* should actually be negotiated value */
3522 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3523 
3524 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3525 				ixgbe_ping_all_vfs(adapter);
3526 		}
3527 	} else { /* Link down */
3528 		if (adapter->link_active == TRUE) {
3529 			if (bootverbose)
3530 				device_printf(dev, "Link is Down\n");
3531 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3532 			adapter->link_active = FALSE;
3533 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3534 				ixgbe_ping_all_vfs(adapter);
3535 		}
3536 	}
3537 
3538 	/* Handle task requests from msix_link() */
3539 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3540 		ixgbe_handle_mod(ctx);
3541 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3542 		ixgbe_handle_msf(ctx);
3543 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3544 		ixgbe_handle_mbx(ctx);
3545 	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3546 		ixgbe_reinit_fdir(ctx);
3547 	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3548 		ixgbe_handle_phy(ctx);
3549 	adapter->task_requests = 0;
3550 
3551 	ixgbe_update_stats_counters(adapter);
3552 } /* ixgbe_if_update_admin_status */
3553 
3554 /************************************************************************
3555  * ixgbe_config_dmac - Configure DMA Coalescing
3556  ************************************************************************/
3557 static void
3558 ixgbe_config_dmac(struct adapter *adapter)
3559 {
3560 	struct ixgbe_hw          *hw = &adapter->hw;
3561 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3562 
3563 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3564 		return;
3565 
3566 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3567 	    dcfg->link_speed ^ adapter->link_speed) {
3568 		dcfg->watchdog_timer = adapter->dmac;
3569 		dcfg->fcoe_en = FALSE;
3570 		dcfg->link_speed = adapter->link_speed;
3571 		dcfg->num_tcs = 1;
3572 
3573 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3574 		    dcfg->watchdog_timer, dcfg->link_speed);
3575 
3576 		hw->mac.ops.dmac_config(hw);
3577 	}
3578 } /* ixgbe_config_dmac */
3579 
3580 /************************************************************************
3581  * ixgbe_if_enable_intr
3582  ************************************************************************/
3583 void
3584 ixgbe_if_enable_intr(if_ctx_t ctx)
3585 {
3586 	struct adapter     *adapter = iflib_get_softc(ctx);
3587 	struct ixgbe_hw    *hw = &adapter->hw;
3588 	struct ix_rx_queue *que = adapter->rx_queues;
3589 	u32                mask, fwsm;
3590 
3591 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3592 
3593 	switch (adapter->hw.mac.type) {
3594 	case ixgbe_mac_82599EB:
3595 		mask |= IXGBE_EIMS_ECC;
3596 		/* Temperature sensor on some adapters */
3597 		mask |= IXGBE_EIMS_GPI_SDP0;
3598 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3599 		mask |= IXGBE_EIMS_GPI_SDP1;
3600 		mask |= IXGBE_EIMS_GPI_SDP2;
3601 		break;
3602 	case ixgbe_mac_X540:
3603 		/* Detect if Thermal Sensor is enabled */
3604 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3605 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3606 			mask |= IXGBE_EIMS_TS;
3607 		mask |= IXGBE_EIMS_ECC;
3608 		break;
3609 	case ixgbe_mac_X550:
3610 		/* MAC thermal sensor is automatically enabled */
3611 		mask |= IXGBE_EIMS_TS;
3612 		mask |= IXGBE_EIMS_ECC;
3613 		break;
3614 	case ixgbe_mac_X550EM_x:
3615 	case ixgbe_mac_X550EM_a:
3616 		/* Some devices use SDP0 for important information */
3617 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3618 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3619 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3620 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3621 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3622 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3623 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3624 		mask |= IXGBE_EIMS_ECC;
3625 		break;
3626 	default:
3627 		break;
3628 	}
3629 
3630 	/* Enable Fan Failure detection */
3631 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3632 		mask |= IXGBE_EIMS_GPI_SDP1;
3633 	/* Enable SR-IOV */
3634 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3635 		mask |= IXGBE_EIMS_MAILBOX;
3636 	/* Enable Flow Director */
3637 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3638 		mask |= IXGBE_EIMS_FLOW_DIR;
3639 
3640 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3641 
3642 	/* With MSI-X we use auto clear */
3643 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3644 		mask = IXGBE_EIMS_ENABLE_MASK;
3645 		/* Don't autoclear Link */
3646 		mask &= ~IXGBE_EIMS_OTHER;
3647 		mask &= ~IXGBE_EIMS_LSC;
3648 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3649 			mask &= ~IXGBE_EIMS_MAILBOX;
3650 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3651 	}
3652 
3653 	/*
3654 	 * Now enable all queues, this is done separately to
3655 	 * allow for handling the extended (beyond 32) MSI-X
3656 	 * vectors that can be used by 82599
3657 	 */
3658 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3659 		ixgbe_enable_queue(adapter, que->msix);
3660 
3661 	IXGBE_WRITE_FLUSH(hw);
3662 
3663 } /* ixgbe_if_enable_intr */
3664 
3665 /************************************************************************
3666  * ixgbe_if_disable_intr
3667  ************************************************************************/
3668 static void
3669 ixgbe_if_disable_intr(if_ctx_t ctx)
3670 {
3671 	struct adapter *adapter = iflib_get_softc(ctx);
3672 
3673 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3674 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3675 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3676 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3677 	} else {
3678 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3679 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3680 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3681 	}
3682 	IXGBE_WRITE_FLUSH(&adapter->hw);
3683 
3684 } /* ixgbe_if_disable_intr */
3685 
3686 /************************************************************************
3687  * ixgbe_link_intr_enable
3688  ************************************************************************/
3689 static void
3690 ixgbe_link_intr_enable(if_ctx_t ctx)
3691 {
3692 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3693 
3694 	/* Re-enable other interrupts */
3695 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3696 } /* ixgbe_link_intr_enable */
3697 
3698 /************************************************************************
3699  * ixgbe_if_rx_queue_intr_enable
3700  ************************************************************************/
3701 static int
3702 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3703 {
3704 	struct adapter     *adapter = iflib_get_softc(ctx);
3705 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3706 
3707 	ixgbe_enable_queue(adapter, que->msix);
3708 
3709 	return (0);
3710 } /* ixgbe_if_rx_queue_intr_enable */
3711 
3712 /************************************************************************
3713  * ixgbe_enable_queue
3714  ************************************************************************/
3715 static void
3716 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3717 {
3718 	struct ixgbe_hw *hw = &adapter->hw;
3719 	u64             queue = 1ULL << vector;
3720 	u32             mask;
3721 
3722 	if (hw->mac.type == ixgbe_mac_82598EB) {
3723 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3724 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3725 	} else {
3726 		mask = (queue & 0xFFFFFFFF);
3727 		if (mask)
3728 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3729 		mask = (queue >> 32);
3730 		if (mask)
3731 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3732 	}
3733 } /* ixgbe_enable_queue */
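
/*
 * Illustrative sketch (not compiled): how the 64-bit queue bit above is
 * split across the two EIMS_EX registers on non-82598 MACs.  Vector 35,
 * for example, becomes bit 3 of EIMS_EX(1) and leaves EIMS_EX(0) untouched.
 */
#if 0
static void
ixgbe_example_queue_bit(u32 vector, u32 *lo, u32 *hi)
{
	u64 queue = 1ULL << vector;

	*lo = queue & 0xFFFFFFFF;  /* EIMS_EX(0): vectors  0..31 */
	*hi = queue >> 32;         /* EIMS_EX(1): vectors 32..63 */
}
#endif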
3734 
3735 /************************************************************************
3736  * ixgbe_disable_queue
3737  ************************************************************************/
3738 static void
3739 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3740 {
3741 	struct ixgbe_hw *hw = &adapter->hw;
3742 	u64             queue = 1ULL << vector;
3743 	u32             mask;
3744 
3745 	if (hw->mac.type == ixgbe_mac_82598EB) {
3746 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3747 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3748 	} else {
3749 		mask = (queue & 0xFFFFFFFF);
3750 		if (mask)
3751 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3752 		mask = (queue >> 32);
3753 		if (mask)
3754 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3755 	}
3756 } /* ixgbe_disable_queue */
3757 
3758 /************************************************************************
3759  * ixgbe_intr - Legacy Interrupt Service Routine
3760  ************************************************************************/
3761 int
3762 ixgbe_intr(void *arg)
3763 {
3764 	struct adapter     *adapter = arg;
3765 	struct ix_rx_queue *que = adapter->rx_queues;
3766 	struct ixgbe_hw    *hw = &adapter->hw;
3767 	if_ctx_t           ctx = adapter->ctx;
3768 	u32                eicr, eicr_mask;
3769 
3770 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3771 
3772 	++que->irqs;
3773 	if (eicr == 0) {
3774 		ixgbe_if_enable_intr(ctx);
3775 		return (FILTER_HANDLED);
3776 	}
3777 
3778 	/* Check for fan failure */
3779 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3780 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3781 		device_printf(adapter->dev,
3782 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3783 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3784 	}
3785 
3786 	/* Link status change */
3787 	if (eicr & IXGBE_EICR_LSC) {
3788 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3789 		iflib_admin_intr_deferred(ctx);
3790 	}
3791 
3792 	if (ixgbe_is_sfp(hw)) {
3793 		/* Pluggable optics-related interrupt */
3794 		if (hw->mac.type >= ixgbe_mac_X540)
3795 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3796 		else
3797 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3798 
3799 		if (eicr & eicr_mask) {
3800 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3801 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3802 		}
3803 
3804 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3805 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3806 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3807 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3808 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3809 		}
3810 	}
3811 
3812 	/* External PHY interrupt */
3813 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3814 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3815 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3816 
3817 	return (FILTER_SCHEDULE_THREAD);
3818 } /* ixgbe_intr */
3819 
3820 /************************************************************************
3821  * ixgbe_free_pci_resources
3822  ************************************************************************/
3823 static void
3824 ixgbe_free_pci_resources(if_ctx_t ctx)
3825 {
3826 	struct adapter *adapter = iflib_get_softc(ctx);
3827 	struct         ix_rx_queue *que = adapter->rx_queues;
3828 	device_t       dev = iflib_get_dev(ctx);
3829 
3830 	/* Release all MSI-X queue resources */
3831 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3832 		iflib_irq_free(ctx, &adapter->irq);
3833 
3834 	if (que != NULL) {
3835 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3836 			iflib_irq_free(ctx, &que->que_irq);
3837 		}
3838 	}
3839 
3840 	if (adapter->pci_mem != NULL)
3841 		bus_release_resource(dev, SYS_RES_MEMORY,
3842 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3843 } /* ixgbe_free_pci_resources */
3844 
3845 /************************************************************************
3846  * ixgbe_sysctl_flowcntl
3847  *
3848  *   SYSCTL wrapper around setting Flow Control
3849  ************************************************************************/
3850 static int
3851 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3852 {
3853 	struct adapter *adapter;
3854 	int            error, fc;
3855 
3856 	adapter = (struct adapter *)arg1;
3857 	fc = adapter->hw.fc.current_mode;
3858 
3859 	error = sysctl_handle_int(oidp, &fc, 0, req);
3860 	if ((error) || (req->newptr == NULL))
3861 		return (error);
3862 
3863 	/* Don't bother if it's not changed */
3864 	if (fc == adapter->hw.fc.current_mode)
3865 		return (0);
3866 
3867 	return ixgbe_set_flowcntl(adapter, fc);
3868 } /* ixgbe_sysctl_flowcntl */
3869 
3870 /************************************************************************
3871  * ixgbe_set_flowcntl - Set flow control
3872  *
3873  *   Flow control values:
3874  *     0 - off
3875  *     1 - rx pause
3876  *     2 - tx pause
3877  *     3 - full
3878  ************************************************************************/
3879 static int
3880 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3881 {
3882 	switch (fc) {
3883 	case ixgbe_fc_rx_pause:
3884 	case ixgbe_fc_tx_pause:
3885 	case ixgbe_fc_full:
3886 		adapter->hw.fc.requested_mode = fc;
3887 		if (adapter->num_rx_queues > 1)
3888 			ixgbe_disable_rx_drop(adapter);
3889 		break;
3890 	case ixgbe_fc_none:
3891 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3892 		if (adapter->num_rx_queues > 1)
3893 			ixgbe_enable_rx_drop(adapter);
3894 		break;
3895 	default:
3896 		return (EINVAL);
3897 	}
3898 
3899 	/* Don't autoneg if forcing a value */
3900 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3901 	ixgbe_fc_enable(&adapter->hw);
3902 
3903 	return (0);
3904 } /* ixgbe_set_flowcntl */
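
/*
 * Usage note (sketch): with the mapping above, writing 3 to the flow
 * control sysctl requests full (rx + tx) pause, while 0 disables flow
 * control and re-enables per-queue RX drop when multiple queues are in
 * use.  The sysctl path ("dev.ix.<unit>.fc") is an assumption here; see
 * the driver's sysctl registration code for the authoritative name.
 */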
3905 
3906 /************************************************************************
3907  * ixgbe_enable_rx_drop
3908  *
3909  *   Enable the hardware to drop packets when the buffer is
3910  *   full. This is useful with multiqueue, so that no single
3911  *   queue being full stalls the entire RX engine. We only
3912  *   enable this when Multiqueue is enabled AND Flow Control
3913  *   is disabled.
3914  ************************************************************************/
3915 static void
3916 ixgbe_enable_rx_drop(struct adapter *adapter)
3917 {
3918 	struct ixgbe_hw *hw = &adapter->hw;
3919 	struct rx_ring  *rxr;
3920 	u32             srrctl;
3921 
3922 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3923 		rxr = &adapter->rx_queues[i].rxr;
3924 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3925 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3926 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3927 	}
3928 
3929 	/* enable drop for each vf */
3930 	for (int i = 0; i < adapter->num_vfs; i++) {
3931 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3932 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3933 		                IXGBE_QDE_ENABLE));
3934 	}
3935 } /* ixgbe_enable_rx_drop */
3936 
3937 /************************************************************************
3938  * ixgbe_disable_rx_drop
3939  ************************************************************************/
3940 static void
3941 ixgbe_disable_rx_drop(struct adapter *adapter)
3942 {
3943 	struct ixgbe_hw *hw = &adapter->hw;
3944 	struct rx_ring  *rxr;
3945 	u32             srrctl;
3946 
3947 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3948 		rxr = &adapter->rx_queues[i].rxr;
3949 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3950 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3951 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3952 	}
3953 
3954 	/* disable drop for each vf */
3955 	for (int i = 0; i < adapter->num_vfs; i++) {
3956 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3957 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3958 	}
3959 } /* ixgbe_disable_rx_drop */
3960 
3961 /************************************************************************
3962  * ixgbe_sysctl_advertise
3963  *
3964  *   SYSCTL wrapper around setting advertised speed
3965  ************************************************************************/
3966 static int
3967 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3968 {
3969 	struct adapter *adapter;
3970 	int            error, advertise;
3971 
3972 	adapter = (struct adapter *)arg1;
3973 	advertise = adapter->advertise;
3974 
3975 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3976 	if ((error) || (req->newptr == NULL))
3977 		return (error);
3978 
3979 	return ixgbe_set_advertise(adapter, advertise);
3980 } /* ixgbe_sysctl_advertise */
3981 
3982 /************************************************************************
3983  * ixgbe_set_advertise - Control advertised link speed
3984  *
3985  *   Flags:
3986  *     0x1 - advertise 100 Mb
3987  *     0x2 - advertise 1G
3988  *     0x4 - advertise 10G
3989  *     0x8 - advertise 10 Mb (yes, Mb)
3990  ************************************************************************/
3991 static int
3992 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3993 {
3994 	device_t         dev = iflib_get_dev(adapter->ctx);
3995 	struct ixgbe_hw  *hw;
3996 	ixgbe_link_speed speed = 0;
3997 	ixgbe_link_speed link_caps = 0;
3998 	s32              err = IXGBE_NOT_IMPLEMENTED;
3999 	bool             negotiate = FALSE;
4000 
4001 	/* Checks to validate new value */
4002 	if (adapter->advertise == advertise) /* no change */
4003 		return (0);
4004 
4005 	hw = &adapter->hw;
4006 
4007 	/* No speed changes for backplane media */
4008 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4009 		return (ENODEV);
4010 
4011 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4012 	      (hw->phy.multispeed_fiber))) {
4013 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4014 		return (EINVAL);
4015 	}
4016 
4017 	if (advertise < 0x1 || advertise > 0xF) {
4018 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4019 		return (EINVAL);
4020 	}
4021 
4022 	if (hw->mac.ops.get_link_capabilities) {
4023 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4024 		    &negotiate);
4025 		if (err != IXGBE_SUCCESS) {
4026 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4027 			return (ENODEV);
4028 		}
4029 	}
4030 
4031 	/* Set new value and report new advertised mode */
4032 	if (advertise & 0x1) {
4033 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4034 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4035 			return (EINVAL);
4036 		}
4037 		speed |= IXGBE_LINK_SPEED_100_FULL;
4038 	}
4039 	if (advertise & 0x2) {
4040 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4041 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4042 			return (EINVAL);
4043 		}
4044 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4045 	}
4046 	if (advertise & 0x4) {
4047 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4048 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4049 			return (EINVAL);
4050 		}
4051 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4052 	}
4053 	if (advertise & 0x8) {
4054 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4055 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4056 			return (EINVAL);
4057 		}
4058 		speed |= IXGBE_LINK_SPEED_10_FULL;
4059 	}
4060 
4061 	hw->mac.autotry_restart = TRUE;
4062 	hw->mac.ops.setup_link(hw, speed, TRUE);
4063 	adapter->advertise = advertise;
4064 
4065 	return (0);
4066 } /* ixgbe_set_advertise */
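
/*
 * Worked example (sketch): with the flag encoding above, a value of 0x6
 * (0x2 | 0x4) advertises 1G and 10G only, i.e. IXGBE_LINK_SPEED_1GB_FULL
 * | IXGBE_LINK_SPEED_10GB_FULL.  The sysctl path
 * ("dev.ix.<unit>.advertise_speed") is an assumption here; see the
 * driver's sysctl registration code for the authoritative name.
 */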
4067 
4068 /************************************************************************
4069  * ixgbe_get_advertise - Get current advertised speed settings
4070  *
4071  *   Formatted for sysctl usage.
4072  *   Flags:
4073  *     0x1 - advertise 100 Mb
4074  *     0x2 - advertise 1G
4075  *     0x4 - advertise 10G
4076  *     0x8 - advertise 10 Mb (yes, Mb)
4077  ************************************************************************/
4078 static int
4079 ixgbe_get_advertise(struct adapter *adapter)
4080 {
4081 	struct ixgbe_hw  *hw = &adapter->hw;
4082 	int              speed;
4083 	ixgbe_link_speed link_caps = 0;
4084 	s32              err;
4085 	bool             negotiate = FALSE;
4086 
4087 	/*
4088 	 * Advertised speed means nothing unless it's copper or
4089 	 * multi-speed fiber
4090 	 */
4091 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4092 	    !(hw->phy.multispeed_fiber))
4093 		return (0);
4094 
4095 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4096 	if (err != IXGBE_SUCCESS)
4097 		return (0);
4098 
4099 	speed =
4100 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4101 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4102 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4103 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4104 
4105 	return speed;
4106 } /* ixgbe_get_advertise */
4107 
4108 /************************************************************************
4109  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4110  *
4111  *   Control values:
4112  *     0/1 - off / on (use default value of 1000)
4113  *
4114  *     Legal timer values are:
4115  *     50,100,250,500,1000,2000,5000,10000
4116  *
4117  *     Turning off interrupt moderation will also turn this off.
4118  ************************************************************************/
4119 static int
4120 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4121 {
4122 	struct adapter *adapter = (struct adapter *)arg1;
4123 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4124 	int            error;
4125 	u16            newval;
4126 
4127 	newval = adapter->dmac;
4128 	error = sysctl_handle_16(oidp, &newval, 0, req);
4129 	if ((error) || (req->newptr == NULL))
4130 		return (error);
4131 
4132 	switch (newval) {
4133 	case 0:
4134 		/* Disabled */
4135 		adapter->dmac = 0;
4136 		break;
4137 	case 1:
4138 		/* Enable and use default */
4139 		adapter->dmac = 1000;
4140 		break;
4141 	case 50:
4142 	case 100:
4143 	case 250:
4144 	case 500:
4145 	case 1000:
4146 	case 2000:
4147 	case 5000:
4148 	case 10000:
4149 		/* Legal values - allow */
4150 		adapter->dmac = newval;
4151 		break;
4152 	default:
4153 		/* Do nothing, illegal value */
4154 		return (EINVAL);
4155 	}
4156 
4157 	/* Re-initialize hardware if it's already running */
4158 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4159 		ifp->if_init(ifp);
4160 
4161 	return (0);
4162 } /* ixgbe_sysctl_dmac */
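
/*
 * Usage note (sketch): per the table above, writing 1 to the DMA
 * coalescing sysctl selects the default watchdog value of 1000, any of
 * the other listed values selects that timer directly, and 0 disables
 * the feature.  The sysctl path ("dev.ix.<unit>.dmac") is an assumption
 * here; see the driver's sysctl registration code for the authoritative
 * name.
 */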
4163 
4164 #ifdef IXGBE_DEBUG
4165 /************************************************************************
4166  * ixgbe_sysctl_power_state
4167  *
4168  *   Sysctl to test power states
4169  *   Values:
4170  *     0      - set device to D0
4171  *     3      - set device to D3
4172  *     (none) - get current device power state
4173  ************************************************************************/
4174 static int
4175 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4176 {
4177 	struct adapter *adapter = (struct adapter *)arg1;
4178 	device_t       dev = adapter->dev;
4179 	int            curr_ps, new_ps, error = 0;
4180 
4181 	curr_ps = new_ps = pci_get_powerstate(dev);
4182 
4183 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4184 	if ((error) || (req->newptr == NULL))
4185 		return (error);
4186 
4187 	if (new_ps == curr_ps)
4188 		return (0);
4189 
4190 	if (new_ps == 3 && curr_ps == 0)
4191 		error = DEVICE_SUSPEND(dev);
4192 	else if (new_ps == 0 && curr_ps == 3)
4193 		error = DEVICE_RESUME(dev);
4194 	else
4195 		return (EINVAL);
4196 
4197 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4198 
4199 	return (error);
4200 } /* ixgbe_sysctl_power_state */
4201 #endif
4202 
4203 /************************************************************************
4204  * ixgbe_sysctl_wol_enable
4205  *
4206  *   Sysctl to enable/disable the WoL capability,
4207  *   if supported by the adapter.
4208  *
4209  *   Values:
4210  *     0 - disabled
4211  *     1 - enabled
4212  ************************************************************************/
4213 static int
4214 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4215 {
4216 	struct adapter  *adapter = (struct adapter *)arg1;
4217 	struct ixgbe_hw *hw = &adapter->hw;
4218 	int             new_wol_enabled;
4219 	int             error = 0;
4220 
4221 	new_wol_enabled = hw->wol_enabled;
4222 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4223 	if ((error) || (req->newptr == NULL))
4224 		return (error);
4225 	new_wol_enabled = !!(new_wol_enabled);
4226 	if (new_wol_enabled == hw->wol_enabled)
4227 		return (0);
4228 
4229 	if (new_wol_enabled > 0 && !adapter->wol_support)
4230 		return (ENODEV);
4231 	else
4232 		hw->wol_enabled = new_wol_enabled;
4233 
4234 	return (0);
4235 } /* ixgbe_sysctl_wol_enable */
4236 
4237 /************************************************************************
4238  * ixgbe_sysctl_wufc - Wake Up Filter Control
4239  *
4240  *   Sysctl to enable/disable the types of packets that the
4241  *   adapter will wake up on upon receipt.
4242  *   Flags:
4243  *     0x1  - Link Status Change
4244  *     0x2  - Magic Packet
4245  *     0x4  - Direct Exact
4246  *     0x8  - Directed Multicast
4247  *     0x10 - Broadcast
4248  *     0x20 - ARP/IPv4 Request Packet
4249  *     0x40 - Direct IPv4 Packet
4250  *     0x80 - Direct IPv6 Packet
4251  *
4252  *   Settings not listed above will cause the sysctl to return an error.
4253  ************************************************************************/
4254 static int
4255 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4256 {
4257 	struct adapter *adapter = (struct adapter *)arg1;
4258 	int            error = 0;
4259 	u32            new_wufc;
4260 
4261 	new_wufc = adapter->wufc;
4262 
4263 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4264 	if ((error) || (req->newptr == NULL))
4265 		return (error);
4266 	if (new_wufc == adapter->wufc)
4267 		return (0);
4268 
4269 	if (new_wufc & 0xffffff00)
4270 		return (EINVAL);
4271 
4272 	new_wufc &= 0xff;
4273 	new_wufc |= (0xffffff00 & adapter->wufc);
4274 	adapter->wufc = new_wufc;
4275 
4276 	return (0);
4277 } /* ixgbe_sysctl_wufc */
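
/*
 * Worked example (sketch): per the flag table above, a value of 0x12
 * (0x2 | 0x10) arms wake-up on Magic Packet and Broadcast frames only.
 * The sysctl path ("dev.ix.<unit>.wufc") is an assumption here; see the
 * driver's sysctl registration code for the authoritative name.
 */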
4278 
4279 #ifdef IXGBE_DEBUG
4280 /************************************************************************
4281  * ixgbe_sysctl_print_rss_config
4282  ************************************************************************/
4283 static int
4284 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4285 {
4286 	struct adapter  *adapter = (struct adapter *)arg1;
4287 	struct ixgbe_hw *hw = &adapter->hw;
4288 	device_t        dev = adapter->dev;
4289 	struct sbuf     *buf;
4290 	int             error = 0, reta_size;
4291 	u32             reg;
4292 
4293 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4294 	if (!buf) {
4295 		device_printf(dev, "Could not allocate sbuf for output.\n");
4296 		return (ENOMEM);
4297 	}
4298 
4299 	// TODO: use sbufs to make a string to print out
4300 	/* Set multiplier for RETA setup and table size based on MAC */
4301 	switch (adapter->hw.mac.type) {
4302 	case ixgbe_mac_X550:
4303 	case ixgbe_mac_X550EM_x:
4304 	case ixgbe_mac_X550EM_a:
4305 		reta_size = 128;
4306 		break;
4307 	default:
4308 		reta_size = 32;
4309 		break;
4310 	}
4311 
4312 	/* Print out the redirection table */
4313 	sbuf_cat(buf, "\n");
4314 	for (int i = 0; i < reta_size; i++) {
4315 		if (i < 32) {
4316 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4317 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4318 		} else {
4319 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4320 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4321 		}
4322 	}
4323 
4324 	// TODO: print more config
4325 
4326 	error = sbuf_finish(buf);
4327 	if (error)
4328 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4329 
4330 	sbuf_delete(buf);
4331 
4332 	return (0);
4333 } /* ixgbe_sysctl_print_rss_config */
4334 #endif /* IXGBE_DEBUG */
4335 
4336 /************************************************************************
4337  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4338  *
4339  *   For X552/X557-AT devices using an external PHY
4340  ************************************************************************/
4341 static int
4342 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4343 {
4344 	struct adapter  *adapter = (struct adapter *)arg1;
4345 	struct ixgbe_hw *hw = &adapter->hw;
4346 	u16             reg;
4347 
4348 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4349 		device_printf(iflib_get_dev(adapter->ctx),
4350 		    "Device has no supported external thermal sensor.\n");
4351 		return (ENODEV);
4352 	}
4353 
4354 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4355 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4356 		device_printf(iflib_get_dev(adapter->ctx),
4357 		    "Error reading from PHY's current temperature register\n");
4358 		return (EAGAIN);
4359 	}
4360 
4361 	/* Shift temp for output */
4362 	reg = reg >> 8;
4363 
4364 	return (sysctl_handle_16(oidp, NULL, reg, req));
4365 } /* ixgbe_sysctl_phy_temp */
4366 
4367 /************************************************************************
4368  * ixgbe_sysctl_phy_overtemp_occurred
4369  *
4370  *   Reports (directly from the PHY) whether the current PHY
4371  *   temperature is over the overtemp threshold.
4372  ************************************************************************/
4373 static int
4374 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4375 {
4376 	struct adapter  *adapter = (struct adapter *)arg1;
4377 	struct ixgbe_hw *hw = &adapter->hw;
4378 	u16             reg;
4379 
4380 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4381 		device_printf(iflib_get_dev(adapter->ctx),
4382 		    "Device has no supported external thermal sensor.\n");
4383 		return (ENODEV);
4384 	}
4385 
4386 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4387 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4388 		device_printf(iflib_get_dev(adapter->ctx),
4389 		    "Error reading from PHY's temperature status register\n");
4390 		return (EAGAIN);
4391 	}
4392 
4393 	/* Get occurrence bit */
4394 	reg = !!(reg & 0x4000);
4395 
4396 	return (sysctl_handle_16(oidp, 0, reg, req));
4397 } /* ixgbe_sysctl_phy_overtemp_occurred */
4398 
4399 /************************************************************************
4400  * ixgbe_sysctl_eee_state
4401  *
4402  *   Sysctl to set EEE power saving feature
4403  *   Values:
4404  *     0      - disable EEE
4405  *     1      - enable EEE
4406  *     (none) - get current device EEE state
4407  ************************************************************************/
4408 static int
4409 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4410 {
4411 	struct adapter *adapter = (struct adapter *)arg1;
4412 	device_t       dev = adapter->dev;
4413 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4414 	int            curr_eee, new_eee, error = 0;
4415 	s32            retval;
4416 
4417 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4418 
4419 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4420 	if ((error) || (req->newptr == NULL))
4421 		return (error);
4422 
4423 	/* Nothing to do */
4424 	if (new_eee == curr_eee)
4425 		return (0);
4426 
4427 	/* Not supported */
4428 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4429 		return (EINVAL);
4430 
4431 	/* Bounds checking */
4432 	if ((new_eee < 0) || (new_eee > 1))
4433 		return (EINVAL);
4434 
4435 	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4436 	if (retval) {
4437 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4438 		return (EINVAL);
4439 	}
4440 
4441 	/* Restart auto-neg */
4442 	ifp->if_init(ifp);
4443 
4444 	device_printf(dev, "New EEE state: %d\n", new_eee);
4445 
4446 	/* Cache new value */
4447 	if (new_eee)
4448 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4449 	else
4450 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4451 
4452 	return (error);
4453 } /* ixgbe_sysctl_eee_state */
4454 
4455 /************************************************************************
4456  * ixgbe_init_device_features
4457  ************************************************************************/
4458 static void
4459 ixgbe_init_device_features(struct adapter *adapter)
4460 {
4461 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4462 	                  | IXGBE_FEATURE_RSS
4463 	                  | IXGBE_FEATURE_MSI
4464 	                  | IXGBE_FEATURE_MSIX
4465 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4466 
4467 	/* Set capabilities first... */
4468 	switch (adapter->hw.mac.type) {
4469 	case ixgbe_mac_82598EB:
4470 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4471 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4472 		break;
4473 	case ixgbe_mac_X540:
4474 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4475 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4476 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4477 		    (adapter->hw.bus.func == 0))
4478 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4479 		break;
4480 	case ixgbe_mac_X550:
4481 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4482 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4483 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4484 		break;
4485 	case ixgbe_mac_X550EM_x:
4486 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4487 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4488 		break;
4489 	case ixgbe_mac_X550EM_a:
4490 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4491 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4492 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4493 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4494 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4495 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4496 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4497 		}
4498 		break;
4499 	case ixgbe_mac_82599EB:
4500 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4501 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4502 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4503 		    (adapter->hw.bus.func == 0))
4504 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4505 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4506 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4507 		break;
4508 	default:
4509 		break;
4510 	}
4511 
4512 	/* Enabled by default... */
4513 	/* Fan failure detection */
4514 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4515 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4516 	/* Netmap */
4517 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4518 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4519 	/* EEE */
4520 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4521 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4522 	/* Thermal Sensor */
4523 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4524 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4525 
4526 	/* Enabled via global sysctl... */
4527 	/* Flow Director */
4528 	if (ixgbe_enable_fdir) {
4529 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4530 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4531 		else
4532 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4533 	}
4534 	/*
4535 	 * Message Signal Interrupts - Extended (MSI-X)
4536 	 * Normal MSI is only enabled if MSI-X calls fail.
4537 	 */
4538 	if (!ixgbe_enable_msix)
4539 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4540 	/* Receive-Side Scaling (RSS) */
4541 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4542 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4543 
4544 	/* Disable features with unmet dependencies... */
4545 	/* No MSI-X */
4546 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4547 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4548 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4549 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4550 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4551 	}
4552 } /* ixgbe_init_device_features */
4553 
4554 /************************************************************************
4555  * ixgbe_check_fan_failure
4556  ************************************************************************/
4557 static void
4558 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4559 {
4560 	u32 mask;
4561 
4562 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4563 	    IXGBE_ESDP_SDP1;
4564 
4565 	if (reg & mask)
4566 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4567 } /* ixgbe_check_fan_failure */
4568