1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select which devices to attach to
57  *   Last field is the device description string
58  *   Last entry must be all 0s (PVID_END)
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static void ixgbe_link_intr_enable(if_ctx_t ctx);
124 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
125 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
126 static int  ixgbe_if_media_change(if_ctx_t ctx);
127 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
128 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
129 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
130 static void ixgbe_if_multi_set(if_ctx_t ctx);
131 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
132 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
134 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
135                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
136 static void ixgbe_if_queues_free(if_ctx_t ctx);
137 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
138 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
139 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
140 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
141 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
142 int ixgbe_intr(void *arg);
143 
144 /************************************************************************
145  * Function prototypes
146  ************************************************************************/
147 #if __FreeBSD_version >= 1100036
148 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
149 #endif
150 
151 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
153 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
154 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
155 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
156 
157 static void ixgbe_config_dmac(struct adapter *adapter);
158 static void ixgbe_configure_ivars(struct adapter *adapter);
159 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
160                            s8 type);
161 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162 static bool ixgbe_sfp_probe(if_ctx_t ctx);
163 
164 static void ixgbe_free_pci_resources(if_ctx_t ctx);
165 
166 static int  ixgbe_msix_link(void *arg);
167 static int  ixgbe_msix_que(void *arg);
168 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
169 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
170 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
171 
172 static int  ixgbe_setup_interface(if_ctx_t ctx);
173 static void ixgbe_init_device_features(struct adapter *adapter);
174 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
175 static void ixgbe_add_media_types(if_ctx_t ctx);
176 static void ixgbe_update_stats_counters(struct adapter *adapter);
177 static void ixgbe_config_link(if_ctx_t ctx);
178 static void ixgbe_get_slot_info(struct adapter *);
179 static void ixgbe_check_wol_support(struct adapter *adapter);
180 static void ixgbe_enable_rx_drop(struct adapter *);
181 static void ixgbe_disable_rx_drop(struct adapter *);
182 
183 static void ixgbe_add_hw_stats(struct adapter *adapter);
184 static int  ixgbe_set_flowcntl(struct adapter *, int);
185 static int  ixgbe_set_advertise(struct adapter *, int);
186 static int  ixgbe_get_advertise(struct adapter *);
187 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
188 static void ixgbe_config_gpie(struct adapter *adapter);
189 static void ixgbe_config_delay_values(struct adapter *adapter);
190 
191 /* Sysctl handlers */
192 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
197 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
198 #ifdef IXGBE_DEBUG
199 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
200 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
201 #endif
202 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
208 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
209 
210 /* Deferred interrupt tasklets */
211 static void ixgbe_handle_msf(void *);
212 static void ixgbe_handle_mod(void *);
213 static void ixgbe_handle_phy(void *);
214 
215 /************************************************************************
216  *  FreeBSD Device Interface Entry Points
217  ************************************************************************/
218 static device_method_t ix_methods[] = {
219 	/* Device interface */
220 	DEVMETHOD(device_register, ixgbe_register),
221 	DEVMETHOD(device_probe, iflib_device_probe),
222 	DEVMETHOD(device_attach, iflib_device_attach),
223 	DEVMETHOD(device_detach, iflib_device_detach),
224 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
225 	DEVMETHOD(device_suspend, iflib_device_suspend),
226 	DEVMETHOD(device_resume, iflib_device_resume),
227 #ifdef PCI_IOV
228 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
229 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
230 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
231 #endif /* PCI_IOV */
232 	DEVMETHOD_END
233 };
234 
235 static driver_t ix_driver = {
236 	"ix", ix_methods, sizeof(struct adapter),
237 };
238 
239 devclass_t ix_devclass;
240 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
241 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
242 MODULE_DEPEND(ix, pci, 1, 1, 1);
243 MODULE_DEPEND(ix, ether, 1, 1, 1);
244 MODULE_DEPEND(ix, iflib, 1, 1, 1);
245 
246 static device_method_t ixgbe_if_methods[] = {
247 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
248 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
249 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
250 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
251 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
252 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
253 	DEVMETHOD(ifdi_init, ixgbe_if_init),
254 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
255 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
256 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
257 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
258 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
259 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
260 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
261 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
262 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
263 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
264 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
265 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
266 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
267 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
268 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
269 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
270 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
271 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
272 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
273 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
274 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
275 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
276 #ifdef PCI_IOV
277 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
278 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
279 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
280 #endif /* PCI_IOV */
281 	DEVMETHOD_END
282 };
283 
284 /*
285  * TUNEABLE PARAMETERS:
286  */
287 
288 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
289 static driver_t ixgbe_if_driver = {
290   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
291 };
292 
293 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
294 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
295     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
296 
297 /* Flow control setting, default to full */
298 static int ixgbe_flow_control = ixgbe_fc_full;
299 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
300     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
301 
302 /* Advertise Speed, default to 0 (auto) */
303 static int ixgbe_advertise_speed = 0;
304 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
305     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
306 
307 /*
308  * Smart speed setting, default to on.
309  * This only works as a compile option
310  * right now because it is consumed during
311  * attach; set it to 'ixgbe_smart_speed_off'
312  * to disable.
313  */
314 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
315 
316 /*
317  * MSI-X should be the default for best performance,
318  * but this allows it to be forced off for testing.
319  */
320 static int ixgbe_enable_msix = 1;
321 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
322     "Enable MSI-X interrupts");
323 
324 /*
325  * Setting this to TRUE allows the use
326  * of unsupported SFP+ modules; note that
327  * if you do so, you are on your own :)
328  */
329 static int allow_unsupported_sfp = FALSE;
330 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
331     &allow_unsupported_sfp, 0,
332     "Allow unsupported SFP modules...use at your own risk");
333 
334 /*
335  * Not sure if Flow Director is fully baked,
336  * so we'll default to turning it off.
337  */
338 static int ixgbe_enable_fdir = 0;
339 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
340     "Enable Flow Director");
341 
342 /* Receive-Side Scaling */
343 static int ixgbe_enable_rss = 1;
344 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
345     "Enable Receive-Side Scaling (RSS)");
346 
347 #if 0
348 /* Keep running tab on them for sanity check */
349 static int ixgbe_total_ports;
350 #endif
351 
352 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
353 
354 /*
355  * For Flow Director: this is the rate at which we sample TX packets
356  * for the filter pool; a rate of 20 means every 20th packet is probed.
357  *
358  * This feature can be disabled by setting this to 0.
359  */
360 static int atr_sample_rate = 20;
361 
362 extern struct if_txrx ixgbe_txrx;
363 
364 static struct if_shared_ctx ixgbe_sctx_init = {
365 	.isc_magic = IFLIB_MAGIC,
366 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
367 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
368 	.isc_tx_maxsegsize = PAGE_SIZE,
369 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
370 	.isc_tso_maxsegsize = PAGE_SIZE,
371 	.isc_rx_maxsize = PAGE_SIZE*4,
372 	.isc_rx_nsegments = 1,
373 	.isc_rx_maxsegsize = PAGE_SIZE*4,
374 	.isc_nfl = 1,
375 	.isc_ntxqs = 1,
376 	.isc_nrxqs = 1,
377 
378 	.isc_admin_intrcnt = 1,
379 	.isc_vendor_info = ixgbe_vendor_info_array,
380 	.isc_driver_version = ixgbe_driver_version,
381 	.isc_driver = &ixgbe_if_driver,
382 	.isc_flags = IFLIB_TSO_INIT_IP,
383 
384 	.isc_nrxd_min = {MIN_RXD},
385 	.isc_ntxd_min = {MIN_TXD},
386 	.isc_nrxd_max = {MAX_RXD},
387 	.isc_ntxd_max = {MAX_TXD},
388 	.isc_nrxd_default = {DEFAULT_RXD},
389 	.isc_ntxd_default = {DEFAULT_TXD},
390 };
391 
392 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
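
/*
 * iflib uses the ring bounds and defaults above (isc_n[tr]xd_min/max/
 * default), together with the queue sizes set during attach, to allocate
 * the descriptor DMA memory itself; it then hands the resulting
 * vaddrs/paddrs to the ifdi_tx/rx_queues_alloc methods implemented below.
 */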
393 
394 /************************************************************************
395  * ixgbe_if_tx_queues_alloc
396  ************************************************************************/
397 static int
398 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
399                          int ntxqs, int ntxqsets)
400 {
401 	struct adapter     *adapter = iflib_get_softc(ctx);
402 	if_softc_ctx_t     scctx = adapter->shared;
403 	struct ix_tx_queue *que;
404 	int                i, j, error;
405 
406 	MPASS(adapter->num_tx_queues > 0);
407 	MPASS(adapter->num_tx_queues == ntxqsets);
408 	MPASS(ntxqs == 1);
409 
410 	/* Allocate queue structure memory */
411 	adapter->tx_queues =
412 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
413 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
414 	if (!adapter->tx_queues) {
415 		device_printf(iflib_get_dev(ctx),
416 		    "Unable to allocate TX ring memory\n");
417 		return (ENOMEM);
418 	}
419 
420 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
421 		struct tx_ring *txr = &que->txr;
422 
423 		/* In case SR-IOV is enabled, align the index properly */
424 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
425 		    i);
426 
427 		txr->adapter = que->adapter = adapter;
428 
429 		/* Allocate report status array */
430 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
431 		if (txr->tx_rsq == NULL) {
432 			error = ENOMEM;
433 			goto fail;
434 		}
435 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
436 			txr->tx_rsq[j] = QIDX_INVALID;
437 		/* get the virtual and physical address of the hardware queues */
438 		txr->tail = IXGBE_TDT(txr->me);
439 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
440 		txr->tx_paddr = paddrs[i];
441 
442 		txr->bytes = 0;
443 		txr->total_packets = 0;
444 
445 		/* Set the rate at which we sample packets */
446 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
447 			txr->atr_sample = atr_sample_rate;
448 
449 	}
450 
451 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
452 	    adapter->num_tx_queues);
453 
454 	return (0);
455 
456 fail:
457 	ixgbe_if_queues_free(ctx);
458 
459 	return (error);
460 } /* ixgbe_if_tx_queues_alloc */
461 
462 /************************************************************************
463  * ixgbe_if_rx_queues_alloc
464  ************************************************************************/
465 static int
466 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
467                          int nrxqs, int nrxqsets)
468 {
469 	struct adapter     *adapter = iflib_get_softc(ctx);
470 	struct ix_rx_queue *que;
471 	int                i;
472 
473 	MPASS(adapter->num_rx_queues > 0);
474 	MPASS(adapter->num_rx_queues == nrxqsets);
475 	MPASS(nrxqs == 1);
476 
477 	/* Allocate queue structure memory */
478 	adapter->rx_queues =
479 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
480 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
481 	if (!adapter->rx_queues) {
482 		device_printf(iflib_get_dev(ctx),
483 		    "Unable to allocate RX ring memory\n");
484 		return (ENOMEM);
485 	}
486 
487 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
488 		struct rx_ring *rxr = &que->rxr;
489 
490 		/* In case SR-IOV is enabled, align the index properly */
491 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
492 		    i);
493 
494 		rxr->adapter = que->adapter = adapter;
495 
496 		/* get the virtual and physical address of the hw queues */
497 		rxr->tail = IXGBE_RDT(rxr->me);
498 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
499 		rxr->rx_paddr = paddrs[i];
500 		rxr->bytes = 0;
501 		rxr->que = que;
502 	}
503 
504 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
505 	    adapter->num_rx_queues);
506 
507 	return (0);
508 } /* ixgbe_if_rx_queues_alloc */
509 
510 /************************************************************************
511  * ixgbe_if_queues_free
512  ************************************************************************/
513 static void
514 ixgbe_if_queues_free(if_ctx_t ctx)
515 {
516 	struct adapter     *adapter = iflib_get_softc(ctx);
517 	struct ix_tx_queue *tx_que = adapter->tx_queues;
518 	struct ix_rx_queue *rx_que = adapter->rx_queues;
519 	int                i;
520 
521 	if (tx_que != NULL) {
522 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
523 			struct tx_ring *txr = &tx_que->txr;
524 			if (txr->tx_rsq == NULL)
525 				break;
526 
527 			free(txr->tx_rsq, M_IXGBE);
528 			txr->tx_rsq = NULL;
529 		}
530 
531 		free(adapter->tx_queues, M_IXGBE);
532 		adapter->tx_queues = NULL;
533 	}
534 	if (rx_que != NULL) {
535 		free(adapter->rx_queues, M_IXGBE);
536 		adapter->rx_queues = NULL;
537 	}
538 } /* ixgbe_if_queues_free */
539 
540 /************************************************************************
541  * ixgbe_initialize_rss_mapping
542  ************************************************************************/
543 static void
544 ixgbe_initialize_rss_mapping(struct adapter *adapter)
545 {
546 	struct ixgbe_hw *hw = &adapter->hw;
547 	u32             reta = 0, mrqc, rss_key[10];
548 	int             queue_id, table_size, index_mult;
549 	int             i, j;
550 	u32             rss_hash_config;
551 
552 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
553 		/* Fetch the configured RSS key */
554 		rss_getkey((uint8_t *)&rss_key);
555 	} else {
556 		/* set up random bits */
557 		arc4rand(&rss_key, sizeof(rss_key), 0);
558 	}
559 
560 	/* Set multiplier for RETA setup and table size based on MAC */
561 	index_mult = 0x1;
562 	table_size = 128;
563 	switch (adapter->hw.mac.type) {
564 	case ixgbe_mac_82598EB:
565 		index_mult = 0x11;
566 		break;
567 	case ixgbe_mac_X550:
568 	case ixgbe_mac_X550EM_x:
569 	case ixgbe_mac_X550EM_a:
570 		table_size = 512;
571 		break;
572 	default:
573 		break;
574 	}
575 
576 	/* Set up the redirection table */
577 	for (i = 0, j = 0; i < table_size; i++, j++) {
578 		if (j == adapter->num_rx_queues)
579 			j = 0;
580 
581 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
582 			/*
583 			 * Fetch the RSS bucket id for the given indirection
584 			 * entry. Cap it at the number of configured buckets
585 			 * (which is num_rx_queues.)
586 			 */
587 			queue_id = rss_get_indirection_to_bucket(i);
588 			queue_id = queue_id % adapter->num_rx_queues;
589 		} else
590 			queue_id = (j * index_mult);
591 
592 		/*
593 		 * The low 8 bits are for hash value (n+0);
594 		 * The next 8 bits are for hash value (n+1), etc.
595 		 */
596 		reta = reta >> 8;
597 		reta = reta | (((uint32_t)queue_id) << 24);
598 		if ((i & 3) == 3) {
599 			if (i < 128)
600 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
601 			else
602 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
603 				    reta);
604 			reta = 0;
605 		}
606 	}
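
	/*
	 * Worked example (illustrative): on a non-82598 MAC with four RX
	 * queues and the default mapping, entries n+0..n+3 are 0,1,2,3, so
	 * each 32-bit RETA register written above ends up as 0x03020100:
	 * entry n+0 in the low byte, entry n+3 in the high byte.
	 */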
607 
608 	/* Now fill our hash function seeds */
609 	for (i = 0; i < 10; i++)
610 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
611 
612 	/* Perform hash on these packet types */
613 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
614 		rss_hash_config = rss_gethashconfig();
615 	else {
616 		/*
617 		 * Disable UDP - IP fragments aren't currently being handled
618 		 * and so we end up with a mix of 2-tuple and 4-tuple
619 		 * traffic.
620 		 */
621 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
622 		                | RSS_HASHTYPE_RSS_TCP_IPV4
623 		                | RSS_HASHTYPE_RSS_IPV6
624 		                | RSS_HASHTYPE_RSS_TCP_IPV6
625 		                | RSS_HASHTYPE_RSS_IPV6_EX
626 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
627 	}
628 
629 	mrqc = IXGBE_MRQC_RSSEN;
630 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
631 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
632 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
633 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
634 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
635 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
636 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
637 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
638 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
639 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
640 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
641 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
642 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
643 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
644 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
645 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
646 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
647 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
648 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
649 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
650 } /* ixgbe_initialize_rss_mapping */
651 
652 /************************************************************************
653  * ixgbe_initialize_receive_units - Setup receive registers and features.
654  ************************************************************************/
655 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
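/*
 * Example (illustrative): IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, so with the
 * common 2KB receive mbuf size the rounding below works out to
 * (2048 + 1023) >> 10 = 2, i.e. SRRCTL.BSIZEPKT is programmed in 1KB
 * units and a value of 2 selects 2KB packet buffers.
 */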
656 
657 static void
658 ixgbe_initialize_receive_units(if_ctx_t ctx)
659 {
660 	struct adapter     *adapter = iflib_get_softc(ctx);
661 	if_softc_ctx_t     scctx = adapter->shared;
662 	struct ixgbe_hw    *hw = &adapter->hw;
663 	struct ifnet       *ifp = iflib_get_ifp(ctx);
664 	struct ix_rx_queue *que;
665 	int                i, j;
666 	u32                bufsz, fctrl, srrctl, rxcsum;
667 	u32                hlreg;
668 
669 	/*
670 	 * Make sure receives are disabled while
671 	 * setting up the descriptor ring
672 	 */
673 	ixgbe_disable_rx(hw);
674 
675 	/* Enable broadcasts */
676 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
677 	fctrl |= IXGBE_FCTRL_BAM;
678 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
679 		fctrl |= IXGBE_FCTRL_DPF;
680 		fctrl |= IXGBE_FCTRL_PMCF;
681 	}
682 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
683 
684 	/* Set for Jumbo Frames? */
685 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
686 	if (ifp->if_mtu > ETHERMTU)
687 		hlreg |= IXGBE_HLREG0_JUMBOEN;
688 	else
689 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
690 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
691 
692 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
693 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
694 
695 	/* Setup the Base and Length of the Rx Descriptor Ring */
696 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
697 		struct rx_ring *rxr = &que->rxr;
698 		u64            rdba = rxr->rx_paddr;
699 
700 		j = rxr->me;
701 
702 		/* Setup the Base and Length of the Rx Descriptor Ring */
703 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
704 		    (rdba & 0x00000000ffffffffULL));
705 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
706 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
707 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
708 
709 		/* Set up the SRRCTL register */
710 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
711 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
712 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
713 		srrctl |= bufsz;
714 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
715 
716 		/*
717 		 * Set DROP_EN iff we have no flow control and >1 queue.
718 		 * Note that srrctl was cleared shortly before during reset,
719 		 * so we do not need to clear the bit, but do it just in case
720 		 * this code is moved elsewhere.
721 		 */
722 		if (adapter->num_rx_queues > 1 &&
723 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
724 			srrctl |= IXGBE_SRRCTL_DROP_EN;
725 		} else {
726 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
727 		}
728 
729 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
730 
731 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
732 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
733 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
734 
735 		/* Set the driver rx tail address */
736 		rxr->tail = IXGBE_RDT(rxr->me);
737 	}
738 
739 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
740 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
741 		            | IXGBE_PSRTYPE_UDPHDR
742 		            | IXGBE_PSRTYPE_IPV4HDR
743 		            | IXGBE_PSRTYPE_IPV6HDR;
744 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
745 	}
746 
747 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
748 
749 	ixgbe_initialize_rss_mapping(adapter);
750 
751 	if (adapter->num_rx_queues > 1) {
752 		/* RSS and RX IPP Checksum are mutually exclusive */
753 		rxcsum |= IXGBE_RXCSUM_PCSD;
754 	}
755 
756 	if (ifp->if_capenable & IFCAP_RXCSUM)
757 		rxcsum |= IXGBE_RXCSUM_PCSD;
758 
759 	/* This is useful for calculating UDP/IP fragment checksums */
760 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
761 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
762 
763 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
764 
765 } /* ixgbe_initialize_receive_units */
766 
767 /************************************************************************
768  * ixgbe_initialize_transmit_units - Enable transmit units.
769  ************************************************************************/
770 static void
771 ixgbe_initialize_transmit_units(if_ctx_t ctx)
772 {
773 	struct adapter     *adapter = iflib_get_softc(ctx);
774 	struct ixgbe_hw    *hw = &adapter->hw;
775 	if_softc_ctx_t     scctx = adapter->shared;
776 	struct ix_tx_queue *que;
777 	int i;
778 
779 	/* Setup the Base and Length of the Tx Descriptor Ring */
780 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
781 	    i++, que++) {
782 		struct tx_ring	   *txr = &que->txr;
783 		u64 tdba = txr->tx_paddr;
784 		u32 txctrl = 0;
785 		int j = txr->me;
786 
787 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
788 		    (tdba & 0x00000000ffffffffULL));
789 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
790 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
791 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
792 
793 		/* Setup the HW Tx Head and Tail descriptor pointers */
794 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
795 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
796 
797 		/* Cache the tail address */
798 		txr->tail = IXGBE_TDT(txr->me);
799 
800 		txr->tx_rs_cidx = txr->tx_rs_pidx;
801 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
802 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
803 			txr->tx_rsq[k] = QIDX_INVALID;
804 
805 		/* Disable Head Writeback */
806 		/*
807 		 * Note: for X550 series devices, these registers are actually
808 		 * prefixed with TPH_ instead of DCA_, but the addresses and
809 		 * fields remain the same.
810 		 */
811 		switch (hw->mac.type) {
812 		case ixgbe_mac_82598EB:
813 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
814 			break;
815 		default:
816 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
817 			break;
818 		}
819 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
820 		switch (hw->mac.type) {
821 		case ixgbe_mac_82598EB:
822 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
823 			break;
824 		default:
825 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
826 			break;
827 		}
828 
829 	}
830 
831 	if (hw->mac.type != ixgbe_mac_82598EB) {
832 		u32 dmatxctl, rttdcs;
833 
834 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
835 		dmatxctl |= IXGBE_DMATXCTL_TE;
836 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
837 		/* Disable arbiter to set MTQC */
838 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
839 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
840 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
841 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
842 		    ixgbe_get_mtqc(adapter->iov_mode));
843 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
844 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
845 	}
846 
847 } /* ixgbe_initialize_transmit_units */
848 
849 /************************************************************************
850  * ixgbe_register
851  ************************************************************************/
852 static void *
853 ixgbe_register(device_t dev)
854 {
855 	return (ixgbe_sctx);
856 } /* ixgbe_register */
857 
858 /************************************************************************
859  * ixgbe_if_attach_pre - Device initialization routine, part 1
860  *
861  *   Called when the driver is being loaded.
862  *   Identifies the type of hardware, initializes the hardware,
863  *   and initializes iflib structures.
864  *
865  *   return 0 on success, positive on failure
866  ************************************************************************/
867 static int
868 ixgbe_if_attach_pre(if_ctx_t ctx)
869 {
870 	struct adapter  *adapter;
871 	device_t        dev;
872 	if_softc_ctx_t  scctx;
873 	struct ixgbe_hw *hw;
874 	int             error = 0;
875 	u32             ctrl_ext;
876 
877 	INIT_DEBUGOUT("ixgbe_attach: begin");
878 
879 	/* Allocate, clear, and link in our adapter structure */
880 	dev = iflib_get_dev(ctx);
881 	adapter = iflib_get_softc(ctx);
882 	adapter->hw.back = adapter;
883 	adapter->ctx = ctx;
884 	adapter->dev = dev;
885 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
886 	adapter->media = iflib_get_media(ctx);
887 	hw = &adapter->hw;
888 
889 	/* Determine hardware revision */
890 	hw->vendor_id = pci_get_vendor(dev);
891 	hw->device_id = pci_get_device(dev);
892 	hw->revision_id = pci_get_revid(dev);
893 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
894 	hw->subsystem_device_id = pci_get_subdevice(dev);
895 
896 	/* Do base PCI setup - map BAR0 */
897 	if (ixgbe_allocate_pci_resources(ctx)) {
898 		device_printf(dev, "Allocation of PCI resources failed\n");
899 		return (ENXIO);
900 	}
901 
902 	/* let hardware know driver is loaded */
903 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
904 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
905 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
906 
907 	/*
908 	 * Initialize the shared code
909 	 */
910 	if (ixgbe_init_shared_code(hw) != 0) {
911 		device_printf(dev, "Unable to initialize the shared code\n");
912 		error = ENXIO;
913 		goto err_pci;
914 	}
915 
916 	if (hw->mbx.ops.init_params)
917 		hw->mbx.ops.init_params(hw);
918 
919 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
920 
921 	if (hw->mac.type != ixgbe_mac_82598EB)
922 		hw->phy.smart_speed = ixgbe_smart_speed;
923 
924 	ixgbe_init_device_features(adapter);
925 
926 	/* Enable WoL (if supported) */
927 	ixgbe_check_wol_support(adapter);
928 
929 	/* Verify adapter fan is still functional (if applicable) */
930 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
931 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
932 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
933 	}
934 
935 	/* Ensure SW/FW semaphore is free */
936 	ixgbe_init_swfw_semaphore(hw);
937 
938 	/* Set an initial default flow control value */
939 	hw->fc.requested_mode = ixgbe_flow_control;
940 
941 	hw->phy.reset_if_overtemp = TRUE;
942 	error = ixgbe_reset_hw(hw);
943 	hw->phy.reset_if_overtemp = FALSE;
944 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
945 		/*
946 		 * No optics in this port, set up
947 		 * so the timer routine will probe
948 		 * for later insertion.
949 		 */
950 		adapter->sfp_probe = TRUE;
951 		error = 0;
952 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
953 		device_printf(dev, "Unsupported SFP+ module detected!\n");
954 		error = EIO;
955 		goto err_pci;
956 	} else if (error) {
957 		device_printf(dev, "Hardware initialization failed\n");
958 		error = EIO;
959 		goto err_pci;
960 	}
961 
962 	/* Make sure we have a good EEPROM before we read from it */
963 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
964 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
965 		error = EIO;
966 		goto err_pci;
967 	}
968 
969 	error = ixgbe_start_hw(hw);
970 	switch (error) {
971 	case IXGBE_ERR_EEPROM_VERSION:
972 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
973 		break;
974 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
975 		device_printf(dev, "Unsupported SFP+ Module\n");
976 		error = EIO;
977 		goto err_pci;
978 	case IXGBE_ERR_SFP_NOT_PRESENT:
979 		device_printf(dev, "No SFP+ Module found\n");
980 		/* falls thru */
981 	default:
982 		break;
983 	}
984 
985 	/* Most of the iflib initialization... */
986 
987 	iflib_set_mac(ctx, hw->mac.addr);
988 	switch (adapter->hw.mac.type) {
989 	case ixgbe_mac_X550:
990 	case ixgbe_mac_X550EM_x:
991 	case ixgbe_mac_X550EM_a:
992 		scctx->isc_rss_table_size = 512;
993 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
994 		break;
995 	default:
996 		scctx->isc_rss_table_size = 128;
997 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
998 	}
999 
1000 	/* Allow legacy interrupts */
1001 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1002 
1003 	scctx->isc_txqsizes[0] =
1004 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1005 	    sizeof(u32), DBA_ALIGN);
1006 	scctx->isc_rxqsizes[0] =
1007 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1008 	    DBA_ALIGN);
1009 
1010 	/* XXX */
1011 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1012 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1013 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1014 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1015 	} else {
1016 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1017 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1018 	}
1019 
1020 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1021 
1022 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1023 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1024 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1025 
1026 	scctx->isc_txrx = &ixgbe_txrx;
1027 
1028 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1029 
1030 	return (0);
1031 
1032 err_pci:
1033 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1034 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1035 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1036 	ixgbe_free_pci_resources(ctx);
1037 
1038 	return (error);
1039 } /* ixgbe_if_attach_pre */
1040 
1041  /*********************************************************************
1042  * ixgbe_if_attach_post - Device initialization routine, part 2
1043  *
1044  *   Called during driver load, but after interrupts and
1045  *   resources have been allocated and configured.
1046  *   Sets up some data structures not relevant to iflib.
1047  *
1048  *   return 0 on success, positive on failure
1049  *********************************************************************/
1050 static int
1051 ixgbe_if_attach_post(if_ctx_t ctx)
1052 {
1053 	device_t dev;
1054 	struct adapter  *adapter;
1055 	struct ixgbe_hw *hw;
1056 	int             error = 0;
1057 
1058 	dev = iflib_get_dev(ctx);
1059 	adapter = iflib_get_softc(ctx);
1060 	hw = &adapter->hw;
1061 
1062 
1063 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1064 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1065 		device_printf(dev, "Device does not support legacy interrupts\n");
1066 		error = ENXIO;
1067 		goto err;
1068 	}
1069 
1070 	/* Allocate multicast array memory. */
1071 	adapter->mta = malloc(sizeof(*adapter->mta) *
1072 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1073 	if (adapter->mta == NULL) {
1074 		device_printf(dev, "Can not allocate multicast setup array\n");
1075 		error = ENOMEM;
1076 		goto err;
1077 	}
1078 
1079 	/* hw.ix defaults init */
1080 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1081 
1082 	/* Enable the optics for 82599 SFP+ fiber */
1083 	ixgbe_enable_tx_laser(hw);
1084 
1085 	/* Enable power to the phy. */
1086 	ixgbe_set_phy_power(hw, TRUE);
1087 
1088 	ixgbe_initialize_iov(adapter);
1089 
1090 	error = ixgbe_setup_interface(ctx);
1091 	if (error) {
1092 		device_printf(dev, "Interface setup failed: %d\n", error);
1093 		goto err;
1094 	}
1095 
1096 	ixgbe_if_update_admin_status(ctx);
1097 
1098 	/* Initialize statistics */
1099 	ixgbe_update_stats_counters(adapter);
1100 	ixgbe_add_hw_stats(adapter);
1101 
1102 	/* Check PCIE slot type/speed/width */
1103 	ixgbe_get_slot_info(adapter);
1104 
1105 	/*
1106 	 * Do time init and sysctl init here, but
1107 	 * only on the first port of a bypass adapter.
1108 	 */
1109 	ixgbe_bypass_init(adapter);
1110 
1111 	/* Set an initial dmac value */
1112 	adapter->dmac = 0;
1113 	/* Set initial advertised speeds (if applicable) */
1114 	adapter->advertise = ixgbe_get_advertise(adapter);
1115 
1116 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1117 		ixgbe_define_iov_schemas(dev, &error);
1118 
1119 	/* Add sysctls */
1120 	ixgbe_add_device_sysctls(ctx);
1121 
1122 	return (0);
1123 err:
1124 	return (error);
1125 } /* ixgbe_if_attach_post */
1126 
1127 /************************************************************************
1128  * ixgbe_check_wol_support
1129  *
1130  *   Checks whether the adapter's ports are capable of
1131  *   Wake On LAN by reading the adapter's NVM.
1132  *
1133  *   Sets each port's hw->wol_enabled value depending
1134  *   on the value read here.
1135  ************************************************************************/
1136 static void
1137 ixgbe_check_wol_support(struct adapter *adapter)
1138 {
1139 	struct ixgbe_hw *hw = &adapter->hw;
1140 	u16             dev_caps = 0;
1141 
1142 	/* Find out WoL support for port */
1143 	adapter->wol_support = hw->wol_enabled = 0;
1144 	ixgbe_get_device_caps(hw, &dev_caps);
1145 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1146 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1147 	     hw->bus.func == 0))
1148 		adapter->wol_support = hw->wol_enabled = 1;
1149 
1150 	/* Save initial wake up filter configuration */
1151 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1152 
1153 	return;
1154 } /* ixgbe_check_wol_support */
1155 
1156 /************************************************************************
1157  * ixgbe_setup_interface
1158  *
1159  *   Setup networking device structure and register an interface.
1160  ************************************************************************/
1161 static int
1162 ixgbe_setup_interface(if_ctx_t ctx)
1163 {
1164 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1165 	struct adapter *adapter = iflib_get_softc(ctx);
1166 
1167 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1168 
1169 	if_setbaudrate(ifp, IF_Gbps(10));
1170 
1171 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1172 
1173 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1174 
1175 	ixgbe_add_media_types(ctx);
1176 
1177 	/* Autoselect media by default */
1178 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1179 
1180 	return (0);
1181 } /* ixgbe_setup_interface */
1182 
1183 /************************************************************************
1184  * ixgbe_if_get_counter
1185  ************************************************************************/
1186 static uint64_t
1187 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1188 {
1189 	struct adapter *adapter = iflib_get_softc(ctx);
1190 	if_t           ifp = iflib_get_ifp(ctx);
1191 
1192 	switch (cnt) {
1193 	case IFCOUNTER_IPACKETS:
1194 		return (adapter->ipackets);
1195 	case IFCOUNTER_OPACKETS:
1196 		return (adapter->opackets);
1197 	case IFCOUNTER_IBYTES:
1198 		return (adapter->ibytes);
1199 	case IFCOUNTER_OBYTES:
1200 		return (adapter->obytes);
1201 	case IFCOUNTER_IMCASTS:
1202 		return (adapter->imcasts);
1203 	case IFCOUNTER_OMCASTS:
1204 		return (adapter->omcasts);
1205 	case IFCOUNTER_COLLISIONS:
1206 		return (0);
1207 	case IFCOUNTER_IQDROPS:
1208 		return (adapter->iqdrops);
1209 	case IFCOUNTER_OQDROPS:
1210 		return (0);
1211 	case IFCOUNTER_IERRORS:
1212 		return (adapter->ierrors);
1213 	default:
1214 		return (if_get_counter_default(ifp, cnt));
1215 	}
1216 } /* ixgbe_if_get_counter */
1217 
1218 /************************************************************************
1219  * ixgbe_if_i2c_req
1220  ************************************************************************/
1221 static int
1222 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1223 {
1224 	struct adapter		*adapter = iflib_get_softc(ctx);
1225 	struct ixgbe_hw 	*hw = &adapter->hw;
1226 	int 			i;
1227 
1228 
1229 	if (hw->phy.ops.read_i2c_byte == NULL)
1230 		return (ENXIO);
1231 	for (i = 0; i < req->len; i++)
1232 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1233 		    req->dev_addr, &req->data[i]);
1234 	return (0);
1235 } /* ixgbe_if_i2c_req */
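
/*
 * Usage note (illustrative): iflib dispatches SIOCGI2C ioctls to this
 * method, so userland tools that read SFP/SFP+ module EEPROM and
 * diagnostic pages (for example "ifconfig -v ix0") are serviced here,
 * one ifi2creq at a time.
 */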
1236 
1237 /************************************************************************
1238  * ixgbe_add_media_types
1239  ************************************************************************/
1240 static void
1241 ixgbe_add_media_types(if_ctx_t ctx)
1242 {
1243 	struct adapter  *adapter = iflib_get_softc(ctx);
1244 	struct ixgbe_hw *hw = &adapter->hw;
1245 	device_t        dev = iflib_get_dev(ctx);
1246 	u64             layer;
1247 
1248 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1249 
1250 	/* Media types with matching FreeBSD media defines */
1251 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1252 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1253 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1254 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1255 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1256 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1257 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1258 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1259 
1260 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1261 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1262 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1263 		    NULL);
1264 
1265 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1266 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1267 		if (hw->phy.multispeed_fiber)
1268 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1269 			    NULL);
1270 	}
1271 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1272 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1273 		if (hw->phy.multispeed_fiber)
1274 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1275 			    NULL);
1276 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1277 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1278 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1279 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1280 
1281 #ifdef IFM_ETH_XTYPE
1282 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1283 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1284 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1285 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1286 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1287 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1288 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1289 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1290 #else
1291 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1292 		device_printf(dev, "Media supported: 10GbaseKR\n");
1293 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1294 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1295 	}
1296 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1297 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1298 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1299 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1300 	}
1301 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1302 		device_printf(dev, "Media supported: 1000baseKX\n");
1303 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1304 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1305 	}
1306 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1307 		device_printf(dev, "Media supported: 2500baseKX\n");
1308 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1309 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1310 	}
1311 #endif
1312 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1313 		device_printf(dev, "Media supported: 1000baseBX\n");
1314 
1315 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1316 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1317 		    0, NULL);
1318 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1319 	}
1320 
1321 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1322 } /* ixgbe_add_media_types */
1323 
1324 /************************************************************************
1325  * ixgbe_is_sfp
1326  ************************************************************************/
1327 static inline bool
1328 ixgbe_is_sfp(struct ixgbe_hw *hw)
1329 {
1330 	switch (hw->mac.type) {
1331 	case ixgbe_mac_82598EB:
1332 		if (hw->phy.type == ixgbe_phy_nl)
1333 			return (TRUE);
1334 		return (FALSE);
1335 	case ixgbe_mac_82599EB:
1336 		switch (hw->mac.ops.get_media_type(hw)) {
1337 		case ixgbe_media_type_fiber:
1338 		case ixgbe_media_type_fiber_qsfp:
1339 			return (TRUE);
1340 		default:
1341 			return (FALSE);
1342 		}
1343 	case ixgbe_mac_X550EM_x:
1344 	case ixgbe_mac_X550EM_a:
1345 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1346 			return (TRUE);
1347 		return (FALSE);
1348 	default:
1349 		return (FALSE);
1350 	}
1351 } /* ixgbe_is_sfp */
1352 
1353 /************************************************************************
1354  * ixgbe_config_link
1355  ************************************************************************/
1356 static void
1357 ixgbe_config_link(if_ctx_t ctx)
1358 {
1359 	struct adapter  *adapter = iflib_get_softc(ctx);
1360 	struct ixgbe_hw *hw = &adapter->hw;
1361 	u32             autoneg, err = 0;
1362 	bool            sfp, negotiate;
1363 
1364 	sfp = ixgbe_is_sfp(hw);
1365 
1366 	if (sfp) {
1367 		adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1368 		iflib_admin_intr_deferred(ctx);
1369 	} else {
1370 		if (hw->mac.ops.check_link)
1371 			err = ixgbe_check_link(hw, &adapter->link_speed,
1372 			    &adapter->link_up, FALSE);
1373 		if (err)
1374 			return;
1375 		autoneg = hw->phy.autoneg_advertised;
1376 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1377 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1378 			    &negotiate);
1379 		if (err)
1380 			return;
1381 		if (hw->mac.ops.setup_link)
1382 			err = hw->mac.ops.setup_link(hw, autoneg,
1383 			    adapter->link_up);
1384 	}
1385 } /* ixgbe_config_link */
1386 
1387 /************************************************************************
1388  * ixgbe_update_stats_counters - Update board statistics counters.
1389  ************************************************************************/
1390 static void
1391 ixgbe_update_stats_counters(struct adapter *adapter)
1392 {
1393 	struct ixgbe_hw       *hw = &adapter->hw;
1394 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1395 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1396 	u32                   lxoffrxc;
1397 	u64                   total_missed_rx = 0;
1398 
1399 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1400 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1401 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1402 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1403 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1404 
1405 	for (int i = 0; i < 16; i++) {
1406 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1407 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1408 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1409 	}
1410 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1411 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1412 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1413 
1414 	/* Hardware workaround, gprc counts missed packets */
1415 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1416 	stats->gprc -= missed_rx;
1417 
1418 	if (hw->mac.type != ixgbe_mac_82598EB) {
1419 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1420 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1421 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1422 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1423 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1424 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1425 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1426 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1427 		stats->lxoffrxc += lxoffrxc;
1428 	} else {
1429 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1430 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1431 		stats->lxoffrxc += lxoffrxc;
1432 		/* 82598 only has a counter in the high register */
1433 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1434 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1435 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1436 	}
1437 
1438 	/*
1439 	 * For watchdog management we need to know if we have been paused
1440 	 * during the last interval, so capture that here.
1441 	 */
1442 	if (lxoffrxc)
1443 		adapter->shared->isc_pause_frames = 1;
1444 
1445 	/*
1446 	 * Workaround: mprc hardware is incorrectly counting
1447 	 * broadcasts, so for now we subtract those.
1448 	 */
1449 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1450 	stats->bprc += bprc;
1451 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1452 	if (hw->mac.type == ixgbe_mac_82598EB)
1453 		stats->mprc -= bprc;
1454 
1455 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1456 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1457 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1458 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1459 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1460 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1461 
1462 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1463 	stats->lxontxc += lxon;
1464 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1465 	stats->lxofftxc += lxoff;
1466 	total = lxon + lxoff;
1467 
1468 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1469 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1470 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1471 	stats->gptc -= total;
1472 	stats->mptc -= total;
1473 	stats->ptc64 -= total;
1474 	stats->gotc -= total * ETHER_MIN_LEN;
1475 
1476 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1477 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1478 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1479 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1480 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1481 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1482 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1483 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1484 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1485 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1486 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1487 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1488 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1489 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1490 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1491 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1492 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1493 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1494 	/* Only read FCOE on 82599 */
1495 	if (hw->mac.type != ixgbe_mac_82598EB) {
1496 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1497 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1498 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1499 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1500 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1501 	}
1502 
1503 	/* Fill out the OS statistics structure */
1504 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1505 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1506 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1507 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1508 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1509 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1510 	IXGBE_SET_COLLISIONS(adapter, 0);
1511 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1512 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1513 } /* ixgbe_update_stats_counters */
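
/*
 * Illustrative sketch, not compiled into the driver: on 82599 and later
 * MACs the byte counters above are split across a 32-bit low register
 * and a high register, and the accumulation combines them as
 * low + (high << 32).  A minimal example of that combination, assuming
 * hypothetical sampled register values rather than live
 * IXGBE_READ_REG() reads:
 */
#if 0
static inline u64
ix_combine_split_counter(u32 lo, u32 hi)
{
	/* Low register holds bits 31:0, high register the upper bits. */
	return (((u64)hi << 32) + lo);
}
/* e.g. lo = 0x89ABCDEF, hi = 0x12 -> 0x1289ABCDEF total octets */
#endif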
1514 
1515 /************************************************************************
1516  * ixgbe_add_hw_stats
1517  *
1518  *   Add sysctl variables, one per statistic, to the system.
1519  ************************************************************************/
1520 static void
1521 ixgbe_add_hw_stats(struct adapter *adapter)
1522 {
1523 	device_t               dev = iflib_get_dev(adapter->ctx);
1524 	struct ix_rx_queue     *rx_que;
1525 	struct ix_tx_queue     *tx_que;
1526 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1527 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1528 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1529 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1530 	struct sysctl_oid      *stat_node, *queue_node;
1531 	struct sysctl_oid_list *stat_list, *queue_list;
1532 	int                    i;
1533 
1534 #define QUEUE_NAME_LEN 32
1535 	char                   namebuf[QUEUE_NAME_LEN];
1536 
1537 	/* Driver Statistics */
1538 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1539 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1540 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1541 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1542 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1543 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1544 
1545 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1546 		struct tx_ring *txr = &tx_que->txr;
1547 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1548 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1549 		    CTLFLAG_RD, NULL, "Queue Name");
1550 		queue_list = SYSCTL_CHILDREN(queue_node);
1551 
1552 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1553 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1554 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1555 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1556 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1557 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1558 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1559 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1560 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1561 		    CTLFLAG_RD, &txr->total_packets,
1562 		    "Queue Packets Transmitted");
1563 	}
1564 
1565 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1566 		struct rx_ring *rxr = &rx_que->rxr;
1567 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1568 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1569 		    CTLFLAG_RD, NULL, "Queue Name");
1570 		queue_list = SYSCTL_CHILDREN(queue_node);
1571 
1572 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1573 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i], 0,
1574 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1575 		    "Interrupt Rate");
1576 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1577 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1578 		    "irqs on this queue");
1579 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1580 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1581 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1582 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1583 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1584 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1585 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1586 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1587 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1588 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1589 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1590 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1591 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1592 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1593 	}
1594 
1595 	/* MAC stats get their own sub node */
1596 
1597 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1598 	    CTLFLAG_RD, NULL, "MAC Statistics");
1599 	stat_list = SYSCTL_CHILDREN(stat_node);
1600 
1601 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1602 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1603 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1604 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1605 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1606 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1607 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1608 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1609 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1610 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1611 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1612 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1613 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1614 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1615 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1616 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1617 
1618 	/* Flow Control stats */
1619 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1620 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1621 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1622 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1623 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1624 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1625 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1626 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1627 
1628 	/* Packet Reception Stats */
1629 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1630 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1631 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1632 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1633 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1634 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1635 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1636 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1637 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1638 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1639 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1640 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1641 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1642 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1643 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1644 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1645 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1646 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1647 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1648 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1649 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1650 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1651 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1652 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1654 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1656 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1657 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1658 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1660 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1662 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1664 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1665 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1666 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1667 
1668 	/* Packet Transmission Stats */
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1670 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1671 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1672 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1674 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1676 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1678 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1680 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1681 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1682 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1683 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1684 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1685 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1686 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1687 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1688 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1689 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1690 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1691 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1692 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1693 } /* ixgbe_add_hw_stats */
1694 
1695 /************************************************************************
1696  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1697  *
1698  *   Retrieves the TDH value from the hardware
1699  ************************************************************************/
1700 static int
1701 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1702 {
1703 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1704 	int            error;
1705 	unsigned int   val;
1706 
1707 	if (!txr)
1708 		return (0);
1709 
1710 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1711 	error = sysctl_handle_int(oidp, &val, 0, req);
1712 	if (error || !req->newptr)
1713 		return (error);
1714 
1715 	return (0);
1716 } /* ixgbe_sysctl_tdh_handler */
1717 
1718 /************************************************************************
1719  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1720  *
1721  *   Retrieves the TDT value from the hardware
1722  ************************************************************************/
1723 static int
1724 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1725 {
1726 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1727 	int            error;
1728 	unsigned int   val;
1729 
1730 	if (!txr)
1731 		return (0);
1732 
1733 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1734 	error = sysctl_handle_int(oidp, &val, 0, req);
1735 	if (error || !req->newptr)
1736 		return (error);
1737 
1738 	return (0);
1739 } /* ixgbe_sysctl_tdt_handler */
1740 
1741 /************************************************************************
1742  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1743  *
1744  *   Retrieves the RDH value from the hardware
1745  ************************************************************************/
1746 static int
1747 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1748 {
1749 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1750 	int            error;
1751 	unsigned int   val;
1752 
1753 	if (!rxr)
1754 		return (0);
1755 
1756 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1757 	error = sysctl_handle_int(oidp, &val, 0, req);
1758 	if (error || !req->newptr)
1759 		return (error);
1760 
1761 	return (0);
1762 } /* ixgbe_sysctl_rdh_handler */
1763 
1764 /************************************************************************
1765  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1766  *
1767  *   Retrieves the RDT value from the hardware
1768  ************************************************************************/
1769 static int
1770 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1771 {
1772 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1773 	int            error;
1774 	unsigned int   val;
1775 
1776 	if (!rxr)
1777 		return (0);
1778 
1779 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1780 	error = sysctl_handle_int(oidp, &val, 0, req);
1781 	if (error || !req->newptr)
1782 		return (error);
1783 
1784 	return (0);
1785 } /* ixgbe_sysctl_rdt_handler */
1786 
1787 /************************************************************************
1788  * ixgbe_if_vlan_register
1789  *
1790  *   Run via the vlan config EVENT; it enables us to use the
1791  *   HW Filter table since we can get the vlan id. This only
1792  *   creates the entry in the soft version of the
1793  *   VFTA; init will repopulate the real table.
1794  ************************************************************************/
1795 static void
1796 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1797 {
1798 	struct adapter *adapter = iflib_get_softc(ctx);
1799 	u16            index, bit;
1800 
1801 	index = (vtag >> 5) & 0x7F;
1802 	bit = vtag & 0x1F;
1803 	adapter->shadow_vfta[index] |= (1 << bit);
1804 	++adapter->num_vlans;
1805 	ixgbe_setup_vlan_hw_support(ctx);
1806 } /* ixgbe_if_vlan_register */
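
/*
 * Illustrative sketch, not compiled into the driver: the VFTA covers
 * the 4096 possible VLAN IDs with 128 32-bit words, so a tag selects
 * word (vtag >> 5) and bit (vtag & 0x1F), exactly as the shadow_vfta
 * update above does.  A worked example with a hypothetical tag:
 */
#if 0
static void
ix_vfta_example(void)
{
	u16 vtag = 100;			/* hypothetical VLAN ID */
	u16 index = (vtag >> 5) & 0x7F;	/* 100 / 32 = 3 */
	u16 bit = vtag & 0x1F;		/* 100 % 32 = 4 */

	/* shadow_vfta[3] |= (1 << 4) marks VLAN 100 as registered. */
	(void)index;
	(void)bit;
}
#endif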
1807 
1808 /************************************************************************
1809  * ixgbe_if_vlan_unregister
1810  *
1811  *   Run via the vlan unconfig EVENT; removes our entry from the soft VFTA.
1812  ************************************************************************/
1813 static void
1814 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1815 {
1816 	struct adapter *adapter = iflib_get_softc(ctx);
1817 	u16            index, bit;
1818 
1819 	index = (vtag >> 5) & 0x7F;
1820 	bit = vtag & 0x1F;
1821 	adapter->shadow_vfta[index] &= ~(1 << bit);
1822 	--adapter->num_vlans;
1823 	/* Re-init to load the changes */
1824 	ixgbe_setup_vlan_hw_support(ctx);
1825 } /* ixgbe_if_vlan_unregister */
1826 
1827 /************************************************************************
1828  * ixgbe_setup_vlan_hw_support
1829  ************************************************************************/
1830 static void
1831 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1832 {
1833 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1834 	struct adapter  *adapter = iflib_get_softc(ctx);
1835 	struct ixgbe_hw *hw = &adapter->hw;
1836 	struct rx_ring  *rxr;
1837 	int             i;
1838 	u32             ctrl;
1839 
1840 
1841 	/*
1842 	 * We get here through init_locked, meaning
1843 	 * a soft reset; that has already cleared
1844 	 * the VFTA and other state, so if no vlans
1845 	 * have been registered, do nothing.
1846 	 */
1847 	if (adapter->num_vlans == 0)
1848 		return;
1849 
1850 	/* Setup the queues for vlans */
1851 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1852 		for (i = 0; i < adapter->num_rx_queues; i++) {
1853 			rxr = &adapter->rx_queues[i].rxr;
1854 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1855 			if (hw->mac.type != ixgbe_mac_82598EB) {
1856 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1857 				ctrl |= IXGBE_RXDCTL_VME;
1858 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1859 			}
1860 			rxr->vtag_strip = TRUE;
1861 		}
1862 	}
1863 
1864 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1865 		return;
1866 	/*
1867 	 * A soft reset zeroes out the VFTA, so
1868 	 * we need to repopulate it now.
1869 	 */
1870 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1871 		if (adapter->shadow_vfta[i] != 0)
1872 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1873 			    adapter->shadow_vfta[i]);
1874 
1875 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1876 	/* Enable the Filter Table if enabled */
1877 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1878 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1879 		ctrl |= IXGBE_VLNCTRL_VFE;
1880 	}
1881 	if (hw->mac.type == ixgbe_mac_82598EB)
1882 		ctrl |= IXGBE_VLNCTRL_VME;
1883 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1884 } /* ixgbe_setup_vlan_hw_support */
1885 
1886 /************************************************************************
1887  * ixgbe_get_slot_info
1888  *
1889  *   Get the width and transaction speed of
1890  *   the slot this adapter is plugged into.
1891  ************************************************************************/
1892 static void
1893 ixgbe_get_slot_info(struct adapter *adapter)
1894 {
1895 	device_t        dev = iflib_get_dev(adapter->ctx);
1896 	struct ixgbe_hw *hw = &adapter->hw;
1897 	int             bus_info_valid = TRUE;
1898 	u32             offset;
1899 	u16             link;
1900 
1901 	/* Some devices are behind an internal bridge */
1902 	switch (hw->device_id) {
1903 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1904 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1905 		goto get_parent_info;
1906 	default:
1907 		break;
1908 	}
1909 
1910 	ixgbe_get_bus_info(hw);
1911 
1912 	/*
1913 	 * Some devices don't use PCI-E, so there is no need
1914 	 * to display "Unknown" for bus speed and width.
1915 	 */
1916 	switch (hw->mac.type) {
1917 	case ixgbe_mac_X550EM_x:
1918 	case ixgbe_mac_X550EM_a:
1919 		return;
1920 	default:
1921 		goto display;
1922 	}
1923 
1924 get_parent_info:
1925 	/*
1926 	 * For the Quad port adapter we need to parse back
1927 	 * up the PCI tree to find the speed of the expansion
1928 	 * slot into which this adapter is plugged. A bit more work.
1929 	 */
1930 	dev = device_get_parent(device_get_parent(dev));
1931 #ifdef IXGBE_DEBUG
1932 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1933 	    pci_get_slot(dev), pci_get_function(dev));
1934 #endif
1935 	dev = device_get_parent(device_get_parent(dev));
1936 #ifdef IXGBE_DEBUG
1937 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1938 	    pci_get_slot(dev), pci_get_function(dev));
1939 #endif
1940 	/* Now get the PCI Express Capabilities offset */
1941 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1942 		/*
1943 		 * Hmm...can't get PCI-Express capabilities.
1944 		 * Falling back to default method.
1945 		 */
1946 		bus_info_valid = FALSE;
1947 		ixgbe_get_bus_info(hw);
1948 		goto display;
1949 	}
1950 	/* ...and read the Link Status Register */
1951 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1952 	ixgbe_set_pci_config_data_generic(hw, link);
1953 
1954 display:
1955 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1956 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1957 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1958 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1959 	     "Unknown"),
1960 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1961 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1962 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1963 	     "Unknown"));
1964 
1965 	if (bus_info_valid) {
1966 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1967 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1968 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1969 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1970 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1971 		}
1972 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1973 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1974 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1975 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1976 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1977 		}
1978 	} else
1979 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1980 
1981 	return;
1982 } /* ixgbe_get_slot_info */
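
/*
 * Illustrative sketch, not compiled into the driver: the Link Status
 * word read above with pci_read_config(dev, offset + PCIER_LINK_STA, 2)
 * follows the standard PCIe layout, current link speed in bits 3:0 and
 * negotiated width in bits 9:4; ixgbe_set_pci_config_data_generic()
 * turns those fields into hw->bus.speed and hw->bus.width.  A minimal
 * decode under a hypothetical link value:
 */
#if 0
static void
ix_decode_link_status(u16 link)
{
	u16 speed = link & 0x000F;		/* 1=2.5, 2=5.0, 3=8.0 GT/s */
	u16 width = (link & 0x03F0) >> 4;	/* x1, x2, x4, x8, ... */

	/* e.g. link = 0x0083 -> speed code 3 (8.0GT/s), width x8 */
	(void)speed;
	(void)width;
}
#endif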
1983 
1984 /************************************************************************
1985  * ixgbe_if_msix_intr_assign
1986  *
1987  *   Setup MSI-X Interrupt resources and handlers
1988  ************************************************************************/
1989 static int
1990 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1991 {
1992 	struct adapter     *adapter = iflib_get_softc(ctx);
1993 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1994 	struct ix_tx_queue *tx_que;
1995 	int                error, rid, vector = 0;
1996 	int                cpu_id = 0;
1997 	char               buf[16];
1998 
1999 	/* Admin Queue is vector 0 */
2000 	rid = vector + 1;
2001 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2002 		rid = vector + 1;
2003 
2004 		snprintf(buf, sizeof(buf), "rxq%d", i);
2005 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2006 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2007 
2008 		if (error) {
2009 			device_printf(iflib_get_dev(ctx),
2010 			    "Failed to allocate que int %d err: %d", i, error);
2011 			adapter->num_rx_queues = i + 1;
2012 			goto fail;
2013 		}
2014 
2015 		rx_que->msix = vector;
2016 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2017 			/*
2018 			 * The queue ID is used as the RSS layer bucket ID.
2019 			 * We look up the queue ID -> RSS CPU ID and select
2020 			 * that.
2021 			 */
2022 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2023 		} else {
2024 			/*
2025 			 * Bind the MSI-X vector, and thus the
2026 			 * rings to the corresponding cpu.
2027 			 *
2028 			 * This just happens to match the default RSS
2029 			 * round-robin bucket -> queue -> CPU allocation.
2030 			 */
2031 			if (adapter->num_rx_queues > 1)
2032 				cpu_id = i;
2033 		}
2034 
2035 	}
2036 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2037 		snprintf(buf, sizeof(buf), "txq%d", i);
2038 		tx_que = &adapter->tx_queues[i];
2039 		tx_que->msix = i % adapter->num_rx_queues;
2040 		iflib_softirq_alloc_generic(ctx,
2041 		    &adapter->rx_queues[tx_que->msix].que_irq,
2042 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2043 	}
2044 	rid = vector + 1;
2045 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2046 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2047 	if (error) {
2048 		device_printf(iflib_get_dev(ctx),
2049 		    "Failed to register admin handler");
2050 		return (error);
2051 	}
2052 
2053 	adapter->vector = vector;
2054 
2055 	return (0);
2056 fail:
2057 	iflib_irq_free(ctx, &adapter->irq);
2058 	rx_que = adapter->rx_queues;
2059 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2060 		iflib_irq_free(ctx, &rx_que->que_irq);
2061 
2062 	return (error);
2063 } /* ixgbe_if_msix_intr_assign */
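
/*
 * Illustrative sketch, not compiled into the driver: the assignment
 * above gives RX queue i MSI-X vector i (bus rid i + 1), piggybacks
 * each TX queue's deferred work on the vector of RX queue
 * (i % num_rx_queues), and puts the admin/link interrupt on the next
 * free vector.  A sketch of the resulting layout:
 */
#if 0
static void
ix_vector_layout_example(int num_rx_queues, int num_tx_queues)
{
	int i;

	for (i = 0; i < num_rx_queues; i++)
		printf("rxq%d -> vector %d (rid %d)\n", i, i, i + 1);
	for (i = 0; i < num_tx_queues; i++)
		printf("txq%d -> shares vector %d\n", i, i % num_rx_queues);
	printf("aq   -> vector %d (rid %d)\n", num_rx_queues,
	    num_rx_queues + 1);
}
#endif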
2064 
2065 /*********************************************************************
2066  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2067  **********************************************************************/
2068 static int
2069 ixgbe_msix_que(void *arg)
2070 {
2071 	struct ix_rx_queue *que = arg;
2072 	struct adapter     *adapter = que->adapter;
2073 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2074 
2075 	/* Protect against spurious interrupts */
2076 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2077 		return (FILTER_HANDLED);
2078 
2079 	ixgbe_disable_queue(adapter, que->msix);
2080 	++que->irqs;
2081 
2082 	return (FILTER_SCHEDULE_THREAD);
2083 } /* ixgbe_msix_que */
2084 
2085 /************************************************************************
2086  * ixgbe_if_media_status - Media Ioctl callback
2087  *
2088  *   Called whenever the user queries the status of
2089  *   the interface using ifconfig.
2090  ************************************************************************/
2091 static void
2092 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2093 {
2094 	struct adapter  *adapter = iflib_get_softc(ctx);
2095 	struct ixgbe_hw *hw = &adapter->hw;
2096 	int             layer;
2097 
2098 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2099 
2100 	ifmr->ifm_status = IFM_AVALID;
2101 	ifmr->ifm_active = IFM_ETHER;
2102 
2103 	if (!adapter->link_active)
2104 		return;
2105 
2106 	ifmr->ifm_status |= IFM_ACTIVE;
2107 	layer = adapter->phy_layer;
2108 
2109 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2110 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2111 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2112 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2113 		switch (adapter->link_speed) {
2114 		case IXGBE_LINK_SPEED_10GB_FULL:
2115 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2116 			break;
2117 		case IXGBE_LINK_SPEED_1GB_FULL:
2118 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2119 			break;
2120 		case IXGBE_LINK_SPEED_100_FULL:
2121 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2122 			break;
2123 		case IXGBE_LINK_SPEED_10_FULL:
2124 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2125 			break;
2126 		}
2127 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2128 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2129 		switch (adapter->link_speed) {
2130 		case IXGBE_LINK_SPEED_10GB_FULL:
2131 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2132 			break;
2133 		}
2134 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2135 		switch (adapter->link_speed) {
2136 		case IXGBE_LINK_SPEED_10GB_FULL:
2137 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2138 			break;
2139 		case IXGBE_LINK_SPEED_1GB_FULL:
2140 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2141 			break;
2142 		}
2143 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2144 		switch (adapter->link_speed) {
2145 		case IXGBE_LINK_SPEED_10GB_FULL:
2146 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2147 			break;
2148 		case IXGBE_LINK_SPEED_1GB_FULL:
2149 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2150 			break;
2151 		}
2152 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2153 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2154 		switch (adapter->link_speed) {
2155 		case IXGBE_LINK_SPEED_10GB_FULL:
2156 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2157 			break;
2158 		case IXGBE_LINK_SPEED_1GB_FULL:
2159 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2160 			break;
2161 		}
2162 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2163 		switch (adapter->link_speed) {
2164 		case IXGBE_LINK_SPEED_10GB_FULL:
2165 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2166 			break;
2167 		}
2168 	/*
2169 	 * XXX: These need to use the proper media types once
2170 	 * they're added.
2171 	 */
2172 #ifndef IFM_ETH_XTYPE
2173 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2174 		switch (adapter->link_speed) {
2175 		case IXGBE_LINK_SPEED_10GB_FULL:
2176 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2177 			break;
2178 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2179 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2180 			break;
2181 		case IXGBE_LINK_SPEED_1GB_FULL:
2182 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2183 			break;
2184 		}
2185 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2186 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2187 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2188 		switch (adapter->link_speed) {
2189 		case IXGBE_LINK_SPEED_10GB_FULL:
2190 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2191 			break;
2192 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2193 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2194 			break;
2195 		case IXGBE_LINK_SPEED_1GB_FULL:
2196 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2197 			break;
2198 		}
2199 #else
2200 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2201 		switch (adapter->link_speed) {
2202 		case IXGBE_LINK_SPEED_10GB_FULL:
2203 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2204 			break;
2205 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2206 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2207 			break;
2208 		case IXGBE_LINK_SPEED_1GB_FULL:
2209 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2210 			break;
2211 		}
2212 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2213 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2214 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2215 		switch (adapter->link_speed) {
2216 		case IXGBE_LINK_SPEED_10GB_FULL:
2217 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2218 			break;
2219 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2220 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2221 			break;
2222 		case IXGBE_LINK_SPEED_1GB_FULL:
2223 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2224 			break;
2225 		}
2226 #endif
2227 
2228 	/* If nothing is recognized... */
2229 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2230 		ifmr->ifm_active |= IFM_UNKNOWN;
2231 
2232 	/* Display current flow control setting used on link */
2233 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2234 	    hw->fc.current_mode == ixgbe_fc_full)
2235 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2236 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2237 	    hw->fc.current_mode == ixgbe_fc_full)
2238 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2239 } /* ixgbe_if_media_status */
2240 
2241 /************************************************************************
2242  * ixgbe_if_media_change - Media Ioctl callback
2243  *
2244  *   Called when the user changes speed/duplex using
2245  *   the media/mediaopt option with ifconfig.
2246  ************************************************************************/
2247 static int
2248 ixgbe_if_media_change(if_ctx_t ctx)
2249 {
2250 	struct adapter   *adapter = iflib_get_softc(ctx);
2251 	struct ifmedia   *ifm = iflib_get_media(ctx);
2252 	struct ixgbe_hw  *hw = &adapter->hw;
2253 	ixgbe_link_speed speed = 0;
2254 
2255 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2256 
2257 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2258 		return (EINVAL);
2259 
2260 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2261 		return (EPERM);
2262 
2263 	/*
2264 	 * We don't actually need to check against the supported
2265 	 * media types of the adapter; ifmedia will take care of
2266 	 * that for us.
2267 	 */
2268 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2269 	case IFM_AUTO:
2270 	case IFM_10G_T:
2271 		speed |= IXGBE_LINK_SPEED_100_FULL;
2272 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2273 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2274 		break;
2275 	case IFM_10G_LRM:
2276 	case IFM_10G_LR:
2277 #ifndef IFM_ETH_XTYPE
2278 	case IFM_10G_SR: /* KR, too */
2279 	case IFM_10G_CX4: /* KX4 */
2280 #else
2281 	case IFM_10G_KR:
2282 	case IFM_10G_KX4:
2283 #endif
2284 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2285 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2286 		break;
2287 #ifndef IFM_ETH_XTYPE
2288 	case IFM_1000_CX: /* KX */
2289 #else
2290 	case IFM_1000_KX:
2291 #endif
2292 	case IFM_1000_LX:
2293 	case IFM_1000_SX:
2294 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2295 		break;
2296 	case IFM_1000_T:
2297 		speed |= IXGBE_LINK_SPEED_100_FULL;
2298 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2299 		break;
2300 	case IFM_10G_TWINAX:
2301 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2302 		break;
2303 	case IFM_100_TX:
2304 		speed |= IXGBE_LINK_SPEED_100_FULL;
2305 		break;
2306 	case IFM_10_T:
2307 		speed |= IXGBE_LINK_SPEED_10_FULL;
2308 		break;
2309 	default:
2310 		goto invalid;
2311 	}
2312 
2313 	hw->mac.autotry_restart = TRUE;
2314 	hw->mac.ops.setup_link(hw, speed, TRUE);
2315 	adapter->advertise =
2316 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2317 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2318 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2319 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2320 
2321 	return (0);
2322 
2323 invalid:
2324 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2325 
2326 	return (EINVAL);
2327 } /* ixgbe_if_media_change */
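
/*
 * Illustrative sketch, not compiled into the driver: adapter->advertise
 * packs the selected speeds into the same bitmask the advertise_speed
 * sysctl uses (0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M).  A worked
 * example for the hypothetical case of selecting IFM_10G_T above:
 */
#if 0
static void
ix_advertise_example(void)
{
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_100_FULL |
	    IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
	int advertise;

	advertise = ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	/* 10G + 1G + 100M selected -> advertise == 4 | 2 | 1 == 7 */
	(void)advertise;
}
#endif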
2328 
2329 /************************************************************************
2330  * ixgbe_if_promisc_set
2331  ************************************************************************/
2332 static int
2333 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2334 {
2335 	struct adapter *adapter = iflib_get_softc(ctx);
2336 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2337 	u32            rctl;
2338 	int            mcnt = 0;
2339 
2340 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2341 	rctl &= (~IXGBE_FCTRL_UPE);
2342 	if (ifp->if_flags & IFF_ALLMULTI)
2343 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2344 	else {
2345 		mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
2346 	}
2347 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2348 		rctl &= (~IXGBE_FCTRL_MPE);
2349 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2350 
2351 	if (ifp->if_flags & IFF_PROMISC) {
2352 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2353 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2354 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2355 		rctl |= IXGBE_FCTRL_MPE;
2356 		rctl &= ~IXGBE_FCTRL_UPE;
2357 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2358 	}
2359 	return (0);
2360 } /* ixgbe_if_promisc_set */
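
/*
 * Illustrative sketch, not compiled into the driver: the FCTRL
 * programming above reduces to three cases for the unicast (UPE) and
 * multicast (MPE) promiscuous bits, summarized here with a hypothetical
 * helper:
 */
#if 0
static u32
ix_fctrl_promisc_bits(int if_flags, int mcnt)
{
	u32 fctrl = 0;

	if (if_flags & IFF_PROMISC)		/* accept everything */
		fctrl |= IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE;
	else if ((if_flags & IFF_ALLMULTI) ||
	    mcnt >= MAX_NUM_MULTICAST_ADDRESSES)	/* all multicast */
		fctrl |= IXGBE_FCTRL_MPE;
	/* otherwise both UPE and MPE stay clear */

	return (fctrl);
}
#endif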
2361 
2362 /************************************************************************
2363  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2364  ************************************************************************/
2365 static int
2366 ixgbe_msix_link(void *arg)
2367 {
2368 	struct adapter  *adapter = arg;
2369 	struct ixgbe_hw *hw = &adapter->hw;
2370 	u32             eicr, eicr_mask;
2371 	s32             retval;
2372 
2373 	++adapter->link_irq;
2374 
2375 	/* Pause other interrupts */
2376 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2377 
2378 	/* First get the cause */
2379 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2380 	/* Be sure the queue bits are not cleared */
2381 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2382 	/* Clear interrupt with write */
2383 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2384 
2385 	/* Link status change */
2386 	if (eicr & IXGBE_EICR_LSC) {
2387 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2388 		adapter->task_requests |= IXGBE_REQUEST_TASK_LSC;
2389 	}
2390 
2391 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2392 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2393 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2394 			/* This is probably overkill :) */
2395 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2396 				return (FILTER_HANDLED);
2397 			/* Disable the interrupt */
2398 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2399 			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2400 		} else
2401 			if (eicr & IXGBE_EICR_ECC) {
2402 				device_printf(iflib_get_dev(adapter->ctx),
2403 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2404 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2405 			}
2406 
2407 		/* Check for over temp condition */
2408 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2409 			switch (adapter->hw.mac.type) {
2410 			case ixgbe_mac_X550EM_a:
2411 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2412 					break;
2413 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2414 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2415 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2416 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2417 				retval = hw->phy.ops.check_overtemp(hw);
2418 				if (retval != IXGBE_ERR_OVERTEMP)
2419 					break;
2420 				device_printf(iflib_get_dev(adapter->ctx),
2421 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2422 				device_printf(iflib_get_dev(adapter->ctx),
2423 				    "System shutdown required!\n");
2424 				break;
2425 			default:
2426 				if (!(eicr & IXGBE_EICR_TS))
2427 					break;
2428 				retval = hw->phy.ops.check_overtemp(hw);
2429 				if (retval != IXGBE_ERR_OVERTEMP)
2430 					break;
2431 				device_printf(iflib_get_dev(adapter->ctx),
2432 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2433 				device_printf(iflib_get_dev(adapter->ctx),
2434 				    "System shutdown required!\n");
2435 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2436 				break;
2437 			}
2438 		}
2439 
2440 		/* Check for VF message */
2441 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2442 		    (eicr & IXGBE_EICR_MAILBOX))
2443 			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2444 	}
2445 
2446 	if (ixgbe_is_sfp(hw)) {
2447 		/* Pluggable optics-related interrupt */
2448 		if (hw->mac.type >= ixgbe_mac_X540)
2449 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2450 		else
2451 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2452 
2453 		if (eicr & eicr_mask) {
2454 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2455 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2456 		}
2457 
2458 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2459 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2460 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2461 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2462 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2463 		}
2464 	}
2465 
2466 	/* Check for fan failure */
2467 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2468 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2469 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2470 	}
2471 
2472 	/* External PHY interrupt */
2473 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2474 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2475 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2476 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2477 	}
2478 
2479 	return (adapter->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2480 } /* ixgbe_msix_link */
2481 
2482 /************************************************************************
2483  * ixgbe_sysctl_interrupt_rate_handler
2484  ************************************************************************/
2485 static int
2486 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2487 {
2488 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2489 	int                error;
2490 	unsigned int       reg, usec, rate;
2491 
2492 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2493 	usec = ((reg & 0x0FF8) >> 3);
2494 	if (usec > 0)
2495 		rate = 500000 / usec;
2496 	else
2497 		rate = 0;
2498 	error = sysctl_handle_int(oidp, &rate, 0, req);
2499 	if (error || !req->newptr)
2500 		return (error);
2501 	reg &= ~0xfff; /* default, no limitation */
2502 	ixgbe_max_interrupt_rate = 0;
2503 	if (rate > 0 && rate < 500000) {
2504 		if (rate < 1000)
2505 			rate = 1000;
2506 		ixgbe_max_interrupt_rate = rate;
2507 		reg |= ((4000000/rate) & 0xff8);
2508 	}
2509 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2510 
2511 	return (0);
2512 } /* ixgbe_sysctl_interrupt_rate_handler */
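
/*
 * Illustrative sketch, not compiled into the driver: EITR bits 11:3
 * hold the interrupt interval; the handler above reports
 * rate = 500000 / usec on read and writes back (4000000 / rate), i.e.
 * the interval already shifted into bits 11:3.  A worked example for a
 * hypothetical request of 8000 interrupts per second:
 */
#if 0
static void
ix_eitr_example(void)
{
	unsigned int rate = 8000;			/* requested irqs/s */
	unsigned int reg = (4000000 / rate) & 0xff8;	/* 500 -> 0x1f0 */
	unsigned int usec = (reg & 0x0FF8) >> 3;	/* 0x1f0 >> 3 = 62 */

	/* Reading back reports 500000 / 62 ~= 8064 interrupts/s. */
	(void)usec;
}
#endif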
2513 
2514 /************************************************************************
2515  * ixgbe_add_device_sysctls
2516  ************************************************************************/
2517 static void
2518 ixgbe_add_device_sysctls(if_ctx_t ctx)
2519 {
2520 	struct adapter         *adapter = iflib_get_softc(ctx);
2521 	device_t               dev = iflib_get_dev(ctx);
2522 	struct ixgbe_hw        *hw = &adapter->hw;
2523 	struct sysctl_oid_list *child;
2524 	struct sysctl_ctx_list *ctx_list;
2525 
2526 	ctx_list = device_get_sysctl_ctx(dev);
2527 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2528 
2529 	/* Sysctls for all devices */
2530 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2531 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2532 	    IXGBE_SYSCTL_DESC_SET_FC);
2533 
2534 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2535 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2536 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2537 
2538 #ifdef IXGBE_DEBUG
2539 	/* testing sysctls (for all devices) */
2540 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2541 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2542 	    "I", "PCI Power State");
2543 
2544 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2545 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2546 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2547 #endif
2548 	/* for X550 series devices */
2549 	if (hw->mac.type >= ixgbe_mac_X550)
2550 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2551 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2552 		    "I", "DMA Coalesce");
2553 
2554 	/* for WoL-capable devices */
2555 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2556 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2557 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2558 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2559 
2560 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2561 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2562 		    "I", "Enable/Disable Wake Up Filters");
2563 	}
2564 
2565 	/* for X552/X557-AT devices */
2566 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2567 		struct sysctl_oid *phy_node;
2568 		struct sysctl_oid_list *phy_list;
2569 
2570 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2571 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2572 		phy_list = SYSCTL_CHILDREN(phy_node);
2573 
2574 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2575 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2576 		    "I", "Current External PHY Temperature (Celsius)");
2577 
2578 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2579 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2580 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2581 		    "External PHY High Temperature Event Occurred");
2582 	}
2583 
2584 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2585 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2586 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2587 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2588 	}
2589 } /* ixgbe_add_device_sysctls */
2590 
2591 /************************************************************************
2592  * ixgbe_allocate_pci_resources
2593  ************************************************************************/
2594 static int
2595 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2596 {
2597 	struct adapter *adapter = iflib_get_softc(ctx);
2598 	device_t        dev = iflib_get_dev(ctx);
2599 	int             rid;
2600 
2601 	rid = PCIR_BAR(0);
2602 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2603 	    RF_ACTIVE);
2604 
2605 	if (!(adapter->pci_mem)) {
2606 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2607 		return (ENXIO);
2608 	}
2609 
2610 	/* Save bus_space values for READ/WRITE_REG macros */
2611 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2612 	adapter->osdep.mem_bus_space_handle =
2613 	    rman_get_bushandle(adapter->pci_mem);
2614 	/* Set hw values for shared code */
2615 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2616 
2617 	return (0);
2618 } /* ixgbe_allocate_pci_resources */
2619 
2620 /************************************************************************
2621  * ixgbe_detach - Device removal routine
2622  *
2623  *   Called when the driver is being removed.
2624  *   Stops the adapter and deallocates all the resources
2625  *   that were allocated for driver operation.
2626  *
2627  *   Return 0 on success, positive on failure
2628  ************************************************************************/
2629 static int
2630 ixgbe_if_detach(if_ctx_t ctx)
2631 {
2632 	struct adapter *adapter = iflib_get_softc(ctx);
2633 	device_t       dev = iflib_get_dev(ctx);
2634 	u32            ctrl_ext;
2635 
2636 	INIT_DEBUGOUT("ixgbe_detach: begin");
2637 
2638 	if (ixgbe_pci_iov_detach(dev) != 0) {
2639 		device_printf(dev, "SR-IOV in use; detach first.\n");
2640 		return (EBUSY);
2641 	}
2642 
2643 	ixgbe_setup_low_power_mode(ctx);
2644 
2645 	/* let hardware know driver is unloading */
2646 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2647 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2648 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2649 
2650 	ixgbe_free_pci_resources(ctx);
2651 	free(adapter->mta, M_IXGBE);
2652 
2653 	return (0);
2654 } /* ixgbe_if_detach */
2655 
2656 /************************************************************************
2657  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2658  *
2659  *   Prepare the adapter/port for LPLU and/or WoL
2660  ************************************************************************/
2661 static int
2662 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2663 {
2664 	struct adapter  *adapter = iflib_get_softc(ctx);
2665 	struct ixgbe_hw *hw = &adapter->hw;
2666 	device_t        dev = iflib_get_dev(ctx);
2667 	s32             error = 0;
2668 
2669 	if (!hw->wol_enabled)
2670 		ixgbe_set_phy_power(hw, FALSE);
2671 
2672 	/* Limit power management flow to X550EM baseT */
2673 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2674 	    hw->phy.ops.enter_lplu) {
2675 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2676 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2677 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2678 
2679 		/*
2680 		 * Clear Wake Up Status register to prevent any previous wakeup
2681 		 * events from waking us up immediately after we suspend.
2682 		 */
2683 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2684 
2685 		/*
2686 		 * Program the Wakeup Filter Control register with user filter
2687 		 * settings
2688 		 */
2689 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2690 
2691 		/* Enable wakeups and power management in Wakeup Control */
2692 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2693 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2694 
2695 		/* X550EM baseT adapters need a special LPLU flow */
2696 		hw->phy.reset_disable = TRUE;
2697 		ixgbe_if_stop(ctx);
2698 		error = hw->phy.ops.enter_lplu(hw);
2699 		if (error)
2700 			device_printf(dev, "Error entering LPLU: %d\n", error);
2701 		hw->phy.reset_disable = FALSE;
2702 	} else {
2703 		/* Just stop for other adapters */
2704 		ixgbe_if_stop(ctx);
2705 	}
2706 
2707 	return (error);
2708 } /* ixgbe_setup_low_power_mode */
2709 
2710 /************************************************************************
2711  * ixgbe_shutdown - Shutdown entry point
2712  ************************************************************************/
2713 static int
2714 ixgbe_if_shutdown(if_ctx_t ctx)
2715 {
2716 	int error = 0;
2717 
2718 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2719 
2720 	error = ixgbe_setup_low_power_mode(ctx);
2721 
2722 	return (error);
2723 } /* ixgbe_if_shutdown */
2724 
2725 /************************************************************************
2726  * ixgbe_if_suspend
2727  *
2728  *   From D0 to D3
2729  ************************************************************************/
2730 static int
2731 ixgbe_if_suspend(if_ctx_t ctx)
2732 {
2733 	int error = 0;
2734 
2735 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2736 
2737 	error = ixgbe_setup_low_power_mode(ctx);
2738 
2739 	return (error);
2740 } /* ixgbe_if_suspend */
2741 
2742 /************************************************************************
2743  * ixgbe_if_resume
2744  *
2745  *   From D3 to D0
2746  ************************************************************************/
2747 static int
2748 ixgbe_if_resume(if_ctx_t ctx)
2749 {
2750 	struct adapter  *adapter = iflib_get_softc(ctx);
2751 	device_t        dev = iflib_get_dev(ctx);
2752 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2753 	struct ixgbe_hw *hw = &adapter->hw;
2754 	u32             wus;
2755 
2756 	INIT_DEBUGOUT("ixgbe_resume: begin");
2757 
2758 	/* Read & clear WUS register */
2759 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2760 	if (wus)
2761 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2762 		    IXGBE_READ_REG(hw, IXGBE_WUS));
2763 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2764 	/* And clear WUFC until next low-power transition */
2765 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2766 
2767 	/*
2768 	 * Required after D3->D0 transition;
2769 	 * will re-advertise all previously advertised speeds
2770 	 */
2771 	if (ifp->if_flags & IFF_UP)
2772 		ixgbe_if_init(ctx);
2773 
2774 	return (0);
2775 } /* ixgbe_if_resume */
2776 
2777 /************************************************************************
2778  * ixgbe_if_mtu_set - Ioctl mtu entry point
2779  *
2780  *   Return 0 on success, EINVAL on failure
2781  ************************************************************************/
2782 static int
2783 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2784 {
2785 	struct adapter *adapter = iflib_get_softc(ctx);
2786 	int error = 0;
2787 
2788 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2789 
2790 	if (mtu > IXGBE_MAX_MTU) {
2791 		error = EINVAL;
2792 	} else {
2793 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2794 	}
2795 
2796 	return (error);
2797 } /* ixgbe_if_mtu_set */
2798 
2799 /************************************************************************
2800  * ixgbe_if_crcstrip_set
2801  ************************************************************************/
2802 static void
2803 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2804 {
2805 	struct adapter *sc = iflib_get_softc(ctx);
2806 	struct ixgbe_hw *hw = &sc->hw;
2807 	/* crc stripping is set in two places:
2808 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2809 	 * IXGBE_RDRXCTL (set by the original driver in
2810 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2811 	 *	We disable the setting when netmap is compiled in).
2812 	 * We update the values here, but also in ixgbe.c because
2813 	 * init_locked sometimes is called outside our control.
2814 	 */
2815 	uint32_t hl, rxc;
2816 
2817 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2818 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2819 #ifdef NETMAP
2820 	if (netmap_verbose)
2821 		D("%s read  HLREG 0x%x rxc 0x%x",
2822 			onoff ? "enter" : "exit", hl, rxc);
2823 #endif
2824 	/* hw requirements ... */
2825 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2826 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2827 	if (onoff && !crcstrip) {
2828 		/* keep the crc. Fast rx */
2829 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2830 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2831 	} else {
2832 		/* reset default mode */
2833 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2834 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2835 	}
2836 #ifdef NETMAP
2837 	if (netmap_verbose)
2838 		D("%s write HLREG 0x%x rxc 0x%x",
2839 			onoff ? "enter" : "exit", hl, rxc);
2840 #endif
2841 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2842 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2843 } /* ixgbe_if_crcstrip_set */
2844 
2845 /*********************************************************************
2846  * ixgbe_if_init - Init entry point
2847  *
2848  *   Used in two ways: It is used by the stack as an init
2849  *   entry point in network interface structure. It is also
2850  *   used by the driver as a hw/sw initialization routine to
2851  *   get to a consistent state.
2854  **********************************************************************/
2855 void
2856 ixgbe_if_init(if_ctx_t ctx)
2857 {
2858 	struct adapter     *adapter = iflib_get_softc(ctx);
2859 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2860 	device_t           dev = iflib_get_dev(ctx);
2861 	struct ixgbe_hw *hw = &adapter->hw;
2862 	struct ix_rx_queue *rx_que;
2863 	struct ix_tx_queue *tx_que;
2864 	u32             txdctl, mhadd;
2865 	u32             rxdctl, rxctrl;
2866 	u32             ctrl_ext;
2867 
2868 	int             i, j, err;
2869 
2870 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2871 
2872 	/* Queue indices may change with IOV mode */
2873 	ixgbe_align_all_queue_indices(adapter);
2874 
2875 	/* Reprogram RAR[0] in case the user changed it. */
2876 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2877 
2878 	/* Get the latest MAC address; the user may have set an LAA */
2879 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2880 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2881 	hw->addr_ctrl.rar_used_count = 1;
2882 
2883 	ixgbe_init_hw(hw);
2884 
2885 	ixgbe_initialize_iov(adapter);
2886 
2887 	ixgbe_initialize_transmit_units(ctx);
2888 
2889 	/* Setup Multicast table */
2890 	ixgbe_if_multi_set(ctx);
2891 
2892 	/* Determine the correct mbuf pool, based on frame size */
2893 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
2894 
2895 	/* Configure RX settings */
2896 	ixgbe_initialize_receive_units(ctx);
2897 
2898 	/*
2899 	 * Initialize variable holding task enqueue requests
2900 	 * from MSI-X interrupts
2901 	 */
2902 	adapter->task_requests = 0;
2903 
2904 	/* Enable SDP & MSI-X interrupts based on adapter */
2905 	ixgbe_config_gpie(adapter);
2906 
2907 	/* Set MTU size */
2908 	if (ifp->if_mtu > ETHERMTU) {
2909 		/* aka IXGBE_MAXFRS on 82599 and newer */
2910 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2911 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2912 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2913 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2914 	}
2915 
2916 	/* Now enable all the queues */
2917 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2918 		struct tx_ring *txr = &tx_que->txr;
2919 
2920 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2921 		txdctl |= IXGBE_TXDCTL_ENABLE;
2922 		/* Set WTHRESH to 8, burst writeback */
2923 		txdctl |= (8 << 16);
2924 		/*
2925 		 * When the internal queue falls below PTHRESH (32),
2926 		 * start prefetching as long as there are at least
2927 		 * HTHRESH (1) buffers ready. The values are taken
2928 		 * from the Intel linux driver 3.8.21.
2929 		 * Prefetching enables tx line rate even with 1 queue.
2930 		 */
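		/*
		 * TXDCTL packs its thresholds into byte-wide fields:
		 * PTHRESH at bit 0, HTHRESH at bit 8 and WTHRESH at bit 16,
		 * hence the shifts of 0, 8 and 16 here and above.
		 */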
2931 		txdctl |= (32 << 0) | (1 << 8);
2932 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2933 	}
2934 
2935 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2936 		struct rx_ring *rxr = &rx_que->rxr;
2937 
2938 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2939 		if (hw->mac.type == ixgbe_mac_82598EB) {
2940 			/*
2941 			 * PTHRESH = 21
2942 			 * HTHRESH = 4
2943 			 * WTHRESH = 8
2944 			 */
2945 			rxdctl &= ~0x3FFFFF;
2946 			rxdctl |= 0x080420;
2947 		}
2948 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2949 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2950 		for (j = 0; j < 10; j++) {
2951 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2952 			    IXGBE_RXDCTL_ENABLE)
2953 				break;
2954 			else
2955 				msec_delay(1);
2956 		}
2957 		wmb();
2958 	}
2959 
2960 	/* Enable Receive engine */
2961 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2962 	if (hw->mac.type == ixgbe_mac_82598EB)
2963 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2964 	rxctrl |= IXGBE_RXCTRL_RXEN;
2965 	ixgbe_enable_rx_dma(hw, rxctrl);
2966 
2967 	/* Set up MSI/MSI-X routing */
2968 	if (ixgbe_enable_msix)  {
2969 		ixgbe_configure_ivars(adapter);
2970 		/* Set up auto-mask */
2971 		if (hw->mac.type == ixgbe_mac_82598EB)
2972 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2973 		else {
2974 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2975 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2976 		}
2977 	} else {  /* Simple settings for Legacy/MSI */
2978 		ixgbe_set_ivar(adapter, 0, 0, 0);
2979 		ixgbe_set_ivar(adapter, 0, 0, 1);
2980 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2981 	}
2982 
2983 	ixgbe_init_fdir(adapter);
2984 
2985 	/*
2986 	 * Check on any SFP devices that
2987 	 * need to be kick-started
2988 	 */
2989 	if (hw->phy.type == ixgbe_phy_none) {
2990 		err = hw->phy.ops.identify(hw);
2991 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2992 			device_printf(dev,
2993 			    "Unsupported SFP+ module type was detected.\n");
2994 			return;
2995 		}
2996 	}
2997 
2998 	/* Set moderation on the Link interrupt */
2999 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3000 
3001 	/* Enable power to the phy. */
3002 	ixgbe_set_phy_power(hw, TRUE);
3003 
3004 	/* Config/Enable Link */
3005 	ixgbe_config_link(ctx);
3006 
3007 	/* Hardware Packet Buffer & Flow Control setup */
3008 	ixgbe_config_delay_values(adapter);
3009 
3010 	/* Initialize the FC settings */
3011 	ixgbe_start_hw(hw);
3012 
3013 	/* Set up VLAN support and filter */
3014 	ixgbe_setup_vlan_hw_support(ctx);
3015 
3016 	/* Setup DMA Coalescing */
3017 	ixgbe_config_dmac(adapter);
3018 
3019 	/* And now turn on interrupts */
3020 	ixgbe_if_enable_intr(ctx);
3021 
3022 	/* Enable the use of the MBX by the VFs */
3023 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3024 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3025 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3026 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3027 	}
3028 
3029 } /* ixgbe_if_init */
3030 
3031 /************************************************************************
3032  * ixgbe_set_ivar
3033  *
3034  *   Setup the correct IVAR register for a particular MSI-X interrupt
3035  *     (yes this is all very magic and confusing :)
3036  *    - entry is the register array entry
3037  *    - vector is the MSI-X vector for this queue
3038  *    - type is RX/TX/MISC
3039  ************************************************************************/
3040 static void
3041 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3042 {
3043 	struct ixgbe_hw *hw = &adapter->hw;
3044 	u32 ivar, index;
3045 
3046 	vector |= IXGBE_IVAR_ALLOC_VAL;
3047 
3048 	switch (hw->mac.type) {
3049 	case ixgbe_mac_82598EB:
3050 		if (type == -1)
3051 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3052 		else
3053 			entry += (type * 64);
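		/*
		 * 82598: each 32-bit IVAR register packs four 8-bit entries;
		 * (entry >> 2) selects the register and (entry & 0x3) the
		 * byte lane within it.
		 */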
3054 		index = (entry >> 2) & 0x1F;
3055 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3056 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3057 		ivar |= (vector << (8 * (entry & 0x3)));
3058 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3059 		break;
3060 	case ixgbe_mac_82599EB:
3061 	case ixgbe_mac_X540:
3062 	case ixgbe_mac_X550:
3063 	case ixgbe_mac_X550EM_x:
3064 	case ixgbe_mac_X550EM_a:
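		/*
		 * 82599 and newer: each IVAR register carries the 8-bit RX
		 * and TX vectors for two queues (RX at bits 0 and 16, TX at
		 * bits 8 and 24); IVAR_MISC holds the "other causes" vectors.
		 */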
3065 		if (type == -1) { /* MISC IVAR */
3066 			index = (entry & 1) * 8;
3067 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3068 			ivar &= ~(0xFF << index);
3069 			ivar |= (vector << index);
3070 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3071 		} else {          /* RX/TX IVARS */
3072 			index = (16 * (entry & 1)) + (8 * type);
3073 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3074 			ivar &= ~(0xFF << index);
3075 			ivar |= (vector << index);
3076 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3077 		}
		break;
3078 	default:
3079 		break;
3080 	}
3081 } /* ixgbe_set_ivar */
3082 
3083 /************************************************************************
3084  * ixgbe_configure_ivars
3085  ************************************************************************/
3086 static void
3087 ixgbe_configure_ivars(struct adapter *adapter)
3088 {
3089 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3090 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3091 	u32                newitr;
3092 
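	/*
	 * The EITR interval field sits above bit 3 (hence the 0x0FF8
	 * mask), so the division below already yields a register-ready
	 * value: e.g. ixgbe_max_interrupt_rate = 31250 gives
	 * newitr = 4000000 / 31250 = 128 (0x80).
	 */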
3093 	if (ixgbe_max_interrupt_rate > 0)
3094 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3095 	else {
3096 		/*
3097 		 * Disable DMA coalescing if interrupt moderation is
3098 		 * disabled.
3099 		 */
3100 		adapter->dmac = 0;
3101 		newitr = 0;
3102 	}
3103 
3104 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3105 		struct rx_ring *rxr = &rx_que->rxr;
3106 
3107 		/* First the RX queue entry */
3108 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3109 
3110 		/* Set an Initial EITR value */
3111 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3112 	}
3113 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3114 		struct tx_ring *txr = &tx_que->txr;
3115 
3116 		/* ... and the TX */
3117 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3118 	}
3119 	/* For the Link interrupt */
3120 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3121 } /* ixgbe_configure_ivars */
3122 
3123 /************************************************************************
3124  * ixgbe_config_gpie
3125  ************************************************************************/
3126 static void
3127 ixgbe_config_gpie(struct adapter *adapter)
3128 {
3129 	struct ixgbe_hw *hw = &adapter->hw;
3130 	u32             gpie;
3131 
3132 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3133 
3134 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3135 		/* Enable Enhanced MSI-X mode */
3136 		gpie |= IXGBE_GPIE_MSIX_MODE
3137 		     |  IXGBE_GPIE_EIAME
3138 		     |  IXGBE_GPIE_PBA_SUPPORT
3139 		     |  IXGBE_GPIE_OCD;
3140 	}
3141 
3142 	/* Fan Failure Interrupt */
3143 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3144 		gpie |= IXGBE_SDP1_GPIEN;
3145 
3146 	/* Thermal Sensor Interrupt */
3147 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3148 		gpie |= IXGBE_SDP0_GPIEN_X540;
3149 
3150 	/* Link detection */
3151 	switch (hw->mac.type) {
3152 	case ixgbe_mac_82599EB:
3153 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3154 		break;
3155 	case ixgbe_mac_X550EM_x:
3156 	case ixgbe_mac_X550EM_a:
3157 		gpie |= IXGBE_SDP0_GPIEN_X540;
3158 		break;
3159 	default:
3160 		break;
3161 	}
3162 
3163 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3164 
3165 } /* ixgbe_config_gpie */
3166 
3167 /************************************************************************
3168  * ixgbe_config_delay_values
3169  *
3170  *   Requires adapter->max_frame_size to be set.
3171  ************************************************************************/
3172 static void
3173 ixgbe_config_delay_values(struct adapter *adapter)
3174 {
3175 	struct ixgbe_hw *hw = &adapter->hw;
3176 	u32             rxpb, frame, size, tmp;
3177 
3178 	frame = adapter->max_frame_size;
3179 
3180 	/* Calculate High Water */
3181 	switch (hw->mac.type) {
3182 	case ixgbe_mac_X540:
3183 	case ixgbe_mac_X550:
3184 	case ixgbe_mac_X550EM_x:
3185 	case ixgbe_mac_X550EM_a:
3186 		tmp = IXGBE_DV_X540(frame, frame);
3187 		break;
3188 	default:
3189 		tmp = IXGBE_DV(frame, frame);
3190 		break;
3191 	}
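	/*
	 * Convert the delay value from bit times to KB and subtract it
	 * from packet buffer 0 (RXPBSIZE >> 10 is also in KB); the
	 * difference leaves headroom for frames that arrive after an
	 * XOFF pause frame has been sent.
	 */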
3192 	size = IXGBE_BT2KB(tmp);
3193 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3194 	hw->fc.high_water[0] = rxpb - size;
3195 
3196 	/* Now calculate Low Water */
3197 	switch (hw->mac.type) {
3198 	case ixgbe_mac_X540:
3199 	case ixgbe_mac_X550:
3200 	case ixgbe_mac_X550EM_x:
3201 	case ixgbe_mac_X550EM_a:
3202 		tmp = IXGBE_LOW_DV_X540(frame);
3203 		break;
3204 	default:
3205 		tmp = IXGBE_LOW_DV(frame);
3206 		break;
3207 	}
3208 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3209 
3210 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3211 	hw->fc.send_xon = TRUE;
3212 } /* ixgbe_config_delay_values */
3213 
3214 /************************************************************************
3215  * ixgbe_mc_filter_apply - if_foreach_llmaddr() callback
3216  *
3217  *   Copies one link-level multicast address into the adapter's mta array.
3218  ************************************************************************/
3219 static u_int
3220 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count)
3221 {
3222 	struct adapter *adapter = arg;
3223 	struct ixgbe_mc_addr *mta = adapter->mta;
3224 
3225 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3226 		return (0);
3227 	bcopy(LLADDR(sdl), mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3228 	mta[count].vmdq = adapter->pool;
3229 
3230 	return (1);
3231 } /* ixgbe_mc_filter_apply */
3232 
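/************************************************************************
 * ixgbe_if_multi_set - Multicast Update
 *
 *   Called whenever the multicast address list is updated.
 ************************************************************************/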
3233 static void
3234 ixgbe_if_multi_set(if_ctx_t ctx)
3235 {
3236 	struct adapter       *adapter = iflib_get_softc(ctx);
3237 	struct ixgbe_mc_addr *mta;
3238 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3239 	u8                   *update_ptr;
3240 	u32                  fctrl;
3241 	u_int		     mcnt;
3242 
3243 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3244 
3245 	mta = adapter->mta;
3246 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3247 
3248 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3249 	    adapter);
3250 
3251 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3253 	if (ifp->if_flags & IFF_PROMISC)
3254 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3255 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3256 	    ifp->if_flags & IFF_ALLMULTI) {
3257 		fctrl |= IXGBE_FCTRL_MPE;
3258 		fctrl &= ~IXGBE_FCTRL_UPE;
3259 	} else
3260 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3261 
3262 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3263 
3264 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3265 		update_ptr = (u8 *)mta;
3266 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3267 		    ixgbe_mc_array_itr, TRUE);
3268 	}
3269 
3270 } /* ixgbe_if_multi_set */
3271 
3272 /************************************************************************
3273  * ixgbe_mc_array_itr
3274  *
3275  *   An iterator function needed by the multicast shared code.
3276  *   It feeds the shared code routine the addresses in the
3277  *   array built by ixgbe_if_multi_set() one by one.
3278  ************************************************************************/
3279 static u8 *
3280 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3281 {
3282 	struct ixgbe_mc_addr *mta;
3283 
3284 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3285 	*vmdq = mta->vmdq;
3286 
3287 	*update_ptr = (u8*)(mta + 1);
3288 
3289 	return (mta->addr);
3290 } /* ixgbe_mc_array_itr */
3291 
3292 /************************************************************************
3293  * ixgbe_if_timer - Timer routine
3294  *
3295  *   Checks for link status, updates statistics,
3296  *   and runs the watchdog check.
3297  ************************************************************************/
3298 static void
3299 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3300 {
3301 	struct adapter *adapter = iflib_get_softc(ctx);
3302 
3303 	if (qid != 0)
3304 		return;
3305 
3306 	/* Check for pluggable optics */
3307 	if (adapter->sfp_probe)
3308 		if (!ixgbe_sfp_probe(ctx))
3309 			return; /* Nothing to do */
3310 
3311 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3312 	    &adapter->link_up, 0);
3313 
3314 	/* Fire off the adminq task */
3315 	iflib_admin_intr_deferred(ctx);
3316 
3317 } /* ixgbe_if_timer */
3318 
3319 /************************************************************************
3320  * ixgbe_sfp_probe
3321  *
3322  *   Determine if a port had optics inserted.
3323  ************************************************************************/
3324 static bool
3325 ixgbe_sfp_probe(if_ctx_t ctx)
3326 {
3327 	struct adapter  *adapter = iflib_get_softc(ctx);
3328 	struct ixgbe_hw *hw = &adapter->hw;
3329 	device_t        dev = iflib_get_dev(ctx);
3330 	bool            result = FALSE;
3331 
3332 	if ((hw->phy.type == ixgbe_phy_nl) &&
3333 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3334 		s32 ret = hw->phy.ops.identify_sfp(hw);
3335 		if (ret)
3336 			goto out;
3337 		ret = hw->phy.ops.reset(hw);
3338 		adapter->sfp_probe = FALSE;
3339 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3340 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3341 			device_printf(dev,
3342 			    "Reload driver with supported module.\n");
3343 			goto out;
3344 		} else
3345 			device_printf(dev, "SFP+ module detected!\n");
3346 		/* We now have supported optics */
3347 		result = TRUE;
3348 	}
3349 out:
3350 
3351 	return (result);
3352 } /* ixgbe_sfp_probe */
3353 
3354 /************************************************************************
3355  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3356  ************************************************************************/
3357 static void
3358 ixgbe_handle_mod(void *context)
3359 {
3360 	if_ctx_t        ctx = context;
3361 	struct adapter  *adapter = iflib_get_softc(ctx);
3362 	struct ixgbe_hw *hw = &adapter->hw;
3363 	device_t        dev = iflib_get_dev(ctx);
3364 	u32             err, cage_full = 0;
3365 
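	/*
	 * When the crosstalk fix is needed, only probe the module if
	 * the SDP "cage full" pin confirms that one is actually
	 * present.
	 */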
3366 	if (adapter->hw.need_crosstalk_fix) {
3367 		switch (hw->mac.type) {
3368 		case ixgbe_mac_82599EB:
3369 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3370 			    IXGBE_ESDP_SDP2;
3371 			break;
3372 		case ixgbe_mac_X550EM_x:
3373 		case ixgbe_mac_X550EM_a:
3374 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3375 			    IXGBE_ESDP_SDP0;
3376 			break;
3377 		default:
3378 			break;
3379 		}
3380 
3381 		if (!cage_full)
3382 			goto handle_mod_out;
3383 	}
3384 
3385 	err = hw->phy.ops.identify_sfp(hw);
3386 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3387 		device_printf(dev,
3388 		    "Unsupported SFP+ module type was detected.\n");
3389 		goto handle_mod_out;
3390 	}
3391 
3392 	if (hw->mac.type == ixgbe_mac_82598EB)
3393 		err = hw->phy.ops.reset(hw);
3394 	else
3395 		err = hw->mac.ops.setup_sfp(hw);
3396 
3397 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3398 		device_printf(dev,
3399 		    "Setup failure - unsupported SFP+ module type.\n");
3400 		goto handle_mod_out;
3401 	}
3402 	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3403 	return;
3404 
3405 handle_mod_out:
3406 	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3407 } /* ixgbe_handle_mod */
3408 
3409 
3410 /************************************************************************
3411  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3412  ************************************************************************/
3413 static void
3414 ixgbe_handle_msf(void *context)
3415 {
3416 	if_ctx_t        ctx = context;
3417 	struct adapter  *adapter = iflib_get_softc(ctx);
3418 	struct ixgbe_hw *hw = &adapter->hw;
3419 	u32             autoneg;
3420 	bool            negotiate;
3421 
3422 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3423 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3424 
3425 	autoneg = hw->phy.autoneg_advertised;
3426 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3427 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3428 	if (hw->mac.ops.setup_link)
3429 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3430 
3431 	/* Adjust media types shown in ifconfig */
3432 	ifmedia_removeall(adapter->media);
3433 	ixgbe_add_media_types(adapter->ctx);
3434 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3435 } /* ixgbe_handle_msf */
3436 
3437 /************************************************************************
3438  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3439  ************************************************************************/
3440 static void
3441 ixgbe_handle_phy(void *context)
3442 {
3443 	if_ctx_t        ctx = context;
3444 	struct adapter  *adapter = iflib_get_softc(ctx);
3445 	struct ixgbe_hw *hw = &adapter->hw;
3446 	int             error;
3447 
3448 	error = hw->phy.ops.handle_lasi(hw);
3449 	if (error == IXGBE_ERR_OVERTEMP)
3450 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3451 	else if (error)
3452 		device_printf(adapter->dev,
3453 		    "Error handling LASI interrupt: %d\n", error);
3454 } /* ixgbe_handle_phy */
3455 
3456 /************************************************************************
3457  * ixgbe_if_stop - Stop the hardware
3458  *
3459  *   Disables all traffic on the adapter by issuing a
3460  *   global reset on the MAC and deallocates TX/RX buffers.
3461  ************************************************************************/
3462 static void
3463 ixgbe_if_stop(if_ctx_t ctx)
3464 {
3465 	struct adapter  *adapter = iflib_get_softc(ctx);
3466 	struct ixgbe_hw *hw = &adapter->hw;
3467 
3468 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3469 
3470 	ixgbe_reset_hw(hw);
3471 	hw->adapter_stopped = FALSE;
3472 	ixgbe_stop_adapter(hw);
3473 	if (hw->mac.type == ixgbe_mac_82599EB)
3474 		ixgbe_stop_mac_link_on_d3_82599(hw);
3475 	/* Turn off the laser - noop with no optics */
3476 	ixgbe_disable_tx_laser(hw);
3477 
3478 	/* Update the stack */
3479 	adapter->link_up = FALSE;
3480 	ixgbe_if_update_admin_status(ctx);
3481 
3482 	/* reprogram the RAR[0] in case user changed it. */
3483 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3484 
3485 	return;
3486 } /* ixgbe_if_stop */
3487 
3488 /************************************************************************
3489  * ixgbe_if_update_admin_status - Update OS on link state
3490  *
3491  * Note: Only updates the OS on the cached link state.
3492  *       The real check of the hardware only happens with
3493  *       a link interrupt.
3494  ************************************************************************/
3495 static void
3496 ixgbe_if_update_admin_status(if_ctx_t ctx)
3497 {
3498 	struct adapter *adapter = iflib_get_softc(ctx);
3499 	device_t       dev = iflib_get_dev(ctx);
3500 
3501 	if (adapter->link_up) {
3502 		if (adapter->link_active == FALSE) {
3503 			if (bootverbose)
3504 				device_printf(dev, "Link is up %d Gbps %s\n",
3505 				    ((adapter->link_speed == 128) ? 10 : 1),
3506 				    "Full Duplex");
3507 			adapter->link_active = TRUE;
3508 			/* Update any Flow Control changes */
3509 			ixgbe_fc_enable(&adapter->hw);
3510 			/* Update DMA coalescing config */
3511 			ixgbe_config_dmac(adapter);
3512 			/* should actually be negotiated value */
3513 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3514 
3515 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3516 				ixgbe_ping_all_vfs(adapter);
3517 		}
3518 	} else { /* Link down */
3519 		if (adapter->link_active == TRUE) {
3520 			if (bootverbose)
3521 				device_printf(dev, "Link is Down\n");
3522 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3523 			adapter->link_active = FALSE;
3524 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3525 				ixgbe_ping_all_vfs(adapter);
3526 		}
3527 	}
3528 
3529 	/* Handle task requests from msix_link() */
3530 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3531 		ixgbe_handle_mod(ctx);
3532 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3533 		ixgbe_handle_msf(ctx);
3534 	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3535 		ixgbe_handle_mbx(ctx);
3536 	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3537 		ixgbe_reinit_fdir(ctx);
3538 	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3539 		ixgbe_handle_phy(ctx);
3540 	adapter->task_requests = 0;
3541 
3542 	ixgbe_update_stats_counters(adapter);
3543 } /* ixgbe_if_update_admin_status */
3544 
3545 /************************************************************************
3546  * ixgbe_config_dmac - Configure DMA Coalescing
3547  ************************************************************************/
3548 static void
3549 ixgbe_config_dmac(struct adapter *adapter)
3550 {
3551 	struct ixgbe_hw          *hw = &adapter->hw;
3552 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3553 
3554 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3555 		return;
3556 
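	/*
	 * Only reprogram the hardware when the watchdog timer or the
	 * link speed actually changed (the XORs are a not-equal test).
	 */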
3557 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3558 	    dcfg->link_speed ^ adapter->link_speed) {
3559 		dcfg->watchdog_timer = adapter->dmac;
3560 		dcfg->fcoe_en = FALSE;
3561 		dcfg->link_speed = adapter->link_speed;
3562 		dcfg->num_tcs = 1;
3563 
3564 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3565 		    dcfg->watchdog_timer, dcfg->link_speed);
3566 
3567 		hw->mac.ops.dmac_config(hw);
3568 	}
3569 } /* ixgbe_config_dmac */
3570 
3571 /************************************************************************
3572  * ixgbe_if_enable_intr
3573  ************************************************************************/
3574 void
3575 ixgbe_if_enable_intr(if_ctx_t ctx)
3576 {
3577 	struct adapter     *adapter = iflib_get_softc(ctx);
3578 	struct ixgbe_hw    *hw = &adapter->hw;
3579 	struct ix_rx_queue *que = adapter->rx_queues;
3580 	u32                mask, fwsm;
3581 
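	/*
	 * Build the mask of non-queue ("other") interrupt causes first;
	 * the per-queue bits are enabled further down via
	 * ixgbe_enable_queue().
	 */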
3582 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3583 
3584 	switch (adapter->hw.mac.type) {
3585 	case ixgbe_mac_82599EB:
3586 		mask |= IXGBE_EIMS_ECC;
3587 		/* Temperature sensor on some adapters */
3588 		mask |= IXGBE_EIMS_GPI_SDP0;
3589 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3590 		mask |= IXGBE_EIMS_GPI_SDP1;
3591 		mask |= IXGBE_EIMS_GPI_SDP2;
3592 		break;
3593 	case ixgbe_mac_X540:
3594 		/* Detect if Thermal Sensor is enabled */
3595 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3596 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3597 			mask |= IXGBE_EIMS_TS;
3598 		mask |= IXGBE_EIMS_ECC;
3599 		break;
3600 	case ixgbe_mac_X550:
3601 		/* MAC thermal sensor is automatically enabled */
3602 		mask |= IXGBE_EIMS_TS;
3603 		mask |= IXGBE_EIMS_ECC;
3604 		break;
3605 	case ixgbe_mac_X550EM_x:
3606 	case ixgbe_mac_X550EM_a:
3607 		/* Some devices use SDP0 for important information */
3608 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3609 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3610 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3611 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3612 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3613 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3614 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3615 		mask |= IXGBE_EIMS_ECC;
3616 		break;
3617 	default:
3618 		break;
3619 	}
3620 
3621 	/* Enable Fan Failure detection */
3622 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3623 		mask |= IXGBE_EIMS_GPI_SDP1;
3624 	/* Enable SR-IOV */
3625 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3626 		mask |= IXGBE_EIMS_MAILBOX;
3627 	/* Enable Flow Director */
3628 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3629 		mask |= IXGBE_EIMS_FLOW_DIR;
3630 
3631 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3632 
3633 	/* With MSI-X we use auto clear */
3634 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3635 		mask = IXGBE_EIMS_ENABLE_MASK;
3636 		/* Don't autoclear Link */
3637 		mask &= ~IXGBE_EIMS_OTHER;
3638 		mask &= ~IXGBE_EIMS_LSC;
3639 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3640 			mask &= ~IXGBE_EIMS_MAILBOX;
3641 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3642 	}
3643 
3644 	/*
3645 	 * Now enable all queues, this is done separately to
3646 	 * allow for handling the extended (beyond 32) MSI-X
3647 	 * vectors that can be used by 82599
3648 	 */
3649 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3650 		ixgbe_enable_queue(adapter, que->msix);
3651 
3652 	IXGBE_WRITE_FLUSH(hw);
3653 
3654 } /* ixgbe_if_enable_intr */
3655 
3656 /************************************************************************
3657  * ixgbe_if_disable_intr
3658  ************************************************************************/
3659 static void
3660 ixgbe_if_disable_intr(if_ctx_t ctx)
3661 {
3662 	struct adapter *adapter = iflib_get_softc(ctx);
3663 
3664 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3665 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3666 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3667 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3668 	} else {
3669 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3670 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3671 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3672 	}
3673 	IXGBE_WRITE_FLUSH(&adapter->hw);
3674 
3675 } /* ixgbe_if_disable_intr */
3676 
3677 /************************************************************************
3678  * ixgbe_link_intr_enable
3679  ************************************************************************/
3680 static void
3681 ixgbe_link_intr_enable(if_ctx_t ctx)
3682 {
3683 	struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw;
3684 
3685 	/* Re-enable other interrupts */
3686 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3687 } /* ixgbe_link_intr_enable */
3688 
3689 /************************************************************************
3690  * ixgbe_if_rx_queue_intr_enable
3691  ************************************************************************/
3692 static int
3693 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3694 {
3695 	struct adapter     *adapter = iflib_get_softc(ctx);
3696 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3697 
3698 	ixgbe_enable_queue(adapter, que->msix);
3699 
3700 	return (0);
3701 } /* ixgbe_if_rx_queue_intr_enable */
3702 
3703 /************************************************************************
3704  * ixgbe_enable_queue
3705  ************************************************************************/
3706 static void
3707 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3708 {
3709 	struct ixgbe_hw *hw = &adapter->hw;
3710 	u64             queue = 1ULL << vector;
3711 	u32             mask;
3712 
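	/*
	 * The 64-bit queue mask is split across the two EIMS_EX
	 * registers on 82599 and newer parts; the 82598 only has the
	 * single EIMS register.
	 */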
3713 	if (hw->mac.type == ixgbe_mac_82598EB) {
3714 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3715 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3716 	} else {
3717 		mask = (queue & 0xFFFFFFFF);
3718 		if (mask)
3719 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3720 		mask = (queue >> 32);
3721 		if (mask)
3722 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3723 	}
3724 } /* ixgbe_enable_queue */
3725 
3726 /************************************************************************
3727  * ixgbe_disable_queue
3728  ************************************************************************/
3729 static void
3730 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3731 {
3732 	struct ixgbe_hw *hw = &adapter->hw;
3733 	u64             queue = 1ULL << vector;
3734 	u32             mask;
3735 
3736 	if (hw->mac.type == ixgbe_mac_82598EB) {
3737 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3738 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3739 	} else {
3740 		mask = (queue & 0xFFFFFFFF);
3741 		if (mask)
3742 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3743 		mask = (queue >> 32);
3744 		if (mask)
3745 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3746 	}
3747 } /* ixgbe_disable_queue */
3748 
3749 /************************************************************************
3750  * ixgbe_intr - Legacy Interrupt Service Routine
3751  ************************************************************************/
3752 int
3753 ixgbe_intr(void *arg)
3754 {
3755 	struct adapter     *adapter = arg;
3756 	struct ix_rx_queue *que = adapter->rx_queues;
3757 	struct ixgbe_hw    *hw = &adapter->hw;
3758 	if_ctx_t           ctx = adapter->ctx;
3759 	u32                eicr, eicr_mask;
3760 
3761 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3762 
3763 	++que->irqs;
3764 	if (eicr == 0) {
3765 		ixgbe_if_enable_intr(ctx);
3766 		return (FILTER_HANDLED);
3767 	}
3768 
3769 	/* Check for fan failure */
3770 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3771 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3772 		device_printf(adapter->dev,
3773 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3774 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3775 	}
3776 
3777 	/* Link status change */
3778 	if (eicr & IXGBE_EICR_LSC) {
3779 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3780 		iflib_admin_intr_deferred(ctx);
3781 	}
3782 
3783 	if (ixgbe_is_sfp(hw)) {
3784 		/* Pluggable optics-related interrupt */
3785 		if (hw->mac.type >= ixgbe_mac_X540)
3786 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3787 		else
3788 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3789 
3790 		if (eicr & eicr_mask) {
3791 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3792 			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3793 		}
3794 
3795 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3796 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3797 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3798 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3799 			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3800 		}
3801 	}
3802 
3803 	/* External PHY interrupt */
3804 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3805 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3806 		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3807 
3808 	return (FILTER_SCHEDULE_THREAD);
3809 } /* ixgbe_intr */
3810 
3811 /************************************************************************
3812  * ixgbe_free_pci_resources
3813  ************************************************************************/
3814 static void
3815 ixgbe_free_pci_resources(if_ctx_t ctx)
3816 {
3817 	struct adapter *adapter = iflib_get_softc(ctx);
3818 	struct         ix_rx_queue *que = adapter->rx_queues;
3819 	device_t       dev = iflib_get_dev(ctx);
3820 
3821 	/* Release all MSI-X queue resources */
3822 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3823 		iflib_irq_free(ctx, &adapter->irq);
3824 
3825 	if (que != NULL) {
3826 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3827 			iflib_irq_free(ctx, &que->que_irq);
3828 		}
3829 	}
3830 
3831 	if (adapter->pci_mem != NULL)
3832 		bus_release_resource(dev, SYS_RES_MEMORY,
3833 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
3834 } /* ixgbe_free_pci_resources */
3835 
3836 /************************************************************************
3837  * ixgbe_sysctl_flowcntl
3838  *
3839  *   SYSCTL wrapper around setting Flow Control
3840  ************************************************************************/
3841 static int
3842 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3843 {
3844 	struct adapter *adapter;
3845 	int            error, fc;
3846 
3847 	adapter = (struct adapter *)arg1;
3848 	fc = adapter->hw.fc.current_mode;
3849 
3850 	error = sysctl_handle_int(oidp, &fc, 0, req);
3851 	if ((error) || (req->newptr == NULL))
3852 		return (error);
3853 
3854 	/* Don't bother if it's not changed */
3855 	if (fc == adapter->hw.fc.current_mode)
3856 		return (0);
3857 
3858 	return ixgbe_set_flowcntl(adapter, fc);
3859 } /* ixgbe_sysctl_flowcntl */
3860 
3861 /************************************************************************
3862  * ixgbe_set_flowcntl - Set flow control
3863  *
3864  *   Flow control values:
3865  *     0 - off
3866  *     1 - rx pause
3867  *     2 - tx pause
3868  *     3 - full
3869  ************************************************************************/
3870 static int
3871 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3872 {
3873 	switch (fc) {
3874 	case ixgbe_fc_rx_pause:
3875 	case ixgbe_fc_tx_pause:
3876 	case ixgbe_fc_full:
3877 		adapter->hw.fc.requested_mode = fc;
3878 		if (adapter->num_rx_queues > 1)
3879 			ixgbe_disable_rx_drop(adapter);
3880 		break;
3881 	case ixgbe_fc_none:
3882 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3883 		if (adapter->num_rx_queues > 1)
3884 			ixgbe_enable_rx_drop(adapter);
3885 		break;
3886 	default:
3887 		return (EINVAL);
3888 	}
3889 
3890 	/* Don't autoneg if forcing a value */
3891 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3892 	ixgbe_fc_enable(&adapter->hw);
3893 
3894 	return (0);
3895 } /* ixgbe_set_flowcntl */
3896 
3897 /************************************************************************
3898  * ixgbe_enable_rx_drop
3899  *
3900  *   Enable the hardware to drop packets when the buffer is
3901  *   full. This is useful with multiqueue, so that no single
3902  *   queue being full stalls the entire RX engine. We only
3903  *   enable this when Multiqueue is enabled AND Flow Control
3904  *   is disabled.
3905  ************************************************************************/
3906 static void
3907 ixgbe_enable_rx_drop(struct adapter *adapter)
3908 {
3909 	struct ixgbe_hw *hw = &adapter->hw;
3910 	struct rx_ring  *rxr;
3911 	u32             srrctl;
3912 
3913 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3914 		rxr = &adapter->rx_queues[i].rxr;
3915 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3916 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3917 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3918 	}
3919 
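	/*
	 * QDE is programmed one pool at a time: each write carries the
	 * WRITE strobe, the pool index and (here) the drop-enable bit.
	 */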
3920 	/* enable drop for each vf */
3921 	for (int i = 0; i < adapter->num_vfs; i++) {
3922 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3923 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3924 		                IXGBE_QDE_ENABLE));
3925 	}
3926 } /* ixgbe_enable_rx_drop */
3927 
3928 /************************************************************************
3929  * ixgbe_disable_rx_drop
3930  ************************************************************************/
3931 static void
3932 ixgbe_disable_rx_drop(struct adapter *adapter)
3933 {
3934 	struct ixgbe_hw *hw = &adapter->hw;
3935 	struct rx_ring  *rxr;
3936 	u32             srrctl;
3937 
3938 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3939 		rxr = &adapter->rx_queues[i].rxr;
3940 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3941 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3942 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3943 	}
3944 
3945 	/* disable drop for each vf */
3946 	for (int i = 0; i < adapter->num_vfs; i++) {
3947 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3948 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3949 	}
3950 } /* ixgbe_disable_rx_drop */
3951 
3952 /************************************************************************
3953  * ixgbe_sysctl_advertise
3954  *
3955  *   SYSCTL wrapper around setting advertised speed
3956  ************************************************************************/
3957 static int
3958 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3959 {
3960 	struct adapter *adapter;
3961 	int            error, advertise;
3962 
3963 	adapter = (struct adapter *)arg1;
3964 	advertise = adapter->advertise;
3965 
3966 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3967 	if ((error) || (req->newptr == NULL))
3968 		return (error);
3969 
3970 	return ixgbe_set_advertise(adapter, advertise);
3971 } /* ixgbe_sysctl_advertise */
3972 
3973 /************************************************************************
3974  * ixgbe_set_advertise - Control advertised link speed
3975  *
3976  *   Flags:
3977  *     0x1 - advertise 100 Mb
3978  *     0x2 - advertise 1G
3979  *     0x4 - advertise 10G
3980  *     0x8 - advertise 10 Mb (yes, Mb)
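 *
 *   Flags may be ORed together, e.g. 0x6 advertises both 1G and 10G.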
3981  ************************************************************************/
3982 static int
3983 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3984 {
3985 	device_t         dev = iflib_get_dev(adapter->ctx);
3986 	struct ixgbe_hw  *hw;
3987 	ixgbe_link_speed speed = 0;
3988 	ixgbe_link_speed link_caps = 0;
3989 	s32              err = IXGBE_NOT_IMPLEMENTED;
3990 	bool             negotiate = FALSE;
3991 
3992 	/* Checks to validate new value */
3993 	if (adapter->advertise == advertise) /* no change */
3994 		return (0);
3995 
3996 	hw = &adapter->hw;
3997 
3998 	/* No speed changes for backplane media */
3999 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4000 		return (ENODEV);
4001 
4002 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4003 	      (hw->phy.multispeed_fiber))) {
4004 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4005 		return (EINVAL);
4006 	}
4007 
4008 	if (advertise < 0x1 || advertise > 0xF) {
4009 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4010 		return (EINVAL);
4011 	}
4012 
4013 	if (hw->mac.ops.get_link_capabilities) {
4014 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4015 		    &negotiate);
4016 		if (err != IXGBE_SUCCESS) {
4017 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4018 			return (ENODEV);
4019 		}
4020 	}
4021 
4022 	/* Set new value and report new advertised mode */
4023 	if (advertise & 0x1) {
4024 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4025 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4026 			return (EINVAL);
4027 		}
4028 		speed |= IXGBE_LINK_SPEED_100_FULL;
4029 	}
4030 	if (advertise & 0x2) {
4031 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4032 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4033 			return (EINVAL);
4034 		}
4035 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4036 	}
4037 	if (advertise & 0x4) {
4038 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4039 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4040 			return (EINVAL);
4041 		}
4042 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4043 	}
4044 	if (advertise & 0x8) {
4045 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4046 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4047 			return (EINVAL);
4048 		}
4049 		speed |= IXGBE_LINK_SPEED_10_FULL;
4050 	}
4051 
4052 	hw->mac.autotry_restart = TRUE;
4053 	hw->mac.ops.setup_link(hw, speed, TRUE);
4054 	adapter->advertise = advertise;
4055 
4056 	return (0);
4057 } /* ixgbe_set_advertise */
4058 
4059 /************************************************************************
4060  * ixgbe_get_advertise - Get current advertised speed settings
4061  *
4062  *   Formatted for sysctl usage.
4063  *   Flags:
4064  *     0x1 - advertise 100 Mb
4065  *     0x2 - advertise 1G
4066  *     0x4 - advertise 10G
4067  *     0x8 - advertise 10 Mb (yes, Mb)
4068  ************************************************************************/
4069 static int
4070 ixgbe_get_advertise(struct adapter *adapter)
4071 {
4072 	struct ixgbe_hw  *hw = &adapter->hw;
4073 	int              speed;
4074 	ixgbe_link_speed link_caps = 0;
4075 	s32              err;
4076 	bool             negotiate = FALSE;
4077 
4078 	/*
4079 	 * Advertised speed means nothing unless it's copper or
4080 	 * multi-speed fiber
4081 	 */
4082 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4083 	    !(hw->phy.multispeed_fiber))
4084 		return (0);
4085 
4086 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4087 	if (err != IXGBE_SUCCESS)
4088 		return (0);
4089 
4090 	speed =
4091 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4092 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4093 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4094 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4095 
4096 	return speed;
4097 } /* ixgbe_get_advertise */
4098 
4099 /************************************************************************
4100  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4101  *
4102  *   Control values:
4103  *     0/1 - off / on (use default value of 1000)
4104  *
4105  *     Legal timer values are:
4106  *     50,100,250,500,1000,2000,5000,10000
4107  *
4108  *     Turning off interrupt moderation will also turn this off.
4109  ************************************************************************/
4110 static int
4111 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4112 {
4113 	struct adapter *adapter = (struct adapter *)arg1;
4114 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4115 	int            error;
4116 	u16            newval;
4117 
4118 	newval = adapter->dmac;
4119 	error = sysctl_handle_16(oidp, &newval, 0, req);
4120 	if ((error) || (req->newptr == NULL))
4121 		return (error);
4122 
4123 	switch (newval) {
4124 	case 0:
4125 		/* Disabled */
4126 		adapter->dmac = 0;
4127 		break;
4128 	case 1:
4129 		/* Enable and use default */
4130 		adapter->dmac = 1000;
4131 		break;
4132 	case 50:
4133 	case 100:
4134 	case 250:
4135 	case 500:
4136 	case 1000:
4137 	case 2000:
4138 	case 5000:
4139 	case 10000:
4140 		/* Legal values - allow */
4141 		adapter->dmac = newval;
4142 		break;
4143 	default:
4144 		/* Do nothing, illegal value */
4145 		return (EINVAL);
4146 	}
4147 
4148 	/* Re-initialize hardware if it's already running */
4149 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4150 		ifp->if_init(ifp);
4151 
4152 	return (0);
4153 } /* ixgbe_sysctl_dmac */
4154 
4155 #ifdef IXGBE_DEBUG
4156 /************************************************************************
4157  * ixgbe_sysctl_power_state
4158  *
4159  *   Sysctl to test power states
4160  *   Values:
4161  *     0      - set device to D0
4162  *     3      - set device to D3
4163  *     (none) - get current device power state
4164  ************************************************************************/
4165 static int
4166 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4167 {
4168 	struct adapter *adapter = (struct adapter *)arg1;
4169 	device_t       dev = adapter->dev;
4170 	int            curr_ps, new_ps, error = 0;
4171 
4172 	curr_ps = new_ps = pci_get_powerstate(dev);
4173 
4174 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4175 	if ((error) || (req->newptr == NULL))
4176 		return (error);
4177 
4178 	if (new_ps == curr_ps)
4179 		return (0);
4180 
4181 	if (new_ps == 3 && curr_ps == 0)
4182 		error = DEVICE_SUSPEND(dev);
4183 	else if (new_ps == 0 && curr_ps == 3)
4184 		error = DEVICE_RESUME(dev);
4185 	else
4186 		return (EINVAL);
4187 
4188 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4189 
4190 	return (error);
4191 } /* ixgbe_sysctl_power_state */
4192 #endif
4193 
4194 /************************************************************************
4195  * ixgbe_sysctl_wol_enable
4196  *
4197  *   Sysctl to enable/disable the WoL capability,
4198  *   if supported by the adapter.
4199  *
4200  *   Values:
4201  *     0 - disabled
4202  *     1 - enabled
4203  ************************************************************************/
4204 static int
4205 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4206 {
4207 	struct adapter  *adapter = (struct adapter *)arg1;
4208 	struct ixgbe_hw *hw = &adapter->hw;
4209 	int             new_wol_enabled;
4210 	int             error = 0;
4211 
4212 	new_wol_enabled = hw->wol_enabled;
4213 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4214 	if ((error) || (req->newptr == NULL))
4215 		return (error);
4216 	new_wol_enabled = !!(new_wol_enabled);
4217 	if (new_wol_enabled == hw->wol_enabled)
4218 		return (0);
4219 
4220 	if (new_wol_enabled > 0 && !adapter->wol_support)
4221 		return (ENODEV);
4222 	else
4223 		hw->wol_enabled = new_wol_enabled;
4224 
4225 	return (0);
4226 } /* ixgbe_sysctl_wol_enable */
4227 
4228 /************************************************************************
4229  * ixgbe_sysctl_wufc - Wake Up Filter Control
4230  *
4231  *   Sysctl to select which received packet types will wake
4232  *   up the adapter.
4233  *   Flags:
4234  *     0x1  - Link Status Change
4235  *     0x2  - Magic Packet
4236  *     0x4  - Direct Exact
4237  *     0x8  - Directed Multicast
4238  *     0x10 - Broadcast
4239  *     0x20 - ARP/IPv4 Request Packet
4240  *     0x40 - Direct IPv4 Packet
4241  *     0x80 - Direct IPv6 Packet
4242  *
4243  *   Settings not listed above will cause the sysctl to return an error.
4244  ************************************************************************/
4245 static int
4246 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4247 {
4248 	struct adapter *adapter = (struct adapter *)arg1;
4249 	int            error = 0;
4250 	u32            new_wufc;
4251 
4252 	new_wufc = adapter->wufc;
4253 
4254 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4255 	if ((error) || (req->newptr == NULL))
4256 		return (error);
4257 	if (new_wufc == adapter->wufc)
4258 		return (0);
4259 
4260 	if (new_wufc & 0xffffff00)
4261 		return (EINVAL);
4262 
4263 	new_wufc &= 0xff;
4264 	new_wufc |= (0xffffff & adapter->wufc);
4265 	adapter->wufc = new_wufc;
4266 
4267 	return (0);
4268 } /* ixgbe_sysctl_wufc */
4269 
4270 #ifdef IXGBE_DEBUG
4271 /************************************************************************
4272  * ixgbe_sysctl_print_rss_config
4273  ************************************************************************/
4274 static int
4275 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4276 {
4277 	struct adapter  *adapter = (struct adapter *)arg1;
4278 	struct ixgbe_hw *hw = &adapter->hw;
4279 	device_t        dev = adapter->dev;
4280 	struct sbuf     *buf;
4281 	int             error = 0, reta_size;
4282 	u32             reg;
4283 
4284 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4285 	if (!buf) {
4286 		device_printf(dev, "Could not allocate sbuf for output.\n");
4287 		return (ENOMEM);
4288 	}
4289 
4290 	// TODO: use sbufs to make a string to print out
4291 	/* Set multiplier for RETA setup and table size based on MAC */
4292 	switch (adapter->hw.mac.type) {
4293 	case ixgbe_mac_X550:
4294 	case ixgbe_mac_X550EM_x:
4295 	case ixgbe_mac_X550EM_a:
4296 		reta_size = 128;
4297 		break;
4298 	default:
4299 		reta_size = 32;
4300 		break;
4301 	}
4302 
4303 	/* Print out the redirection table */
4304 	sbuf_cat(buf, "\n");
4305 	for (int i = 0; i < reta_size; i++) {
4306 		if (i < 32) {
4307 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4308 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4309 		} else {
4310 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4311 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4312 		}
4313 	}
4314 
4315 	// TODO: print more config
4316 
4317 	error = sbuf_finish(buf);
4318 	if (error)
4319 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4320 
4321 	sbuf_delete(buf);
4322 
4323 	return (0);
4324 } /* ixgbe_sysctl_print_rss_config */
4325 #endif /* IXGBE_DEBUG */
4326 
4327 /************************************************************************
4328  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4329  *
4330  *   For X552/X557-AT devices using an external PHY
4331  ************************************************************************/
4332 static int
4333 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4334 {
4335 	struct adapter  *adapter = (struct adapter *)arg1;
4336 	struct ixgbe_hw *hw = &adapter->hw;
4337 	u16             reg;
4338 
4339 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4340 		device_printf(iflib_get_dev(adapter->ctx),
4341 		    "Device has no supported external thermal sensor.\n");
4342 		return (ENODEV);
4343 	}
4344 
4345 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4346 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4347 		device_printf(iflib_get_dev(adapter->ctx),
4348 		    "Error reading from PHY's current temperature register\n");
4349 		return (EAGAIN);
4350 	}
4351 
4352 	/* Shift temp for output */
4353 	reg = reg >> 8;
4354 
4355 	return (sysctl_handle_16(oidp, NULL, reg, req));
4356 } /* ixgbe_sysctl_phy_temp */
4357 
4358 /************************************************************************
4359  * ixgbe_sysctl_phy_overtemp_occurred
4360  *
4361  *   Reports (directly from the PHY) whether the current PHY
4362  *   temperature is over the overtemp threshold.
4363  ************************************************************************/
4364 static int
4365 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4366 {
4367 	struct adapter  *adapter = (struct adapter *)arg1;
4368 	struct ixgbe_hw *hw = &adapter->hw;
4369 	u16             reg;
4370 
4371 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4372 		device_printf(iflib_get_dev(adapter->ctx),
4373 		    "Device has no supported external thermal sensor.\n");
4374 		return (ENODEV);
4375 	}
4376 
4377 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4378 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4379 		device_printf(iflib_get_dev(adapter->ctx),
4380 		    "Error reading from PHY's temperature status register\n");
4381 		return (EAGAIN);
4382 	}
4383 
4384 	/* Get occurrence bit */
4385 	reg = !!(reg & 0x4000);
4386 
4387 	return (sysctl_handle_16(oidp, NULL, reg, req));
4388 } /* ixgbe_sysctl_phy_overtemp_occurred */
4389 
4390 /************************************************************************
4391  * ixgbe_sysctl_eee_state
4392  *
4393  *   Sysctl to set EEE power saving feature
4394  *   Values:
4395  *     0      - disable EEE
4396  *     1      - enable EEE
4397  *     (none) - get current device EEE state
4398  ************************************************************************/
4399 static int
4400 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4401 {
4402 	struct adapter *adapter = (struct adapter *)arg1;
4403 	device_t       dev = adapter->dev;
4404 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4405 	int            curr_eee, new_eee, error = 0;
4406 	s32            retval;
4407 
4408 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4409 
4410 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4411 	if ((error) || (req->newptr == NULL))
4412 		return (error);
4413 
4414 	/* Nothing to do */
4415 	if (new_eee == curr_eee)
4416 		return (0);
4417 
4418 	/* Not supported */
4419 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4420 		return (EINVAL);
4421 
4422 	/* Bounds checking */
4423 	if ((new_eee < 0) || (new_eee > 1))
4424 		return (EINVAL);
4425 
4426 	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
4427 	if (retval) {
4428 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4429 		return (EINVAL);
4430 	}
4431 
4432 	/* Restart auto-neg */
4433 	ifp->if_init(ifp);
4434 
4435 	device_printf(dev, "New EEE state: %d\n", new_eee);
4436 
4437 	/* Cache new value */
4438 	if (new_eee)
4439 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4440 	else
4441 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4442 
4443 	return (error);
4444 } /* ixgbe_sysctl_eee_state */
4445 
4446 /************************************************************************
4447  * ixgbe_init_device_features
4448  ************************************************************************/
4449 static void
4450 ixgbe_init_device_features(struct adapter *adapter)
4451 {
4452 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4453 	                  | IXGBE_FEATURE_RSS
4454 	                  | IXGBE_FEATURE_MSI
4455 	                  | IXGBE_FEATURE_MSIX
4456 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4457 
4458 	/* Set capabilities first... */
4459 	switch (adapter->hw.mac.type) {
4460 	case ixgbe_mac_82598EB:
4461 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4462 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4463 		break;
4464 	case ixgbe_mac_X540:
4465 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4466 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4467 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4468 		    (adapter->hw.bus.func == 0))
4469 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4470 		break;
4471 	case ixgbe_mac_X550:
4472 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4473 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4474 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4475 		break;
4476 	case ixgbe_mac_X550EM_x:
4477 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4478 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4479 		break;
4480 	case ixgbe_mac_X550EM_a:
4481 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4482 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4483 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4484 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4485 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4486 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4487 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4488 		}
4489 		break;
4490 	case ixgbe_mac_82599EB:
4491 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4492 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4493 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4494 		    (adapter->hw.bus.func == 0))
4495 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4496 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4497 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4498 		break;
4499 	default:
4500 		break;
4501 	}
4502 
4503 	/* Enabled by default... */
4504 	/* Fan failure detection */
4505 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4506 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4507 	/* Netmap */
4508 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4509 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4510 	/* EEE */
4511 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4512 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4513 	/* Thermal Sensor */
4514 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4515 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4516 
4517 	/* Enabled via global sysctl... */
4518 	/* Flow Director */
4519 	if (ixgbe_enable_fdir) {
4520 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4521 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4522 		else
4523 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4524 	}
4525 	/*
4526 	 * Message Signal Interrupts - Extended (MSI-X)
4527 	 * Normal MSI is only enabled if MSI-X calls fail.
4528 	 */
4529 	if (!ixgbe_enable_msix)
4530 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4531 	/* Receive-Side Scaling (RSS) */
4532 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4533 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4534 
4535 	/* Disable features with unmet dependencies... */
4536 	/* No MSI-X */
4537 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4538 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4539 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4540 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4541 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4542 	}
4543 } /* ixgbe_init_device_features */
4544 
4545 /************************************************************************
4546  * ixgbe_check_fan_failure
4547  ************************************************************************/
4548 static void
4549 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4550 {
4551 	u32 mask;
4552 
4553 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4554 	    IXGBE_ESDP_SDP1;
4555 
4556 	if (reg & mask)
4557 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4558 } /* ixgbe_check_fan_failure */
4559