xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 28f4385e45a2681c14bd04b83fe1796eaefe8265)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.1-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select the devices to attach to.
57  *   The last field is the device description string.
58  *   The last entry must be PVID_END.
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
108 	/* required last entry */
109   PVID_END
110 };
111 
112 static void *ixgbe_register(device_t dev);
113 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
114 static int  ixgbe_if_attach_post(if_ctx_t ctx);
115 static int  ixgbe_if_detach(if_ctx_t ctx);
116 static int  ixgbe_if_shutdown(if_ctx_t ctx);
117 static int  ixgbe_if_suspend(if_ctx_t ctx);
118 static int  ixgbe_if_resume(if_ctx_t ctx);
119 
120 static void ixgbe_if_stop(if_ctx_t ctx);
121 void ixgbe_if_enable_intr(if_ctx_t ctx);
122 static void ixgbe_if_disable_intr(if_ctx_t ctx);
123 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
124 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
125 static int  ixgbe_if_media_change(if_ctx_t ctx);
126 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
127 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
128 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
129 static void ixgbe_if_multi_set(if_ctx_t ctx);
130 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
131 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
132                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
133 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
134                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
135 static void ixgbe_if_queues_free(if_ctx_t ctx);
136 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
137 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
138 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
139 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
140 static int  ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
141 int ixgbe_intr(void *arg);
142 
143 /************************************************************************
144  * Function prototypes
145  ************************************************************************/
146 #if __FreeBSD_version >= 1100036
147 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
148 #endif
149 
150 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
151 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
152 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
153 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
154 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
155 
156 static void ixgbe_config_dmac(struct adapter *adapter);
157 static void ixgbe_configure_ivars(struct adapter *adapter);
158 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
159                            s8 type);
160 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
161 static bool ixgbe_sfp_probe(if_ctx_t ctx);
162 
163 static void ixgbe_free_pci_resources(if_ctx_t ctx);
164 
165 static int  ixgbe_msix_link(void *arg);
166 static int  ixgbe_msix_que(void *arg);
167 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
168 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
169 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
170 
171 static int  ixgbe_setup_interface(if_ctx_t ctx);
172 static void ixgbe_init_device_features(struct adapter *adapter);
173 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
174 static void ixgbe_add_media_types(if_ctx_t ctx);
175 static void ixgbe_update_stats_counters(struct adapter *adapter);
176 static void ixgbe_config_link(struct adapter *adapter);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static void ixgbe_check_wol_support(struct adapter *adapter);
179 static void ixgbe_enable_rx_drop(struct adapter *);
180 static void ixgbe_disable_rx_drop(struct adapter *);
181 
182 static void ixgbe_add_hw_stats(struct adapter *adapter);
183 static int  ixgbe_set_flowcntl(struct adapter *, int);
184 static int  ixgbe_set_advertise(struct adapter *, int);
185 static int  ixgbe_get_advertise(struct adapter *);
186 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
187 static void ixgbe_config_gpie(struct adapter *adapter);
188 static void ixgbe_config_delay_values(struct adapter *adapter);
189 
190 /* Sysctl handlers */
191 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
192 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
196 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
197 #ifdef IXGBE_DEBUG
198 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
199 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
200 #endif
201 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
202 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
207 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
208 
209 /* Deferred interrupt tasklets */
210 static void ixgbe_handle_msf(void *);
211 static void ixgbe_handle_mod(void *);
212 static void ixgbe_handle_phy(void *);
213 
214 /************************************************************************
215  *  FreeBSD Device Interface Entry Points
216  ************************************************************************/
217 static device_method_t ix_methods[] = {
218 	/* Device interface */
219 	DEVMETHOD(device_register, ixgbe_register),
220 	DEVMETHOD(device_probe, iflib_device_probe),
221 	DEVMETHOD(device_attach, iflib_device_attach),
222 	DEVMETHOD(device_detach, iflib_device_detach),
223 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
224 	DEVMETHOD(device_suspend, iflib_device_suspend),
225 	DEVMETHOD(device_resume, iflib_device_resume),
226 #ifdef PCI_IOV
227 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
228 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
229 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
230 #endif /* PCI_IOV */
231 	DEVMETHOD_END
232 };
233 
234 static driver_t ix_driver = {
235 	"ix", ix_methods, sizeof(struct adapter),
236 };
237 
238 devclass_t ix_devclass;
239 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
240 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
241 MODULE_DEPEND(ix, pci, 1, 1, 1);
242 MODULE_DEPEND(ix, ether, 1, 1, 1);
243 MODULE_DEPEND(ix, iflib, 1, 1, 1);
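/*
 * When built as a module this driver typically loads as if_ix.ko
 * (e.g. 'kldload if_ix', or if_ix_load="YES" in loader.conf(5));
 * the MODULE_DEPEND() entries above ensure pci, ether and iflib
 * are present first.
 */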
244 
245 static device_method_t ixgbe_if_methods[] = {
246 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
247 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
248 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
249 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
250 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
251 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
252 	DEVMETHOD(ifdi_init, ixgbe_if_init),
253 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
254 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
255 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
256 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
257 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
259 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
260 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
261 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
262 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
263 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
264 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
265 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
266 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
267 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
268 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
269 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
270 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
271 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
272 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
273 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
274 #ifdef PCI_IOV
275 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
276 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
277 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
278 #endif /* PCI_IOV */
279 	DEVMETHOD_END
280 };
281 
282 /*
283  * TUNEABLE PARAMETERS:
284  */
285 
286 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
287 static driver_t ixgbe_if_driver = {
288   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
289 };
290 
291 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
292 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
293     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
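/*
 * A quick check of the default above, assuming IXGBE_LOW_LATENCY is 128:
 * 4000000 / 128 = 31250, i.e. a default cap of roughly 31250 interrupts
 * per second per queue vector.
 */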
294 
295 /* Flow control setting, default to full */
296 static int ixgbe_flow_control = ixgbe_fc_full;
297 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
299 
300 /* Advertise Speed, default to 0 (auto) */
301 static int ixgbe_advertise_speed = 0;
302 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
304 
305 /*
306  * Smart speed setting, default to on.
307  * Right now this only works as a compile-time
308  * option, since it is applied during attach;
309  * set this to 'ixgbe_smart_speed_off' to
310  * disable.
311  */
312 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
313 
314 /*
315  * MSI-X should be the default for best performance,
316  * but this allows it to be forced off for testing.
317  */
318 static int ixgbe_enable_msix = 1;
319 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320     "Enable MSI-X interrupts");
321 
322 /*
323  * Setting this to on allows the use
324  * of unsupported SFP+ modules; note that
325  * if you do so, you are on your own :)
326  */
327 static int allow_unsupported_sfp = FALSE;
328 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
329     &allow_unsupported_sfp, 0,
330     "Allow unsupported SFP modules...use at your own risk");
331 
332 /*
333  * Not sure if Flow Director is fully baked,
334  * so we'll default to turning it off.
335  */
336 static int ixgbe_enable_fdir = 0;
337 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
338     "Enable Flow Director");
339 
340 /* Receive-Side Scaling */
341 static int ixgbe_enable_rss = 1;
342 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
343     "Enable Receive-Side Scaling (RSS)");
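/*
 * All of the CTLFLAG_RDTUN knobs above can be seeded as loader tunables.
 * A hypothetical loader.conf(5) snippet (illustrative values only):
 *
 *   hw.ix.max_interrupt_rate="31250"
 *   hw.ix.flow_control="3"        # ixgbe_fc_full
 *   hw.ix.advertise_speed="0"     # auto
 *   hw.ix.enable_msix="1"
 *   hw.ix.unsupported_sfp="0"
 *   hw.ix.enable_fdir="0"
 *   hw.ix.enable_rss="1"
 */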
344 
345 #if 0
346 /* Keep running tab on them for sanity check */
347 static int ixgbe_total_ports;
348 #endif
349 
350 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
351 
352 /*
353  * For Flow Director: this is the rate at which we sample TX packets
354  * for the filter pool; with the default of 20, every 20th packet is probed.
355  *
356  * This feature can be disabled by setting this to 0.
357  */
358 static int atr_sample_rate = 20;
359 
360 extern struct if_txrx ixgbe_txrx;
361 
362 static struct if_shared_ctx ixgbe_sctx_init = {
363 	.isc_magic = IFLIB_MAGIC,
364 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
365 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
366 	.isc_tx_maxsegsize = PAGE_SIZE,
367 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
368 	.isc_tso_maxsegsize = PAGE_SIZE,
369 	.isc_rx_maxsize = PAGE_SIZE*4,
370 	.isc_rx_nsegments = 1,
371 	.isc_rx_maxsegsize = PAGE_SIZE*4,
372 	.isc_nfl = 1,
373 	.isc_ntxqs = 1,
374 	.isc_nrxqs = 1,
375 
376 	.isc_admin_intrcnt = 1,
377 	.isc_vendor_info = ixgbe_vendor_info_array,
378 	.isc_driver_version = ixgbe_driver_version,
379 	.isc_driver = &ixgbe_if_driver,
380 
381 	.isc_nrxd_min = {MIN_RXD},
382 	.isc_ntxd_min = {MIN_TXD},
383 	.isc_nrxd_max = {MAX_RXD},
384 	.isc_ntxd_max = {MAX_TXD},
385 	.isc_nrxd_default = {DEFAULT_RXD},
386 	.isc_ntxd_default = {DEFAULT_TXD},
387 };
388 
389 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
390 
391 /************************************************************************
392  * ixgbe_if_tx_queues_alloc
393  ************************************************************************/
394 static int
395 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
396                          int ntxqs, int ntxqsets)
397 {
398 	struct adapter     *adapter = iflib_get_softc(ctx);
399 	if_softc_ctx_t     scctx = adapter->shared;
400 	struct ix_tx_queue *que;
401 	int                i, j, error;
402 
403 	MPASS(adapter->num_tx_queues > 0);
404 	MPASS(adapter->num_tx_queues == ntxqsets);
405 	MPASS(ntxqs == 1);
406 
407 	/* Allocate queue structure memory */
408 	adapter->tx_queues =
409 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
410 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
411 	if (!adapter->tx_queues) {
412 		device_printf(iflib_get_dev(ctx),
413 		    "Unable to allocate TX ring memory\n");
414 		return (ENOMEM);
415 	}
416 
417 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
418 		struct tx_ring *txr = &que->txr;
419 
420 		/* In case SR-IOV is enabled, align the index properly */
421 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
422 		    i);
423 
424 		txr->adapter = que->adapter = adapter;
425 		adapter->active_queues |= (u64)1 << txr->me;
426 
427 		/* Allocate report status array */
428 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
429 		if (txr->tx_rsq == NULL) {
430 			error = ENOMEM;
431 			goto fail;
432 		}
433 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
434 			txr->tx_rsq[j] = QIDX_INVALID;
435 		/* get the virtual and physical address of the hardware queues */
436 		txr->tail = IXGBE_TDT(txr->me);
437 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
438 		txr->tx_paddr = paddrs[i];
439 
440 		txr->bytes = 0;
441 		txr->total_packets = 0;
442 
443 		/* Set the rate at which we sample packets */
444 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
445 			txr->atr_sample = atr_sample_rate;
446 
447 	}
448 
449 	iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod,
450 	    "mod_task");
451 	iflib_config_gtask_init(ctx, &adapter->msf_task, ixgbe_handle_msf,
452 	    "msf_task");
453 	iflib_config_gtask_init(ctx, &adapter->phy_task, ixgbe_handle_phy,
454 	    "phy_task");
455 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
456 		iflib_config_gtask_init(ctx, &adapter->mbx_task,
457 		    ixgbe_handle_mbx, "mbx_task");
458 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
459 		iflib_config_gtask_init(ctx, &adapter->fdir_task,
460 		    ixgbe_reinit_fdir, "fdir_task");
461 
462 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
463 	    adapter->num_tx_queues);
464 
465 	return (0);
466 
467 fail:
468 	ixgbe_if_queues_free(ctx);
469 
470 	return (error);
471 } /* ixgbe_if_tx_queues_alloc */
472 
473 /************************************************************************
474  * ixgbe_if_rx_queues_alloc
475  ************************************************************************/
476 static int
477 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
478                          int nrxqs, int nrxqsets)
479 {
480 	struct adapter     *adapter = iflib_get_softc(ctx);
481 	struct ix_rx_queue *que;
482 	int                i;
483 
484 	MPASS(adapter->num_rx_queues > 0);
485 	MPASS(adapter->num_rx_queues == nrxqsets);
486 	MPASS(nrxqs == 1);
487 
488 	/* Allocate queue structure memory */
489 	adapter->rx_queues =
490 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
491 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
492 	if (!adapter->rx_queues) {
493 		device_printf(iflib_get_dev(ctx),
494 		    "Unable to allocate RX ring memory\n");
495 		return (ENOMEM);
496 	}
497 
498 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
499 		struct rx_ring *rxr = &que->rxr;
500 
501 		/* In case SR-IOV is enabled, align the index properly */
502 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
503 		    i);
504 
505 		rxr->adapter = que->adapter = adapter;
506 
507 		/* get the virtual and physical address of the hw queues */
508 		rxr->tail = IXGBE_RDT(rxr->me);
509 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
510 		rxr->rx_paddr = paddrs[i];
511 		rxr->bytes = 0;
512 		rxr->que = que;
513 	}
514 
515 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
516 	    adapter->num_rx_queues);
517 
518 	return (0);
519 } /* ixgbe_if_rx_queues_alloc */
520 
521 /************************************************************************
522  * ixgbe_if_queues_free
523  ************************************************************************/
524 static void
525 ixgbe_if_queues_free(if_ctx_t ctx)
526 {
527 	struct adapter     *adapter = iflib_get_softc(ctx);
528 	struct ix_tx_queue *tx_que = adapter->tx_queues;
529 	struct ix_rx_queue *rx_que = adapter->rx_queues;
530 	int                i;
531 
532 	if (tx_que != NULL) {
533 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
534 			struct tx_ring *txr = &tx_que->txr;
535 			if (txr->tx_rsq == NULL)
536 				break;
537 
538 			free(txr->tx_rsq, M_IXGBE);
539 			txr->tx_rsq = NULL;
540 		}
541 
542 		free(adapter->tx_queues, M_IXGBE);
543 		adapter->tx_queues = NULL;
544 	}
545 	if (rx_que != NULL) {
546 		free(adapter->rx_queues, M_IXGBE);
547 		adapter->rx_queues = NULL;
548 	}
549 } /* ixgbe_if_queues_free */
550 
551 /************************************************************************
552  * ixgbe_initialize_rss_mapping
553  ************************************************************************/
554 static void
555 ixgbe_initialize_rss_mapping(struct adapter *adapter)
556 {
557 	struct ixgbe_hw *hw = &adapter->hw;
558 	u32             reta = 0, mrqc, rss_key[10];
559 	int             queue_id, table_size, index_mult;
560 	int             i, j;
561 	u32             rss_hash_config;
562 
563 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
564 		/* Fetch the configured RSS key */
565 		rss_getkey((uint8_t *)&rss_key);
566 	} else {
567 		/* set up random bits */
568 		arc4rand(&rss_key, sizeof(rss_key), 0);
569 	}
570 
571 	/* Set multiplier for RETA setup and table size based on MAC */
572 	index_mult = 0x1;
573 	table_size = 128;
574 	switch (adapter->hw.mac.type) {
575 	case ixgbe_mac_82598EB:
576 		index_mult = 0x11;
577 		break;
578 	case ixgbe_mac_X550:
579 	case ixgbe_mac_X550EM_x:
580 	case ixgbe_mac_X550EM_a:
581 		table_size = 512;
582 		break;
583 	default:
584 		break;
585 	}
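	/*
	 * On the X550 family the table is 512 entries; in the loop below
	 * entries 0..127 land in the RETA registers and entries 128..511
	 * in the ERETA registers, four 8-bit entries per 32-bit register.
	 */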
586 
587 	/* Set up the redirection table */
588 	for (i = 0, j = 0; i < table_size; i++, j++) {
589 		if (j == adapter->num_rx_queues)
590 			j = 0;
591 
592 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
593 			/*
594 			 * Fetch the RSS bucket id for the given indirection
595 			 * entry. Cap it at the number of configured buckets
596 			 * (which is num_rx_queues.)
597 			 */
598 			queue_id = rss_get_indirection_to_bucket(i);
599 			queue_id = queue_id % adapter->num_rx_queues;
600 		} else
601 			queue_id = (j * index_mult);
602 
603 		/*
604 		 * The low 8 bits are for hash value (n+0);
605 		 * The next 8 bits are for hash value (n+1), etc.
606 		 */
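		/*
		 * Worked example, assuming four RX queues and the default
		 * (non-RSS) mapping: entries i = 0..3 pick queue_ids 0,1,2,3,
		 * so after four passes reta = (3 << 24) | (2 << 16) |
		 * (1 << 8) | 0 = 0x03020100, written to RETA(0).
		 */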
607 		reta = reta >> 8;
608 		reta = reta | (((uint32_t)queue_id) << 24);
609 		if ((i & 3) == 3) {
610 			if (i < 128)
611 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
612 			else
613 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
614 				    reta);
615 			reta = 0;
616 		}
617 	}
618 
619 	/* Now fill our hash function seeds */
620 	for (i = 0; i < 10; i++)
621 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
622 
623 	/* Perform hash on these packet types */
624 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
625 		rss_hash_config = rss_gethashconfig();
626 	else {
627 		/*
628 		 * Disable UDP - IP fragments aren't currently being handled
629 		 * and so we end up with a mix of 2-tuple and 4-tuple
630 		 * traffic.
631 		 */
632 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
633 		                | RSS_HASHTYPE_RSS_TCP_IPV4
634 		                | RSS_HASHTYPE_RSS_IPV6
635 		                | RSS_HASHTYPE_RSS_TCP_IPV6
636 		                | RSS_HASHTYPE_RSS_IPV6_EX
637 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
638 	}
639 
640 	mrqc = IXGBE_MRQC_RSSEN;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
649 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
650 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
651 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
652 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
653 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
654 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
655 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
656 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
657 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
658 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
659 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
660 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
661 } /* ixgbe_initialize_rss_mapping */
662 
663 /************************************************************************
664  * ixgbe_initialize_receive_units - Setup receive registers and features.
665  ************************************************************************/
666 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
667 
668 static void
669 ixgbe_initialize_receive_units(if_ctx_t ctx)
670 {
671 	struct adapter     *adapter = iflib_get_softc(ctx);
672 	if_softc_ctx_t     scctx = adapter->shared;
673 	struct ixgbe_hw    *hw = &adapter->hw;
674 	struct ifnet       *ifp = iflib_get_ifp(ctx);
675 	struct ix_rx_queue *que;
676 	int                i, j;
677 	u32                bufsz, fctrl, srrctl, rxcsum;
678 	u32                hlreg;
679 
680 	/*
681 	 * Make sure receives are disabled while
682 	 * setting up the descriptor ring
683 	 */
684 	ixgbe_disable_rx(hw);
685 
686 	/* Enable broadcasts */
687 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
688 	fctrl |= IXGBE_FCTRL_BAM;
689 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
690 		fctrl |= IXGBE_FCTRL_DPF;
691 		fctrl |= IXGBE_FCTRL_PMCF;
692 	}
693 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
694 
695 	/* Set for Jumbo Frames? */
696 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
697 	if (ifp->if_mtu > ETHERMTU)
698 		hlreg |= IXGBE_HLREG0_JUMBOEN;
699 	else
700 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
701 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
702 
703 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
704 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
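	/*
	 * Example, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 (1 KB units)
	 * and a 2 KB receive cluster: (2048 + 1023) >> 10 = 2, so SRRCTL
	 * advertises a 2 KB packet buffer.
	 */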
705 
706 	/* Setup the Base and Length of the Rx Descriptor Ring */
707 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
708 		struct rx_ring *rxr = &que->rxr;
709 		u64            rdba = rxr->rx_paddr;
710 
711 		j = rxr->me;
712 
713 		/* Setup the Base and Length of the Rx Descriptor Ring */
714 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
715 		    (rdba & 0x00000000ffffffffULL));
716 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
717 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
718 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
719 
720 		/* Set up the SRRCTL register */
721 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
722 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
723 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
724 		srrctl |= bufsz;
725 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
726 
727 		/*
728 		 * Set DROP_EN iff we have no flow control and >1 queue.
729 		 * Note that srrctl was cleared shortly before during reset,
730 		 * so we do not need to clear the bit, but do it just in case
731 		 * this code is moved elsewhere.
732 		 */
733 		if (adapter->num_rx_queues > 1 &&
734 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
735 			srrctl |= IXGBE_SRRCTL_DROP_EN;
736 		} else {
737 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
738 		}
739 
740 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
741 
742 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
743 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
744 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
745 
746 		/* Set the driver rx tail address */
747 		rxr->tail = IXGBE_RDT(rxr->me);
748 	}
749 
750 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
751 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
752 		            | IXGBE_PSRTYPE_UDPHDR
753 		            | IXGBE_PSRTYPE_IPV4HDR
754 		            | IXGBE_PSRTYPE_IPV6HDR;
755 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
756 	}
757 
758 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
759 
760 	ixgbe_initialize_rss_mapping(adapter);
761 
762 	if (adapter->num_rx_queues > 1) {
763 		/* RSS and RX IPP Checksum are mutually exclusive */
764 		rxcsum |= IXGBE_RXCSUM_PCSD;
765 	}
766 
767 	if (ifp->if_capenable & IFCAP_RXCSUM)
768 		rxcsum |= IXGBE_RXCSUM_PCSD;
769 
770 	/* This is useful for calculating UDP/IP fragment checksums */
771 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
772 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
773 
774 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
775 
776 } /* ixgbe_initialize_receive_units */
777 
778 /************************************************************************
779  * ixgbe_initialize_transmit_units - Enable transmit units.
780  ************************************************************************/
781 static void
782 ixgbe_initialize_transmit_units(if_ctx_t ctx)
783 {
784 	struct adapter     *adapter = iflib_get_softc(ctx);
785 	struct ixgbe_hw    *hw = &adapter->hw;
786 	if_softc_ctx_t     scctx = adapter->shared;
787 	struct ix_tx_queue *que;
788 	int i;
789 
790 	/* Setup the Base and Length of the Tx Descriptor Ring */
791 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
792 	    i++, que++) {
793 		struct tx_ring	   *txr = &que->txr;
794 		u64 tdba = txr->tx_paddr;
795 		u32 txctrl = 0;
796 		int j = txr->me;
797 
798 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
799 		    (tdba & 0x00000000ffffffffULL));
800 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
801 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
802 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
803 
804 		/* Setup the HW Tx Head and Tail descriptor pointers */
805 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
806 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
807 
808 		/* Reset ring indices and the report status array */
809 		txr->tx_rs_cidx = txr->tx_rs_pidx;
810 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
811 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
812 			txr->tx_rsq[k] = QIDX_INVALID;
813 
814 		/* Disable Head Writeback */
815 		/*
816 		 * Note: for X550 series devices, these registers are actually
817 		 * prefixed with TPH_ instead of DCA_, but the addresses and
818 		 * fields remain the same.
819 		 */
820 		switch (hw->mac.type) {
821 		case ixgbe_mac_82598EB:
822 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
823 			break;
824 		default:
825 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
826 			break;
827 		}
828 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
829 		switch (hw->mac.type) {
830 		case ixgbe_mac_82598EB:
831 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
832 			break;
833 		default:
834 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
835 			break;
836 		}
837 
838 	}
839 
840 	if (hw->mac.type != ixgbe_mac_82598EB) {
841 		u32 dmatxctl, rttdcs;
842 
843 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
844 		dmatxctl |= IXGBE_DMATXCTL_TE;
845 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
846 		/* Disable arbiter to set MTQC */
847 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
848 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
849 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
850 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
851 		    ixgbe_get_mtqc(adapter->iov_mode));
852 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
853 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
854 	}
855 
856 } /* ixgbe_initialize_transmit_units */
857 
858 /************************************************************************
859  * ixgbe_register
860  ************************************************************************/
861 static void *
862 ixgbe_register(device_t dev)
863 {
864 	return (ixgbe_sctx);
865 } /* ixgbe_register */
866 
867 /************************************************************************
868  * ixgbe_if_attach_pre - Device initialization routine, part 1
869  *
870  *   Called when the driver is being loaded.
871  *   Identifies the type of hardware, initializes the hardware,
872  *   and initializes iflib structures.
873  *
874  *   return 0 on success, positive on failure
875  ************************************************************************/
876 static int
877 ixgbe_if_attach_pre(if_ctx_t ctx)
878 {
879 	struct adapter  *adapter;
880 	device_t        dev;
881 	if_softc_ctx_t  scctx;
882 	struct ixgbe_hw *hw;
883 	int             error = 0;
884 	u32             ctrl_ext;
885 
886 	INIT_DEBUGOUT("ixgbe_attach: begin");
887 
888 	/* Allocate, clear, and link in our adapter structure */
889 	dev = iflib_get_dev(ctx);
890 	adapter = iflib_get_softc(ctx);
891 	adapter->hw.back = adapter;
892 	adapter->ctx = ctx;
893 	adapter->dev = dev;
894 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
895 	adapter->media = iflib_get_media(ctx);
896 	hw = &adapter->hw;
897 
898 	/* Determine hardware revision */
899 	hw->vendor_id = pci_get_vendor(dev);
900 	hw->device_id = pci_get_device(dev);
901 	hw->revision_id = pci_get_revid(dev);
902 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
903 	hw->subsystem_device_id = pci_get_subdevice(dev);
904 
905 	/* Do base PCI setup - map BAR0 */
906 	if (ixgbe_allocate_pci_resources(ctx)) {
907 		device_printf(dev, "Allocation of PCI resources failed\n");
908 		return (ENXIO);
909 	}
910 
911 	/* let hardware know driver is loaded */
912 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
913 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
914 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
915 
916 	/*
917 	 * Initialize the shared code
918 	 */
919 	if (ixgbe_init_shared_code(hw) != 0) {
920 		device_printf(dev, "Unable to initialize the shared code\n");
921 		error = ENXIO;
922 		goto err_pci;
923 	}
924 
925 	if (hw->mbx.ops.init_params)
926 		hw->mbx.ops.init_params(hw);
927 
928 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
929 
930 	if (hw->mac.type != ixgbe_mac_82598EB)
931 		hw->phy.smart_speed = ixgbe_smart_speed;
932 
933 	ixgbe_init_device_features(adapter);
934 
935 	/* Enable WoL (if supported) */
936 	ixgbe_check_wol_support(adapter);
937 
938 	/* Verify adapter fan is still functional (if applicable) */
939 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
940 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
941 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
942 	}
943 
944 	/* Ensure SW/FW semaphore is free */
945 	ixgbe_init_swfw_semaphore(hw);
946 
947 	/* Set an initial default flow control value */
948 	hw->fc.requested_mode = ixgbe_flow_control;
949 
950 	hw->phy.reset_if_overtemp = TRUE;
951 	error = ixgbe_reset_hw(hw);
952 	hw->phy.reset_if_overtemp = FALSE;
953 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
954 		/*
955 		 * No optics in this port, set up
956 		 * so the timer routine will probe
957 		 * for later insertion.
958 		 */
959 		adapter->sfp_probe = TRUE;
960 		error = 0;
961 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
962 		device_printf(dev, "Unsupported SFP+ module detected!\n");
963 		error = EIO;
964 		goto err_pci;
965 	} else if (error) {
966 		device_printf(dev, "Hardware initialization failed\n");
967 		error = EIO;
968 		goto err_pci;
969 	}
970 
971 	/* Make sure we have a good EEPROM before we read from it */
972 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
973 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
974 		error = EIO;
975 		goto err_pci;
976 	}
977 
978 	error = ixgbe_start_hw(hw);
979 	switch (error) {
980 	case IXGBE_ERR_EEPROM_VERSION:
981 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
982 		break;
983 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
984 		device_printf(dev, "Unsupported SFP+ Module\n");
985 		error = EIO;
986 		goto err_pci;
987 	case IXGBE_ERR_SFP_NOT_PRESENT:
988 		device_printf(dev, "No SFP+ Module found\n");
989 		/* falls thru */
990 	default:
991 		break;
992 	}
993 
994 	/* Most of the iflib initialization... */
995 
996 	iflib_set_mac(ctx, hw->mac.addr);
997 	switch (adapter->hw.mac.type) {
998 	case ixgbe_mac_X550:
999 	case ixgbe_mac_X550EM_x:
1000 	case ixgbe_mac_X550EM_a:
1001 		scctx->isc_rss_table_size = 512;
1002 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1003 		break;
1004 	default:
1005 		scctx->isc_rss_table_size = 128;
1006 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1007 	}
1008 
1009 	/* Allow legacy interrupts */
1010 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1011 
1012 	scctx->isc_txqsizes[0] =
1013 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1014 	    sizeof(u32), DBA_ALIGN);
1015 	scctx->isc_rxqsizes[0] =
1016 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1017 	    DBA_ALIGN);
1018 
1019 	/* XXX */
1020 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1021 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1022 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1023 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1024 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
1025 	} else {
1026 		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1027 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1028 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
1029 	}
1030 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1031 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1032 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1033 
1034 	scctx->isc_txrx = &ixgbe_txrx;
1035 
1036 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1037 
1038 	return (0);
1039 
1040 err_pci:
1041 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1042 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1043 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1044 	ixgbe_free_pci_resources(ctx);
1045 
1046 	return (error);
1047 } /* ixgbe_if_attach_pre */
1048 
1049  /*********************************************************************
1050  * ixgbe_if_attach_post - Device initialization routine, part 2
1051  *
1052  *   Called during driver load, but after interrupts and
1053  *   resources have been allocated and configured.
1054  *   Sets up some data structures not relevant to iflib.
1055  *
1056  *   return 0 on success, positive on failure
1057  *********************************************************************/
1058 static int
1059 ixgbe_if_attach_post(if_ctx_t ctx)
1060 {
1061 	device_t dev;
1062 	struct adapter  *adapter;
1063 	struct ixgbe_hw *hw;
1064 	int             error = 0;
1065 
1066 	dev = iflib_get_dev(ctx);
1067 	adapter = iflib_get_softc(ctx);
1068 	hw = &adapter->hw;
1069 
1070 
1071 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1072 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1073 		device_printf(dev, "Device does not support legacy interrupts\n");
1074 		error = ENXIO;
1075 		goto err;
1076 	}
1077 
1078 	/* Allocate multicast array memory. */
1079 	adapter->mta = malloc(sizeof(*adapter->mta) *
1080 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1081 	if (adapter->mta == NULL) {
1082 		device_printf(dev, "Can not allocate multicast setup array\n");
1083 		error = ENOMEM;
1084 		goto err;
1085 	}
1086 
1087 	/* hw.ix defaults init */
1088 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1089 
1090 	/* Enable the optics for 82599 SFP+ fiber */
1091 	ixgbe_enable_tx_laser(hw);
1092 
1093 	/* Enable power to the phy. */
1094 	ixgbe_set_phy_power(hw, TRUE);
1095 
1096 	ixgbe_initialize_iov(adapter);
1097 
1098 	error = ixgbe_setup_interface(ctx);
1099 	if (error) {
1100 		device_printf(dev, "Interface setup failed: %d\n", error);
1101 		goto err;
1102 	}
1103 
1104 	ixgbe_if_update_admin_status(ctx);
1105 
1106 	/* Initialize statistics */
1107 	ixgbe_update_stats_counters(adapter);
1108 	ixgbe_add_hw_stats(adapter);
1109 
1110 	/* Check PCIE slot type/speed/width */
1111 	ixgbe_get_slot_info(adapter);
1112 
1113 	/*
1114 	 * Do time init and sysctl init here, but
1115 	 * only on the first port of a bypass adapter.
1116 	 */
1117 	ixgbe_bypass_init(adapter);
1118 
1119 	/* Set an initial dmac value */
1120 	adapter->dmac = 0;
1121 	/* Set initial advertised speeds (if applicable) */
1122 	adapter->advertise = ixgbe_get_advertise(adapter);
1123 
1124 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1125 		ixgbe_define_iov_schemas(dev, &error);
1126 
1127 	/* Add sysctls */
1128 	ixgbe_add_device_sysctls(ctx);
1129 
1130 	return (0);
1131 err:
1132 	return (error);
1133 } /* ixgbe_if_attach_post */
1134 
1135 /************************************************************************
1136  * ixgbe_check_wol_support
1137  *
1138  *   Checks whether the adapter's ports are capable of
1139  *   Wake On LAN by reading the adapter's NVM.
1140  *
1141  *   Sets each port's hw->wol_enabled value depending
1142  *   on the value read here.
1143  ************************************************************************/
1144 static void
1145 ixgbe_check_wol_support(struct adapter *adapter)
1146 {
1147 	struct ixgbe_hw *hw = &adapter->hw;
1148 	u16             dev_caps = 0;
1149 
1150 	/* Find out WoL support for port */
1151 	adapter->wol_support = hw->wol_enabled = 0;
1152 	ixgbe_get_device_caps(hw, &dev_caps);
1153 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1154 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1155 	     hw->bus.func == 0))
1156 		adapter->wol_support = hw->wol_enabled = 1;
1157 
1158 	/* Save initial wake up filter configuration */
1159 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1160 
1161 	return;
1162 } /* ixgbe_check_wol_support */
1163 
1164 /************************************************************************
1165  * ixgbe_setup_interface
1166  *
1167  *   Setup networking device structure and register an interface.
1168  ************************************************************************/
1169 static int
1170 ixgbe_setup_interface(if_ctx_t ctx)
1171 {
1172 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1173 	struct adapter *adapter = iflib_get_softc(ctx);
1174 
1175 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1176 
1177 	if_setbaudrate(ifp, IF_Gbps(10));
1178 
1179 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1180 
1181 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1182 
1183 	ixgbe_add_media_types(ctx);
1184 
1185 	/* Autoselect media by default */
1186 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1187 
1188 	return (0);
1189 } /* ixgbe_setup_interface */
1190 
1191 /************************************************************************
1192  * ixgbe_if_get_counter
1193  ************************************************************************/
1194 static uint64_t
1195 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1196 {
1197 	struct adapter *adapter = iflib_get_softc(ctx);
1198 	if_t           ifp = iflib_get_ifp(ctx);
1199 
1200 	switch (cnt) {
1201 	case IFCOUNTER_IPACKETS:
1202 		return (adapter->ipackets);
1203 	case IFCOUNTER_OPACKETS:
1204 		return (adapter->opackets);
1205 	case IFCOUNTER_IBYTES:
1206 		return (adapter->ibytes);
1207 	case IFCOUNTER_OBYTES:
1208 		return (adapter->obytes);
1209 	case IFCOUNTER_IMCASTS:
1210 		return (adapter->imcasts);
1211 	case IFCOUNTER_OMCASTS:
1212 		return (adapter->omcasts);
1213 	case IFCOUNTER_COLLISIONS:
1214 		return (0);
1215 	case IFCOUNTER_IQDROPS:
1216 		return (adapter->iqdrops);
1217 	case IFCOUNTER_OQDROPS:
1218 		return (0);
1219 	case IFCOUNTER_IERRORS:
1220 		return (adapter->ierrors);
1221 	default:
1222 		return (if_get_counter_default(ifp, cnt));
1223 	}
1224 } /* ixgbe_if_get_counter */
1225 
1226 /************************************************************************
1227  * ixgbe_if_i2c_req
1228  ************************************************************************/
1229 static int
1230 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1231 {
1232 	struct adapter		*adapter = iflib_get_softc(ctx);
1233 	struct ixgbe_hw 	*hw = &adapter->hw;
1234 	int 			i;
1235 
1236 
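	/*
	 * Reached via iflib's SIOCGI2C handling (e.g. 'ifconfig ixN sfp'
	 * in userland); each requested byte is read individually below.
	 */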
1237 	if (hw->phy.ops.read_i2c_byte == NULL)
1238 		return (ENXIO);
1239 	for (i = 0; i < req->len; i++)
1240 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1241 		    req->dev_addr, &req->data[i]);
1242 	return (0);
1243 } /* ixgbe_if_i2c_req */
1244 
1245 /************************************************************************
1246  * ixgbe_add_media_types
1247  ************************************************************************/
1248 static void
1249 ixgbe_add_media_types(if_ctx_t ctx)
1250 {
1251 	struct adapter  *adapter = iflib_get_softc(ctx);
1252 	struct ixgbe_hw *hw = &adapter->hw;
1253 	device_t        dev = iflib_get_dev(ctx);
1254 	u64             layer;
1255 
1256 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1257 
1258 	/* Media types with matching FreeBSD media defines */
1259 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1260 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1261 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1262 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1263 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1264 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1265 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1266 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1267 
1268 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1269 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1270 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1271 		    NULL);
1272 
1273 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1274 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1275 		if (hw->phy.multispeed_fiber)
1276 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1277 			    NULL);
1278 	}
1279 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1280 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1281 		if (hw->phy.multispeed_fiber)
1282 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1283 			    NULL);
1284 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1285 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1286 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1287 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1288 
1289 #ifdef IFM_ETH_XTYPE
1290 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1291 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1292 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1293 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1294 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1295 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1296 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1297 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1298 #else
1299 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1300 		device_printf(dev, "Media supported: 10GbaseKR\n");
1301 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1302 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1303 	}
1304 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1305 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1306 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1307 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1308 	}
1309 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1310 		device_printf(dev, "Media supported: 1000baseKX\n");
1311 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1312 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1313 	}
1314 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1315 		device_printf(dev, "Media supported: 2500baseKX\n");
1316 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1317 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1318 	}
1319 #endif
1320 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1321 		device_printf(dev, "Media supported: 1000baseBX\n");
1322 
1323 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1324 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1325 		    0, NULL);
1326 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1327 	}
1328 
1329 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1330 } /* ixgbe_add_media_types */
1331 
1332 /************************************************************************
1333  * ixgbe_is_sfp
1334  ************************************************************************/
1335 static inline bool
1336 ixgbe_is_sfp(struct ixgbe_hw *hw)
1337 {
1338 	switch (hw->mac.type) {
1339 	case ixgbe_mac_82598EB:
1340 		if (hw->phy.type == ixgbe_phy_nl)
1341 			return (TRUE);
1342 		return (FALSE);
1343 	case ixgbe_mac_82599EB:
1344 		switch (hw->mac.ops.get_media_type(hw)) {
1345 		case ixgbe_media_type_fiber:
1346 		case ixgbe_media_type_fiber_qsfp:
1347 			return (TRUE);
1348 		default:
1349 			return (FALSE);
1350 		}
1351 	case ixgbe_mac_X550EM_x:
1352 	case ixgbe_mac_X550EM_a:
1353 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1354 			return (TRUE);
1355 		return (FALSE);
1356 	default:
1357 		return (FALSE);
1358 	}
1359 } /* ixgbe_is_sfp */
1360 
1361 /************************************************************************
1362  * ixgbe_config_link
1363  ************************************************************************/
1364 static void
1365 ixgbe_config_link(struct adapter *adapter)
1366 {
1367 	struct ixgbe_hw *hw = &adapter->hw;
1368 	u32             autoneg, err = 0;
1369 	bool            sfp, negotiate;
1370 
1371 	sfp = ixgbe_is_sfp(hw);
1372 
1373 	if (sfp) {
1374 		GROUPTASK_ENQUEUE(&adapter->mod_task);
1375 	} else {
1376 		if (hw->mac.ops.check_link)
1377 			err = ixgbe_check_link(hw, &adapter->link_speed,
1378 			    &adapter->link_up, FALSE);
1379 		if (err)
1380 			return;
1381 		autoneg = hw->phy.autoneg_advertised;
1382 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1383 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1384 			    &negotiate);
1385 		if (err)
1386 			return;
1387 		if (hw->mac.ops.setup_link)
1388 			err = hw->mac.ops.setup_link(hw, autoneg,
1389 			    adapter->link_up);
1390 	}
1391 
1392 } /* ixgbe_config_link */
1393 
1394 /************************************************************************
1395  * ixgbe_update_stats_counters - Update board statistics counters.
1396  ************************************************************************/
1397 static void
1398 ixgbe_update_stats_counters(struct adapter *adapter)
1399 {
1400 	struct ixgbe_hw       *hw = &adapter->hw;
1401 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1402 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1403 	u64                   total_missed_rx = 0;
1404 
1405 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1406 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1407 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1408 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1409 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1410 
1411 	for (int i = 0; i < 16; i++) {
1412 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1413 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1414 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1415 	}
1416 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1417 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1418 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1419 
1420 	/* Hardware workaround, gprc counts missed packets */
1421 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1422 	stats->gprc -= missed_rx;
1423 
1424 	if (hw->mac.type != ixgbe_mac_82598EB) {
1425 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1426 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1427 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1428 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1429 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1430 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1431 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1432 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1433 	} else {
1434 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1435 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1436 		/* 82598 only has a counter in the high register */
1437 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1438 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1439 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1440 	}
1441 
1442 	/*
1443 	 * Workaround: mprc hardware is incorrectly counting
1444 	 * broadcasts, so for now we subtract those.
1445 	 */
1446 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1447 	stats->bprc += bprc;
1448 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1449 	if (hw->mac.type == ixgbe_mac_82598EB)
1450 		stats->mprc -= bprc;
1451 
1452 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1453 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1454 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1455 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1456 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1457 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1458 
1459 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1460 	stats->lxontxc += lxon;
1461 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1462 	stats->lxofftxc += lxoff;
1463 	total = lxon + lxoff;
1464 
1465 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1466 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1467 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1468 	stats->gptc -= total;
1469 	stats->mptc -= total;
1470 	stats->ptc64 -= total;
1471 	stats->gotc -= total * ETHER_MIN_LEN;
1472 
1473 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1474 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1475 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1476 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1477 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1478 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1479 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1480 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1481 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1482 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1483 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1484 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1485 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1486 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1487 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1488 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1489 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1490 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1491 	/* FCoE counters are only present on 82599 and newer */
1492 	if (hw->mac.type != ixgbe_mac_82598EB) {
1493 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1494 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1495 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1496 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1497 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1498 	}
1499 
1500 	/* Fill out the OS statistics structure */
1501 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1502 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1503 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1504 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1505 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1506 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1507 	IXGBE_SET_COLLISIONS(adapter, 0);
1508 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1509 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1510 } /* ixgbe_update_stats_counters */
1511 
1512 /************************************************************************
1513  * ixgbe_add_hw_stats
1514  *
1515  *   Add sysctl variables, one per statistic, to the system.
1516  ************************************************************************/
1517 static void
1518 ixgbe_add_hw_stats(struct adapter *adapter)
1519 {
1520 	device_t               dev = iflib_get_dev(adapter->ctx);
1521 	struct ix_rx_queue     *rx_que;
1522 	struct ix_tx_queue     *tx_que;
1523 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1524 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1525 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1526 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1527 	struct sysctl_oid      *stat_node, *queue_node;
1528 	struct sysctl_oid_list *stat_list, *queue_list;
1529 	int                    i;
1530 
1531 #define QUEUE_NAME_LEN 32
1532 	char                   namebuf[QUEUE_NAME_LEN];
1533 
1534 	/* Driver Statistics */
1535 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1536 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1537 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1538 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1539 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1540 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1541 
1542 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1543 		struct tx_ring *txr = &tx_que->txr;
1544 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1545 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1546 		    CTLFLAG_RD, NULL, "Queue Name");
1547 		queue_list = SYSCTL_CHILDREN(queue_node);
1548 
1549 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1550 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1551 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1552 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1553 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1554 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1555 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1556 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1557 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1558 		    CTLFLAG_RD, &txr->total_packets,
1559 		    "Queue Packets Transmitted");
1560 	}
1561 
1562 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1563 		struct rx_ring *rxr = &rx_que->rxr;
1564 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1565 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1566 		    CTLFLAG_RD, NULL, "Queue Name");
1567 		queue_list = SYSCTL_CHILDREN(queue_node);
1568 
1569 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1570 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1571 		    sizeof(&adapter->rx_queues[i]),
1572 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1573 		    "Interrupt Rate");
1574 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1575 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1576 		    "irqs on this queue");
1577 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1578 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1579 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1580 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1581 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1582 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1583 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1584 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1585 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1586 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1587 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1588 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1589 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1590 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1591 	}
1592 
1593 	/* MAC stats get their own sub node */
1594 
1595 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1596 	    CTLFLAG_RD, NULL, "MAC Statistics");
1597 	stat_list = SYSCTL_CHILDREN(stat_node);
1598 
1599 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1600 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1601 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1602 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1603 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1604 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1605 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1606 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1607 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1608 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1609 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1610 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1611 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1612 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1613 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1614 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1615 
1616 	/* Flow Control stats */
1617 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1618 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1619 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1620 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1621 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1622 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1623 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1624 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1625 
1626 	/* Packet Reception Stats */
1627 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1628 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1629 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1630 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1631 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1632 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1633 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1634 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1635 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1636 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1637 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1638 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1639 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1640 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1641 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1642 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1643 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1644 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1645 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1646 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1647 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1648 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1649 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1650 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1651 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1652 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1653 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1654 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1656 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1657 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1658 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1660 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1662 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1664 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1665 
1666 	/* Packet Transmission Stats */
1667 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1668 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1670 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1671 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1672 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1674 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1676 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1678 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1679 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1680 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1681 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1682 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1683 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1684 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1685 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1686 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1687 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1688 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1689 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1690 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1691 } /* ixgbe_add_hw_stats */
1692 
1693 /************************************************************************
1694  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1695  *
1696  *   Retrieves the TDH value from the hardware
1697  ************************************************************************/
1698 static int
1699 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1700 {
1701 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1702 	int            error;
1703 	unsigned int   val;
1704 
1705 	if (!txr)
1706 		return (0);
1707 
1708 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1709 	error = sysctl_handle_int(oidp, &val, 0, req);
1710 	if (error || !req->newptr)
1711 		return error;
1712 
1713 	return (0);
1714 } /* ixgbe_sysctl_tdh_handler */
1715 
1716 /************************************************************************
1717  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1718  *
1719  *   Retrieves the TDT value from the hardware
1720  ************************************************************************/
1721 static int
1722 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1723 {
1724 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1725 	int            error;
1726 	unsigned int   val;
1727 
1728 	if (!txr)
1729 		return (0);
1730 
1731 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1732 	error = sysctl_handle_int(oidp, &val, 0, req);
1733 	if (error || !req->newptr)
1734 		return error;
1735 
1736 	return (0);
1737 } /* ixgbe_sysctl_tdt_handler */
1738 
1739 /************************************************************************
1740  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1741  *
1742  *   Retrieves the RDH value from the hardware
1743  ************************************************************************/
1744 static int
1745 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1746 {
1747 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1748 	int            error;
1749 	unsigned int   val;
1750 
1751 	if (!rxr)
1752 		return (0);
1753 
1754 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1755 	error = sysctl_handle_int(oidp, &val, 0, req);
1756 	if (error || !req->newptr)
1757 		return error;
1758 
1759 	return (0);
1760 } /* ixgbe_sysctl_rdh_handler */
1761 
1762 /************************************************************************
1763  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1764  *
1765  *   Retrieves the RDT value from the hardware
1766  ************************************************************************/
1767 static int
1768 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1769 {
1770 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1771 	int            error;
1772 	unsigned int   val;
1773 
1774 	if (!rxr)
1775 		return (0);
1776 
1777 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1778 	error = sysctl_handle_int(oidp, &val, 0, req);
1779 	if (error || !req->newptr)
1780 		return error;
1781 
1782 	return (0);
1783 } /* ixgbe_sysctl_rdt_handler */
1784 
1785 /************************************************************************
1786  * ixgbe_if_vlan_register
1787  *
1788  *   Run via the vlan config EVENT, this lets us use the
1789  *   HW filter table since we can get the vlan id.  It
1790  *   just creates the entry in the soft version of the
1791  *   VFTA; init will repopulate the real table.
1792  ************************************************************************/
1793 static void
1794 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1795 {
1796 	struct adapter *adapter = iflib_get_softc(ctx);
1797 	u16            index, bit;
1798 
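	/*
	 * The 4096-entry VLAN filter table is shadowed as 128 32-bit
	 * words: word index = vtag / 32, bit = vtag % 32.  For example,
	 * vtag 100 lands in word 3, bit 4.
	 */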
1799 	index = (vtag >> 5) & 0x7F;
1800 	bit = vtag & 0x1F;
1801 	adapter->shadow_vfta[index] |= (1 << bit);
1802 	++adapter->num_vlans;
1803 	ixgbe_setup_vlan_hw_support(ctx);
1804 } /* ixgbe_if_vlan_register */
1805 
1806 /************************************************************************
1807  * ixgbe_if_vlan_unregister
1808  *
1809  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1810  ************************************************************************/
1811 static void
1812 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1813 {
1814 	struct adapter *adapter = iflib_get_softc(ctx);
1815 	u16            index, bit;
1816 
1817 	index = (vtag >> 5) & 0x7F;
1818 	bit = vtag & 0x1F;
1819 	adapter->shadow_vfta[index] &= ~(1 << bit);
1820 	--adapter->num_vlans;
1821 	/* Re-init to load the changes */
1822 	ixgbe_setup_vlan_hw_support(ctx);
1823 } /* ixgbe_if_vlan_unregister */
1824 
1825 /************************************************************************
1826  * ixgbe_setup_vlan_hw_support
1827  ************************************************************************/
1828 static void
1829 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1830 {
1831 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1832 	struct adapter  *adapter = iflib_get_softc(ctx);
1833 	struct ixgbe_hw *hw = &adapter->hw;
1834 	struct rx_ring  *rxr;
1835 	int             i;
1836 	u32             ctrl;
1837 
1838 
1839 	/*
1840 	 * We get here through init_locked, meaning a soft
1841 	 * reset has already cleared the VFTA and other
1842 	 * state, so if no VLANs have been registered there
1843 	 * is nothing to do.
1844 	 */
1845 	if (adapter->num_vlans == 0)
1846 		return;
1847 
1848 	/* Setup the queues for vlans */
1849 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1850 		for (i = 0; i < adapter->num_rx_queues; i++) {
1851 			rxr = &adapter->rx_queues[i].rxr;
1852 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1853 			if (hw->mac.type != ixgbe_mac_82598EB) {
1854 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1855 				ctrl |= IXGBE_RXDCTL_VME;
1856 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1857 			}
1858 			rxr->vtag_strip = TRUE;
1859 		}
1860 	}
1861 
1862 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1863 		return;
1864 	/*
1865 	 * A soft reset zeroes out the VFTA, so
1866 	 * we need to repopulate it now.
1867 	 */
1868 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1869 		if (adapter->shadow_vfta[i] != 0)
1870 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1871 			    adapter->shadow_vfta[i]);
1872 
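	/*
	 * Tag stripping was enabled per-queue above (RXDCTL VME on
	 * 82599 and later); the global VLNCTRL register controls the
	 * filter table (VFE) and, on 82598 only, global stripping (VME).
	 */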
1873 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1874 	/* Enable the Filter Table if enabled */
1875 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1876 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1877 		ctrl |= IXGBE_VLNCTRL_VFE;
1878 	}
1879 	if (hw->mac.type == ixgbe_mac_82598EB)
1880 		ctrl |= IXGBE_VLNCTRL_VME;
1881 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1882 } /* ixgbe_setup_vlan_hw_support */
1883 
1884 /************************************************************************
1885  * ixgbe_get_slot_info
1886  *
1887  *   Get the width and transaction speed of
1888  *   the slot this adapter is plugged into.
1889  ************************************************************************/
1890 static void
1891 ixgbe_get_slot_info(struct adapter *adapter)
1892 {
1893 	device_t        dev = iflib_get_dev(adapter->ctx);
1894 	struct ixgbe_hw *hw = &adapter->hw;
1895 	int             bus_info_valid = TRUE;
1896 	u32             offset;
1897 	u16             link;
1898 
1899 	/* Some devices are behind an internal bridge */
1900 	switch (hw->device_id) {
1901 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1902 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1903 		goto get_parent_info;
1904 	default:
1905 		break;
1906 	}
1907 
1908 	ixgbe_get_bus_info(hw);
1909 
1910 	/*
1911 	 * Some devices don't use PCI-E, so there is no point
1912 	 * in displaying "Unknown" for their bus speed and width.
1913 	 */
1914 	switch (hw->mac.type) {
1915 	case ixgbe_mac_X550EM_x:
1916 	case ixgbe_mac_X550EM_a:
1917 		return;
1918 	default:
1919 		goto display;
1920 	}
1921 
1922 get_parent_info:
1923 	/*
1924 	 * For the Quad port adapter we need to parse back
1925 	 * up the PCI tree to find the speed of the expansion
1926 	 * slot into which this adapter is plugged. A bit more work.
1927 	 */
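	/*
	 * Each pair of parent hops climbs past one PCI bridge:
	 * first the card's internal pcib, then the pcib of the slot.
	 */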
1928 	dev = device_get_parent(device_get_parent(dev));
1929 #ifdef IXGBE_DEBUG
1930 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1931 	    pci_get_slot(dev), pci_get_function(dev));
1932 #endif
1933 	dev = device_get_parent(device_get_parent(dev));
1934 #ifdef IXGBE_DEBUG
1935 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1936 	    pci_get_slot(dev), pci_get_function(dev));
1937 #endif
1938 	/* Now get the PCI Express Capabilities offset */
1939 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1940 		/*
1941 		 * Hmm...can't get PCI-Express capabilities.
1942 		 * Falling back to default method.
1943 		 */
1944 		bus_info_valid = FALSE;
1945 		ixgbe_get_bus_info(hw);
1946 		goto display;
1947 	}
1948 	/* ...and read the Link Status Register */
1949 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1950 	ixgbe_set_pci_config_data_generic(hw, link);
1951 
1952 display:
1953 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1954 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1955 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1956 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1957 	     "Unknown"),
1958 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1959 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1960 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1961 	     "Unknown"));
1962 
1963 	if (bus_info_valid) {
1964 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1965 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1966 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1967 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1968 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1969 		}
1970 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1971 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1972 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1973 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1974 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1975 		}
1976 	} else
1977 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1978 
1979 	return;
1980 } /* ixgbe_get_slot_info */
1981 
1982 /************************************************************************
1983  * ixgbe_if_msix_intr_assign
1984  *
1985  *   Setup MSI-X Interrupt resources and handlers
1986  ************************************************************************/
1987 static int
1988 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1989 {
1990 	struct adapter     *adapter = iflib_get_softc(ctx);
1991 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1992 	struct ix_tx_queue *tx_que;
1993 	int                error, rid, vector = 0;
1994 	int                cpu_id = 0;
1995 	char               buf[16];
1996 
1997 	/* Admin Que is vector 0 */
1998 	rid = vector + 1;
1999 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
2000 		rid = vector + 1;
2001 
2002 		snprintf(buf, sizeof(buf), "rxq%d", i);
2003 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2004 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2005 
2006 		if (error) {
2007 			device_printf(iflib_get_dev(ctx),
2008 			    "Failed to allocate que int %d err: %d\n", i, error);
2009 			adapter->num_rx_queues = i + 1;
2010 			goto fail;
2011 		}
2012 
2013 		rx_que->msix = vector;
2014 		adapter->active_queues |= (u64)1 << rx_que->msix;
2015 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2016 			/*
2017 			 * The queue ID is used as the RSS layer bucket ID.
2018 			 * We look up the queue ID -> RSS CPU ID and select
2019 			 * that.
2020 			 */
2021 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2022 		} else {
2023 			/*
2024 			 * Bind the msix vector, and thus the
2025 			 * rings to the corresponding cpu.
2026 			 *
2027 			 * This just happens to match the default RSS
2028 			 * round-robin bucket -> queue -> CPU allocation.
2029 			 */
2030 			if (adapter->num_rx_queues > 1)
2031 				cpu_id = i;
2032 		}
2033 
2034 	}
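	/*
	 * TX queues do not get dedicated vectors; each TX queue shares
	 * the interrupt of an RX queue, assigned round-robin when there
	 * are more TX queues than RX queues.
	 */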
2035 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2036 		snprintf(buf, sizeof(buf), "txq%d", i);
2037 		tx_que = &adapter->tx_queues[i];
2038 		tx_que->msix = i % adapter->num_rx_queues;
2039 		iflib_softirq_alloc_generic(ctx,
2040 		    &adapter->rx_queues[tx_que->msix].que_irq,
2041 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2042 	}
2043 	rid = vector + 1;
2044 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2045 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2046 	if (error) {
2047 		device_printf(iflib_get_dev(ctx),
2048 		    "Failed to register admin handler\n");
2049 		return (error);
2050 	}
2051 
2052 	adapter->vector = vector;
2053 
2054 	return (0);
2055 fail:
2056 	iflib_irq_free(ctx, &adapter->irq);
2057 	rx_que = adapter->rx_queues;
2058 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2059 		iflib_irq_free(ctx, &rx_que->que_irq);
2060 
2061 	return (error);
2062 } /* ixgbe_if_msix_intr_assign */
2063 
2064 /*********************************************************************
2065  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2066  **********************************************************************/
2067 static int
2068 ixgbe_msix_que(void *arg)
2069 {
2070 	struct ix_rx_queue *que = arg;
2071 	struct adapter     *adapter = que->adapter;
2072 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2073 
2074 	/* Protect against spurious interrupts */
2075 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2076 		return 0;
2077 
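	/*
	 * Mask this queue's vector and hand off to the deferred iflib
	 * RX/TX task; the vector is re-armed when the queue interrupt
	 * is re-enabled after that work has run.
	 */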
2078 	ixgbe_disable_queue(adapter, que->msix);
2079 	++que->irqs;
2080 
2081 	return (FILTER_SCHEDULE_THREAD);
2082 } /* ixgbe_msix_que */
2083 
2084 /************************************************************************
2085  * ixgbe_if_media_status - Media Ioctl callback
2086  *
2087  *   Called whenever the user queries the status of
2088  *   the interface using ifconfig.
2089  ************************************************************************/
2090 static void
2091 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2092 {
2093 	struct adapter  *adapter = iflib_get_softc(ctx);
2094 	struct ixgbe_hw *hw = &adapter->hw;
2095 	int             layer;
2096 
2097 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2098 
2099 	iflib_admin_intr_deferred(ctx);
2100 
2101 	ifmr->ifm_status = IFM_AVALID;
2102 	ifmr->ifm_active = IFM_ETHER;
2103 
2104 	if (!adapter->link_active)
2105 		return;
2106 
2107 	ifmr->ifm_status |= IFM_ACTIVE;
2108 	layer = adapter->phy_layer;
2109 
2110 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2111 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2112 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2113 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2114 		switch (adapter->link_speed) {
2115 		case IXGBE_LINK_SPEED_10GB_FULL:
2116 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2117 			break;
2118 		case IXGBE_LINK_SPEED_1GB_FULL:
2119 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2120 			break;
2121 		case IXGBE_LINK_SPEED_100_FULL:
2122 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2123 			break;
2124 		case IXGBE_LINK_SPEED_10_FULL:
2125 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2126 			break;
2127 		}
2128 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2129 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2130 		switch (adapter->link_speed) {
2131 		case IXGBE_LINK_SPEED_10GB_FULL:
2132 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2133 			break;
2134 		}
2135 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2136 		switch (adapter->link_speed) {
2137 		case IXGBE_LINK_SPEED_10GB_FULL:
2138 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2139 			break;
2140 		case IXGBE_LINK_SPEED_1GB_FULL:
2141 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2142 			break;
2143 		}
2144 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2145 		switch (adapter->link_speed) {
2146 		case IXGBE_LINK_SPEED_10GB_FULL:
2147 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2148 			break;
2149 		case IXGBE_LINK_SPEED_1GB_FULL:
2150 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2151 			break;
2152 		}
2153 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2154 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2155 		switch (adapter->link_speed) {
2156 		case IXGBE_LINK_SPEED_10GB_FULL:
2157 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2158 			break;
2159 		case IXGBE_LINK_SPEED_1GB_FULL:
2160 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2161 			break;
2162 		}
2163 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2164 		switch (adapter->link_speed) {
2165 		case IXGBE_LINK_SPEED_10GB_FULL:
2166 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2167 			break;
2168 		}
2169 	/*
2170 	 * XXX: These need to use the proper media types once
2171 	 * they're added.
2172 	 */
2173 #ifndef IFM_ETH_XTYPE
2174 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2175 		switch (adapter->link_speed) {
2176 		case IXGBE_LINK_SPEED_10GB_FULL:
2177 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2178 			break;
2179 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2180 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2181 			break;
2182 		case IXGBE_LINK_SPEED_1GB_FULL:
2183 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2184 			break;
2185 		}
2186 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2187 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2188 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2189 		switch (adapter->link_speed) {
2190 		case IXGBE_LINK_SPEED_10GB_FULL:
2191 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2192 			break;
2193 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2194 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2195 			break;
2196 		case IXGBE_LINK_SPEED_1GB_FULL:
2197 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2198 			break;
2199 		}
2200 #else
2201 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2202 		switch (adapter->link_speed) {
2203 		case IXGBE_LINK_SPEED_10GB_FULL:
2204 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2205 			break;
2206 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2207 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2208 			break;
2209 		case IXGBE_LINK_SPEED_1GB_FULL:
2210 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2211 			break;
2212 		}
2213 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2214 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2215 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2216 		switch (adapter->link_speed) {
2217 		case IXGBE_LINK_SPEED_10GB_FULL:
2218 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2219 			break;
2220 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2221 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2222 			break;
2223 		case IXGBE_LINK_SPEED_1GB_FULL:
2224 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2225 			break;
2226 		}
2227 #endif
2228 
2229 	/* If nothing is recognized... */
2230 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2231 		ifmr->ifm_active |= IFM_UNKNOWN;
2232 
2233 	/* Display current flow control setting used on link */
2234 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2235 	    hw->fc.current_mode == ixgbe_fc_full)
2236 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2237 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2238 	    hw->fc.current_mode == ixgbe_fc_full)
2239 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2240 } /* ixgbe_if_media_status */
2241 
2242 /************************************************************************
2243  * ixgbe_media_change - Media Ioctl callback
2244  *
2245  *   Called when the user changes speed/duplex using
2246  *   media/mediopt option with ifconfig.
2247  *   media/mediaopt options with ifconfig.
2248 static int
2249 ixgbe_if_media_change(if_ctx_t ctx)
2250 {
2251 	struct adapter   *adapter = iflib_get_softc(ctx);
2252 	struct ifmedia   *ifm = iflib_get_media(ctx);
2253 	struct ixgbe_hw  *hw = &adapter->hw;
2254 	ixgbe_link_speed speed = 0;
2255 
2256 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2257 
2258 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2259 		return (EINVAL);
2260 
2261 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2262 		return (EPERM);
2263 
2264 	/*
2265 	 * We don't actually need to check against the supported
2266 	 * media types of the adapter; ifmedia will take care of
2267 	 * that for us.
2268 	 */
2269 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2270 	case IFM_AUTO:
2271 	case IFM_10G_T:
2272 		speed |= IXGBE_LINK_SPEED_100_FULL;
2273 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2274 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2275 		break;
2276 	case IFM_10G_LRM:
2277 	case IFM_10G_LR:
2278 #ifndef IFM_ETH_XTYPE
2279 	case IFM_10G_SR: /* KR, too */
2280 	case IFM_10G_CX4: /* KX4 */
2281 #else
2282 	case IFM_10G_KR:
2283 	case IFM_10G_KX4:
2284 #endif
2285 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2286 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2287 		break;
2288 #ifndef IFM_ETH_XTYPE
2289 	case IFM_1000_CX: /* KX */
2290 #else
2291 	case IFM_1000_KX:
2292 #endif
2293 	case IFM_1000_LX:
2294 	case IFM_1000_SX:
2295 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2296 		break;
2297 	case IFM_1000_T:
2298 		speed |= IXGBE_LINK_SPEED_100_FULL;
2299 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2300 		break;
2301 	case IFM_10G_TWINAX:
2302 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2303 		break;
2304 	case IFM_100_TX:
2305 		speed |= IXGBE_LINK_SPEED_100_FULL;
2306 		break;
2307 	case IFM_10_T:
2308 		speed |= IXGBE_LINK_SPEED_10_FULL;
2309 		break;
2310 	default:
2311 		goto invalid;
2312 	}
2313 
2314 	hw->mac.autotry_restart = TRUE;
2315 	hw->mac.ops.setup_link(hw, speed, TRUE);
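	/*
	 * Record the advertised speeds using the driver's advertise
	 * encoding (shared with the advertise_speed sysctl):
	 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M.
	 */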
2316 	adapter->advertise =
2317 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2318 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2319 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2320 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2321 
2322 	return (0);
2323 
2324 invalid:
2325 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2326 
2327 	return (EINVAL);
2328 } /* ixgbe_if_media_change */
2329 
2330 /************************************************************************
2331  * ixgbe_set_promisc
2332  ************************************************************************/
2333 static int
2334 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2335 {
2336 	struct adapter *adapter = iflib_get_softc(ctx);
2337 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2338 	u32            rctl;
2339 	int            mcnt = 0;
2340 
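	/*
	 * First clear unicast promiscuous and, when the multicast
	 * filter table can hold every address, multicast promiscuous
	 * too; UPE/MPE are then set again below per the interface flags.
	 */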
2341 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2342 	rctl &= (~IXGBE_FCTRL_UPE);
2343 	if (ifp->if_flags & IFF_ALLMULTI)
2344 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2345 	else {
2346 		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2347 	}
2348 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2349 		rctl &= (~IXGBE_FCTRL_MPE);
2350 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2351 
2352 	if (ifp->if_flags & IFF_PROMISC) {
2353 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2354 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2355 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2356 		rctl |= IXGBE_FCTRL_MPE;
2357 		rctl &= ~IXGBE_FCTRL_UPE;
2358 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2359 	}
2360 	return (0);
2361 } /* ixgbe_if_promisc_set */
2362 
2363 /************************************************************************
2364  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2365  ************************************************************************/
2366 static int
2367 ixgbe_msix_link(void *arg)
2368 {
2369 	struct adapter  *adapter = arg;
2370 	struct ixgbe_hw *hw = &adapter->hw;
2371 	u32             eicr, eicr_mask;
2372 	s32             retval;
2373 
2374 	++adapter->link_irq;
2375 
2376 	/* Pause other interrupts */
2377 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2378 
2379 	/* First get the cause */
2380 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2381 	/* Be sure the queue bits are not cleared */
2382 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2383 	/* Clear interrupt with write */
2384 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2385 
2386 	/* Link status change */
2387 	if (eicr & IXGBE_EICR_LSC) {
2388 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2389 		iflib_admin_intr_deferred(adapter->ctx);
2390 	}
2391 
2392 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2393 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2394 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2395 			/* This is probably overkill :) */
2396 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2397 				return (FILTER_HANDLED);
2398 			/* Disable the interrupt */
2399 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2400 			GROUPTASK_ENQUEUE(&adapter->fdir_task);
2401 		} else
2402 			if (eicr & IXGBE_EICR_ECC) {
2403 				device_printf(iflib_get_dev(adapter->ctx),
2404 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2405 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2406 			}
2407 
2408 		/* Check for over temp condition */
2409 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2410 			switch (adapter->hw.mac.type) {
2411 			case ixgbe_mac_X550EM_a:
2412 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2413 					break;
2414 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2415 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2416 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2417 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2418 				retval = hw->phy.ops.check_overtemp(hw);
2419 				if (retval != IXGBE_ERR_OVERTEMP)
2420 					break;
2421 				device_printf(iflib_get_dev(adapter->ctx),
2422 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2423 				device_printf(iflib_get_dev(adapter->ctx),
2424 				    "System shutdown required!\n");
2425 				break;
2426 			default:
2427 				if (!(eicr & IXGBE_EICR_TS))
2428 					break;
2429 				retval = hw->phy.ops.check_overtemp(hw);
2430 				if (retval != IXGBE_ERR_OVERTEMP)
2431 					break;
2432 				device_printf(iflib_get_dev(adapter->ctx),
2433 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2434 				device_printf(iflib_get_dev(adapter->ctx),
2435 				    "System shutdown required!\n");
2436 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2437 				break;
2438 			}
2439 		}
2440 
2441 		/* Check for VF message */
2442 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2443 		    (eicr & IXGBE_EICR_MAILBOX))
2444 			GROUPTASK_ENQUEUE(&adapter->mbx_task);
2445 	}
2446 
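	/*
	 * Module presence is signaled via GPI SDP0 on X540 and later,
	 * SDP2 on older MACs (schedules the mod task); on 82599, GPI
	 * SDP1 kicks the multispeed-fiber (msf) task.
	 */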
2447 	if (ixgbe_is_sfp(hw)) {
2448 		/* Pluggable optics-related interrupt */
2449 		if (hw->mac.type >= ixgbe_mac_X540)
2450 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2451 		else
2452 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2453 
2454 		if (eicr & eicr_mask) {
2455 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2456 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2457 				GROUPTASK_ENQUEUE(&adapter->mod_task);
2458 		}
2459 
2460 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2461 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2462 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2463 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2464 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2465 				GROUPTASK_ENQUEUE(&adapter->msf_task);
2466 		}
2467 	}
2468 
2469 	/* Check for fan failure */
2470 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2471 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2472 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2473 	}
2474 
2475 	/* External PHY interrupt */
2476 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2477 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2478 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2479 		GROUPTASK_ENQUEUE(&adapter->phy_task);
2480 	}
2481 
2482 	/* Re-enable other interrupts */
2483 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2484 
2485 	return (FILTER_HANDLED);
2486 } /* ixgbe_msix_link */
2487 
2488 /************************************************************************
2489  * ixgbe_sysctl_interrupt_rate_handler
2490  ************************************************************************/
2491 static int
2492 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2493 {
2494 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2495 	int                error;
2496 	unsigned int       reg, usec, rate;
2497 
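	/*
	 * The EITR interval field occupies bits 3..11 and is treated
	 * here as counting 2 usec units, so interrupts/sec is
	 * 1000000 / (2 * field) = 500000 / field.  Conversely,
	 * (4000000 / rate) & 0xff8 is 500000 / rate already shifted
	 * into the field position.
	 */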
2498 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2499 	usec = ((reg & 0x0FF8) >> 3);
2500 	if (usec > 0)
2501 		rate = 500000 / usec;
2502 	else
2503 		rate = 0;
2504 	error = sysctl_handle_int(oidp, &rate, 0, req);
2505 	if (error || !req->newptr)
2506 		return error;
2507 	reg &= ~0xfff; /* default, no limitation */
2508 	ixgbe_max_interrupt_rate = 0;
2509 	if (rate > 0 && rate < 500000) {
2510 		if (rate < 1000)
2511 			rate = 1000;
2512 		ixgbe_max_interrupt_rate = rate;
2513 		reg |= ((4000000/rate) & 0xff8);
2514 	}
2515 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2516 
2517 	return (0);
2518 } /* ixgbe_sysctl_interrupt_rate_handler */
2519 
2520 /************************************************************************
2521  * ixgbe_add_device_sysctls
2522  ************************************************************************/
2523 static void
2524 ixgbe_add_device_sysctls(if_ctx_t ctx)
2525 {
2526 	struct adapter         *adapter = iflib_get_softc(ctx);
2527 	device_t               dev = iflib_get_dev(ctx);
2528 	struct ixgbe_hw        *hw = &adapter->hw;
2529 	struct sysctl_oid_list *child;
2530 	struct sysctl_ctx_list *ctx_list;
2531 
2532 	ctx_list = device_get_sysctl_ctx(dev);
2533 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2534 
2535 	/* Sysctls for all devices */
2536 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2537 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2538 	    IXGBE_SYSCTL_DESC_SET_FC);
2539 
2540 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2541 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2542 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2543 
2544 #ifdef IXGBE_DEBUG
2545 	/* testing sysctls (for all devices) */
2546 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2547 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2548 	    "I", "PCI Power State");
2549 
2550 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2551 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2552 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2553 #endif
2554 	/* for X550 series devices */
2555 	if (hw->mac.type >= ixgbe_mac_X550)
2556 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2557 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2558 		    "I", "DMA Coalesce");
2559 
2560 	/* for WoL-capable devices */
2561 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2562 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2563 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2564 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2565 
2566 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2567 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2568 		    "I", "Enable/Disable Wake Up Filters");
2569 	}
2570 
2571 	/* for X552/X557-AT devices */
2572 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2573 		struct sysctl_oid *phy_node;
2574 		struct sysctl_oid_list *phy_list;
2575 
2576 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2577 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2578 		phy_list = SYSCTL_CHILDREN(phy_node);
2579 
2580 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2581 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2582 		    "I", "Current External PHY Temperature (Celsius)");
2583 
2584 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2585 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2586 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2587 		    "External PHY High Temperature Event Occurred");
2588 	}
2589 
2590 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2591 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2592 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2593 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2594 	}
2595 } /* ixgbe_add_device_sysctls */
2596 
2597 /************************************************************************
2598  * ixgbe_allocate_pci_resources
2599  ************************************************************************/
2600 static int
2601 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2602 {
2603 	struct adapter *adapter = iflib_get_softc(ctx);
2604 	device_t        dev = iflib_get_dev(ctx);
2605 	int             rid;
2606 
2607 	rid = PCIR_BAR(0);
2608 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2609 	    RF_ACTIVE);
2610 
2611 	if (!(adapter->pci_mem)) {
2612 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2613 		return (ENXIO);
2614 	}
2615 
2616 	/* Save bus_space values for READ/WRITE_REG macros */
2617 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2618 	adapter->osdep.mem_bus_space_handle =
2619 	    rman_get_bushandle(adapter->pci_mem);
2620 	/* Set hw values for shared code */
2621 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2622 
2623 	return (0);
2624 } /* ixgbe_allocate_pci_resources */
2625 
2626 /************************************************************************
2627  * ixgbe_detach - Device removal routine
2628  *
2629  *   Called when the driver is being removed.
2630  *   Stops the adapter and deallocates all the resources
2631  *   that were allocated for driver operation.
2632  *
2633  *   return 0 on success, positive on failure
2634  ************************************************************************/
2635 static int
2636 ixgbe_if_detach(if_ctx_t ctx)
2637 {
2638 	struct adapter *adapter = iflib_get_softc(ctx);
2639 	device_t       dev = iflib_get_dev(ctx);
2640 	u32            ctrl_ext;
2641 
2642 	INIT_DEBUGOUT("ixgbe_detach: begin");
2643 
2644 	if (ixgbe_pci_iov_detach(dev) != 0) {
2645 		device_printf(dev, "SR-IOV in use; detach first.\n");
2646 		return (EBUSY);
2647 	}
2648 
2649 	iflib_config_gtask_deinit(&adapter->mod_task);
2650 	iflib_config_gtask_deinit(&adapter->msf_task);
2651 	iflib_config_gtask_deinit(&adapter->phy_task);
2652 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2653 		iflib_config_gtask_deinit(&adapter->mbx_task);
2654 
2655 	ixgbe_setup_low_power_mode(ctx);
2656 
2657 	/* let hardware know driver is unloading */
2658 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2659 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2660 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2661 
2662 	ixgbe_free_pci_resources(ctx);
2663 	free(adapter->mta, M_IXGBE);
2664 
2665 	return (0);
2666 } /* ixgbe_if_detach */
2667 
2668 /************************************************************************
2669  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2670  *
2671  *   Prepare the adapter/port for LPLU and/or WoL
2672  ************************************************************************/
2673 static int
2674 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2675 {
2676 	struct adapter  *adapter = iflib_get_softc(ctx);
2677 	struct ixgbe_hw *hw = &adapter->hw;
2678 	device_t        dev = iflib_get_dev(ctx);
2679 	s32             error = 0;
2680 
2681 	if (!hw->wol_enabled)
2682 		ixgbe_set_phy_power(hw, FALSE);
2683 
2684 	/* Limit power management flow to X550EM baseT */
2685 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2686 	    hw->phy.ops.enter_lplu) {
2687 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2688 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2689 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2690 
2691 		/*
2692 		 * Clear Wake Up Status register to prevent any previous wakeup
2693 		 * events from waking us up immediately after we suspend.
2694 		 */
2695 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2696 
2697 		/*
2698 		 * Program the Wakeup Filter Control register with user filter
2699 		 * settings
2700 		 */
2701 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2702 
2703 		/* Enable wakeups and power management in Wakeup Control */
2704 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2705 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2706 
2707 		/* X550EM baseT adapters need a special LPLU flow */
2708 		hw->phy.reset_disable = TRUE;
2709 		ixgbe_if_stop(ctx);
2710 		error = hw->phy.ops.enter_lplu(hw);
2711 		if (error)
2712 			device_printf(dev, "Error entering LPLU: %d\n", error);
2713 		hw->phy.reset_disable = FALSE;
2714 	} else {
2715 		/* Just stop for other adapters */
2716 		ixgbe_if_stop(ctx);
2717 	}
2718 
2719 	return error;
2720 } /* ixgbe_setup_low_power_mode */
2721 
2722 /************************************************************************
2723  * ixgbe_shutdown - Shutdown entry point
2724  ************************************************************************/
2725 static int
2726 ixgbe_if_shutdown(if_ctx_t ctx)
2727 {
2728 	int error = 0;
2729 
2730 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2731 
2732 	error = ixgbe_setup_low_power_mode(ctx);
2733 
2734 	return (error);
2735 } /* ixgbe_if_shutdown */
2736 
2737 /************************************************************************
2738  * ixgbe_suspend
2739  *
2740  *   From D0 to D3
2741  ************************************************************************/
2742 static int
2743 ixgbe_if_suspend(if_ctx_t ctx)
2744 {
2745 	int error = 0;
2746 
2747 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2748 
2749 	error = ixgbe_setup_low_power_mode(ctx);
2750 
2751 	return (error);
2752 } /* ixgbe_if_suspend */
2753 
2754 /************************************************************************
2755  * ixgbe_resume
2756  *
2757  *   From D3 to D0
2758  ************************************************************************/
2759 static int
2760 ixgbe_if_resume(if_ctx_t ctx)
2761 {
2762 	struct adapter  *adapter = iflib_get_softc(ctx);
2763 	device_t        dev = iflib_get_dev(ctx);
2764 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2765 	struct ixgbe_hw *hw = &adapter->hw;
2766 	u32             wus;
2767 
2768 	INIT_DEBUGOUT("ixgbe_resume: begin");
2769 
2770 	/* Read & clear WUS register */
2771 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2772 	if (wus)
2773 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2774 		    wus);
2775 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2776 	/* And clear WUFC until next low-power transition */
2777 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2778 
2779 	/*
2780 	 * Required after D3->D0 transition;
2781 	 * will re-advertise all previous advertised speeds
2782 	 */
2783 	if (ifp->if_flags & IFF_UP)
2784 		ixgbe_if_init(ctx);
2785 
2786 	return (0);
2787 } /* ixgbe_if_resume */
2788 
2789 /************************************************************************
2790  * ixgbe_if_mtu_set - Ioctl mtu entry point
2791  *
2792  *   Return 0 on success, EINVAL on failure
2793  ************************************************************************/
2794 static int
2795 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2796 {
2797 	struct adapter *adapter = iflib_get_softc(ctx);
2798 	int error = 0;
2799 
2800 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2801 
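	/*
	 * max_frame_size is the on-wire frame size: the MTU plus the
	 * link-layer header overhead accounted for by IXGBE_MTU_HDR.
	 */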
2802 	if (mtu > IXGBE_MAX_MTU) {
2803 		error = EINVAL;
2804 	} else {
2805 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2806 	}
2807 
2808 	return error;
2809 } /* ixgbe_if_mtu_set */
2810 
2811 /************************************************************************
2812  * ixgbe_if_crcstrip_set
2813  ************************************************************************/
2814 static void
2815 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2816 {
2817 	struct adapter *sc = iflib_get_softc(ctx);
2818 	struct ixgbe_hw *hw = &sc->hw;
2819 	/* crc stripping is set in two places:
2820 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2821 	 * IXGBE_RDRXCTL (set by the original driver in
2822 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2823 	 *	We disable the setting when netmap is compiled in).
2824 	 * We update the values here, but also in ixgbe.c because
2825 	 * init_locked sometimes is called outside our control.
2826 	 */
2827 	uint32_t hl, rxc;
2828 
2829 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2830 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2831 #ifdef NETMAP
2832 	if (netmap_verbose)
2833 		D("%s read  HLREG 0x%x rxc 0x%x",
2834 			onoff ? "enter" : "exit", hl, rxc);
2835 #endif
2836 	/* hw requirements ... */
2837 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2838 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2839 	if (onoff && !crcstrip) {
2840 		/* keep the crc. Fast rx */
2841 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2842 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2843 	} else {
2844 		/* reset default mode */
2845 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2846 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2847 	}
2848 #ifdef NETMAP
2849 	if (netmap_verbose)
2850 		D("%s write HLREG 0x%x rxc 0x%x",
2851 			onoff ? "enter" : "exit", hl, rxc);
2852 #endif
2853 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2854 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2855 } /* ixgbe_if_crcstrip_set */
2856 
2857 /*********************************************************************
2858  * ixgbe_if_init - Init entry point
2859  *
2860  *   Used in two ways: it is the init entry point for the
2861  *   stack's network interface structure, and it is also
2862  *   used by the driver as a hw/sw initialization routine
2863  *   to get to a consistent state.
2866  **********************************************************************/
2867 void
2868 ixgbe_if_init(if_ctx_t ctx)
2869 {
2870 	struct adapter     *adapter = iflib_get_softc(ctx);
2871 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2872 	device_t           dev = iflib_get_dev(ctx);
2873 	struct ixgbe_hw *hw = &adapter->hw;
2874 	struct ix_rx_queue *rx_que;
2875 	struct ix_tx_queue *tx_que;
2876 	u32             txdctl, mhadd;
2877 	u32             rxdctl, rxctrl;
2878 	u32             ctrl_ext;
2879 
2880 	int             i, j, err;
2881 
2882 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2883 
2884 	/* Queue indices may change with IOV mode */
2885 	ixgbe_align_all_queue_indices(adapter);
2886 
2887 	/* reprogram the RAR[0] in case user changed it. */
2888 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2889 
2890 	/* Get the latest mac address; the user can set a LAA */
2891 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2892 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2893 	hw->addr_ctrl.rar_used_count = 1;
2894 
2895 	ixgbe_init_hw(hw);
2896 
2897 	ixgbe_initialize_iov(adapter);
2898 
2899 	ixgbe_initialize_transmit_units(ctx);
2900 
2901 	/* Setup Multicast table */
2902 	ixgbe_if_multi_set(ctx);
2903 
2904 	/* Determine the correct mbuf pool, based on frame size */
2905 	if (adapter->max_frame_size <= MCLBYTES)
2906 		adapter->rx_mbuf_sz = MCLBYTES;
2907 	else
2908 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
2909 
2910 	/* Configure RX settings */
2911 	ixgbe_initialize_receive_units(ctx);
2912 
2913 	/* Enable SDP & MSI-X interrupts based on adapter */
2914 	ixgbe_config_gpie(adapter);
2915 
2916 	/* Set MTU size */
2917 	if (ifp->if_mtu > ETHERMTU) {
2918 		/* aka IXGBE_MAXFRS on 82599 and newer */
2919 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2920 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2921 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2922 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2923 	}
2924 
2925 	/* Now enable all the queues */
2926 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2927 		struct tx_ring *txr = &tx_que->txr;
2928 
2929 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2930 		txdctl |= IXGBE_TXDCTL_ENABLE;
2931 		/* Set WTHRESH to 8, burst writeback */
2932 		txdctl |= (8 << 16);
2933 		/*
2934 		 * When the internal queue falls below PTHRESH (32),
2935 		 * start prefetching as long as there are at least
2936 		 * HTHRESH (1) buffers ready. The values are taken
2937 		 * from the Intel linux driver 3.8.21.
2938 		 * Prefetching enables tx line rate even with 1 queue.
2939 		 */
2940 		txdctl |= (32 << 0) | (1 << 8);
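		/*
		 * Decoded against the usual TXDCTL layout (PTHRESH bits 6:0,
		 * HTHRESH bits 14:8, WTHRESH bits 22:16), the two writes
		 * above program PTHRESH=32, HTHRESH=1 and WTHRESH=8.
		 */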
2941 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2942 	}
2943 
2944 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2945 		struct rx_ring *rxr = &rx_que->rxr;
2946 
2947 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2948 		if (hw->mac.type == ixgbe_mac_82598EB) {
2949 			/*
2950 			 * PTHRESH = 32 (0x20)
2951 			 * HTHRESH = 4
2952 			 * WTHRESH = 8
2953 			 */
2954 			rxdctl &= ~0x3FFFFF;
2955 			rxdctl |= 0x080420;
2956 		}
2957 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2958 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2959 		for (j = 0; j < 10; j++) {
2960 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2961 			    IXGBE_RXDCTL_ENABLE)
2962 				break;
2963 			else
2964 				msec_delay(1);
2965 		}
2966 		wmb();
2967 	}
2968 
2969 	/* Enable Receive engine */
2970 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2971 	if (hw->mac.type == ixgbe_mac_82598EB)
2972 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2973 	rxctrl |= IXGBE_RXCTRL_RXEN;
2974 	ixgbe_enable_rx_dma(hw, rxctrl);
2975 
2976 	/* Set up MSI/MSI-X routing */
2977 	if (ixgbe_enable_msix)  {
2978 		ixgbe_configure_ivars(adapter);
2979 		/* Set up auto-mask */
2980 		if (hw->mac.type == ixgbe_mac_82598EB)
2981 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2982 		else {
2983 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2984 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2985 		}
2986 	} else {  /* Simple settings for Legacy/MSI */
2987 		ixgbe_set_ivar(adapter, 0, 0, 0);
2988 		ixgbe_set_ivar(adapter, 0, 0, 1);
2989 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2990 	}
2991 
2992 	ixgbe_init_fdir(adapter);
2993 
2994 	/*
2995 	 * Check on any SFP devices that
2996 	 * need to be kick-started
2997 	 */
2998 	if (hw->phy.type == ixgbe_phy_none) {
2999 		err = hw->phy.ops.identify(hw);
3000 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3001 			device_printf(dev,
3002 			    "Unsupported SFP+ module type was detected.\n");
3003 			return;
3004 		}
3005 	}
3006 
3007 	/* Set moderation on the Link interrupt */
3008 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3009 
3010 	/* Enable power to the phy. */
3011 	ixgbe_set_phy_power(hw, TRUE);
3012 
3013 	/* Config/Enable Link */
3014 	ixgbe_config_link(adapter);
3015 
3016 	/* Hardware Packet Buffer & Flow Control setup */
3017 	ixgbe_config_delay_values(adapter);
3018 
3019 	/* Initialize the FC settings */
3020 	ixgbe_start_hw(hw);
3021 
3022 	/* Set up VLAN support and filter */
3023 	ixgbe_setup_vlan_hw_support(ctx);
3024 
3025 	/* Setup DMA Coalescing */
3026 	ixgbe_config_dmac(adapter);
3027 
3028 	/* And now turn on interrupts */
3029 	ixgbe_if_enable_intr(ctx);
3030 
3031 	/* Enable the use of the MBX by the VF's */
3032 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3033 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3034 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3035 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3036 	}
3037 
3038 } /* ixgbe_if_init */
3039 
3040 /************************************************************************
3041  * ixgbe_set_ivar
3042  *
3043  *   Setup the correct IVAR register for a particular MSI-X interrupt
3044  *     (yes this is all very magic and confusing :)
3045  *    - entry is the register array entry
3046  *    - vector is the MSI-X vector for this queue
3047  *    - type is RX/TX/MISC
3048  ************************************************************************/
3049 static void
3050 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3051 {
3052 	struct ixgbe_hw *hw = &adapter->hw;
3053 	u32 ivar, index;
3054 
3055 	vector |= IXGBE_IVAR_ALLOC_VAL;
3056 
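	/*
	 * Worked example, assuming the 82599-and-later layout handled
	 * below: a TX queue (type 1) with entry 3 uses register
	 * IVAR(3 >> 1) = IVAR(1) and byte offset 16 * (3 & 1) + 8 * 1 = 24,
	 * so its vector lands in bits 31:24 of IVAR(1).  On 82598 the entry
	 * is first offset by type * 64 and four entries share one register.
	 */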
3057 	switch (hw->mac.type) {
3058 	case ixgbe_mac_82598EB:
3059 		if (type == -1)
3060 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3061 		else
3062 			entry += (type * 64);
3063 		index = (entry >> 2) & 0x1F;
3064 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3065 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3066 		ivar |= (vector << (8 * (entry & 0x3)));
3067 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3068 		break;
3069 	case ixgbe_mac_82599EB:
3070 	case ixgbe_mac_X540:
3071 	case ixgbe_mac_X550:
3072 	case ixgbe_mac_X550EM_x:
3073 	case ixgbe_mac_X550EM_a:
3074 		if (type == -1) { /* MISC IVAR */
3075 			index = (entry & 1) * 8;
3076 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3077 			ivar &= ~(0xFF << index);
3078 			ivar |= (vector << index);
3079 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3080 		} else {          /* RX/TX IVARS */
3081 			index = (16 * (entry & 1)) + (8 * type);
3082 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3083 			ivar &= ~(0xFF << index);
3084 			ivar |= (vector << index);
3085 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3086 		}
		break;
3087 	default:
3088 		break;
3089 	}
3090 } /* ixgbe_set_ivar */
3091 
3092 /************************************************************************
3093  * ixgbe_configure_ivars
3094  ************************************************************************/
3095 static void
3096 ixgbe_configure_ivars(struct adapter *adapter)
3097 {
3098 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3099 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3100 	u32                newitr;
3101 
3102 	if (ixgbe_max_interrupt_rate > 0)
3103 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3104 	else {
3105 		/*
3106 		 * Disable DMA coalescing if interrupt moderation is
3107 		 * disabled.
3108 		 */
3109 		adapter->dmac = 0;
3110 		newitr = 0;
3111 	}
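	/*
	 * The EITR interval field starts at bit 3 and (on 82599-class
	 * parts, assumed here) counts 2 usec units, so 4000000 / rate
	 * yields the register value directly; e.g. a 31250 irq/s cap
	 * gives 0x80, i.e. 16 * 2 usec = 32 usec between interrupts.
	 */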
3112 
3113 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3114 		struct rx_ring *rxr = &rx_que->rxr;
3115 
3116 		/* First the RX queue entry */
3117 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3118 
3119 		/* Set an Initial EITR value */
3120 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3121 	}
3122 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3123 		struct tx_ring *txr = &tx_que->txr;
3124 
3125 		/* ... and the TX */
3126 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3127 	}
3128 	/* For the Link interrupt */
3129 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3130 } /* ixgbe_configure_ivars */
3131 
3132 /************************************************************************
3133  * ixgbe_config_gpie
3134  ************************************************************************/
3135 static void
3136 ixgbe_config_gpie(struct adapter *adapter)
3137 {
3138 	struct ixgbe_hw *hw = &adapter->hw;
3139 	u32             gpie;
3140 
3141 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3142 
3143 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3144 		/* Enable Enhanced MSI-X mode */
3145 		gpie |= IXGBE_GPIE_MSIX_MODE
3146 		     |  IXGBE_GPIE_EIAME
3147 		     |  IXGBE_GPIE_PBA_SUPPORT
3148 		     |  IXGBE_GPIE_OCD;
3149 	}
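	/*
	 * In this mode every interrupt cause gets its own MSI-X vector;
	 * the remaining bits tune auto-masking (EIAME), MSI-X PBA support
	 * and how "other" causes are cleared (OCD), going by the
	 * shared-code bit names.
	 */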
3150 
3151 	/* Fan Failure Interrupt */
3152 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3153 		gpie |= IXGBE_SDP1_GPIEN;
3154 
3155 	/* Thermal Sensor Interrupt */
3156 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3157 		gpie |= IXGBE_SDP0_GPIEN_X540;
3158 
3159 	/* Link detection */
3160 	switch (hw->mac.type) {
3161 	case ixgbe_mac_82599EB:
3162 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3163 		break;
3164 	case ixgbe_mac_X550EM_x:
3165 	case ixgbe_mac_X550EM_a:
3166 		gpie |= IXGBE_SDP0_GPIEN_X540;
3167 		break;
3168 	default:
3169 		break;
3170 	}
3171 
3172 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3173 
3174 } /* ixgbe_config_gpie */
3175 
3176 /************************************************************************
3177  * ixgbe_config_delay_values
3178  *
3179  *   Requires adapter->max_frame_size to be set.
3180  ************************************************************************/
3181 static void
3182 ixgbe_config_delay_values(struct adapter *adapter)
3183 {
3184 	struct ixgbe_hw *hw = &adapter->hw;
3185 	u32             rxpb, frame, size, tmp;
3186 
3187 	frame = adapter->max_frame_size;
3188 
3189 	/* Calculate High Water */
3190 	switch (hw->mac.type) {
3191 	case ixgbe_mac_X540:
3192 	case ixgbe_mac_X550:
3193 	case ixgbe_mac_X550EM_x:
3194 	case ixgbe_mac_X550EM_a:
3195 		tmp = IXGBE_DV_X540(frame, frame);
3196 		break;
3197 	default:
3198 		tmp = IXGBE_DV(frame, frame);
3199 		break;
3200 	}
3201 	size = IXGBE_BT2KB(tmp);
3202 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3203 	hw->fc.high_water[0] = rxpb - size;
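	/*
	 * Rough arithmetic, assuming the shared-code definitions: the
	 * IXGBE_DV*() macros return a delay value in bits, IXGBE_BT2KB()
	 * rounds that up to kilobytes, and RXPBSIZE is read back in KB, so
	 * the high-water mark is the packet buffer size minus the traffic
	 * that can still arrive after a pause frame is sent.
	 */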
3204 
3205 	/* Now calculate Low Water */
3206 	switch (hw->mac.type) {
3207 	case ixgbe_mac_X540:
3208 	case ixgbe_mac_X550:
3209 	case ixgbe_mac_X550EM_x:
3210 	case ixgbe_mac_X550EM_a:
3211 		tmp = IXGBE_LOW_DV_X540(frame);
3212 		break;
3213 	default:
3214 		tmp = IXGBE_LOW_DV(frame);
3215 		break;
3216 	}
3217 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3218 
3219 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3220 	hw->fc.send_xon = TRUE;
3221 } /* ixgbe_config_delay_values */
3222 
3223 /************************************************************************
3224  * ixgbe_if_multi_set - Multicast Update
3225  *
3226  *   Called whenever the multicast address list is updated;
 *   ixgbe_mc_filter_apply() below is its per-address helper.
3227  ************************************************************************/
3228 static int
3229 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3230 {
3231 	struct adapter *adapter = arg;
3232 	struct ixgbe_mc_addr *mta = adapter->mta;
3233 
3234 	if (ifma->ifma_addr->sa_family != AF_LINK)
3235 		return (0);
3236 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3237 		return (0);
3238 	bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3239 	    mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3240 	mta[count].vmdq = adapter->pool;
3241 
3242 	return (1);
3243 } /* ixgbe_mc_filter_apply */
3244 
3245 static void
3246 ixgbe_if_multi_set(if_ctx_t ctx)
3247 {
3248 	struct adapter       *adapter = iflib_get_softc(ctx);
3249 	struct ixgbe_mc_addr *mta;
3250 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3251 	u8                   *update_ptr;
3252 	int                  mcnt = 0;
3253 	u32                  fctrl;
3254 
3255 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3256 
3257 	mta = adapter->mta;
3258 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3259 
3260 	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3261 
3262 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3264 	if (ifp->if_flags & IFF_PROMISC)
3265 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3266 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3267 	    ifp->if_flags & IFF_ALLMULTI) {
3268 		fctrl |= IXGBE_FCTRL_MPE;
3269 		fctrl &= ~IXGBE_FCTRL_UPE;
3270 	} else
3271 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3272 
3273 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3274 
3275 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3276 		update_ptr = (u8 *)mta;
3277 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3278 		    ixgbe_mc_array_itr, TRUE);
3279 	}
3280 
3281 } /* ixgbe_if_multi_set */
3282 
3283 /************************************************************************
3284  * ixgbe_mc_array_itr
3285  *
3286  *   An iterator function needed by the multicast shared code.
3287  *   It feeds the shared code routine the addresses in the
3288  *   array built by ixgbe_if_multi_set() one by one.
3289  ************************************************************************/
3290 static u8 *
3291 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3292 {
3293 	struct ixgbe_mc_addr *mta;
3294 
3295 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3296 	*vmdq = mta->vmdq;
3297 
3298 	*update_ptr = (u8*)(mta + 1);
3299 
3300 	return (mta->addr);
3301 } /* ixgbe_mc_array_itr */
3302 
3303 /************************************************************************
3304  * ixgbe_if_timer - Timer routine
3305  *
3306  *   Checks for link status, updates statistics,
3307  *   and runs the watchdog check.
3308  ************************************************************************/
3309 static void
3310 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3311 {
3312 	struct adapter *adapter = iflib_get_softc(ctx);
3313 
3314 	if (qid != 0)
3315 		return;
3316 
3317 	/* Check for pluggable optics */
3318 	if (adapter->sfp_probe)
3319 		if (!ixgbe_sfp_probe(ctx))
3320 			return; /* Nothing to do */
3321 
3322 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3323 	    &adapter->link_up, 0);
3324 
3325 	/* Fire off the adminq task */
3326 	iflib_admin_intr_deferred(ctx);
3327 
3328 } /* ixgbe_if_timer */
3329 
3330 /************************************************************************
3331  * ixgbe_sfp_probe
3332  *
3333  *   Determine whether a port has had optics inserted.
3334  ************************************************************************/
3335 static bool
3336 ixgbe_sfp_probe(if_ctx_t ctx)
3337 {
3338 	struct adapter  *adapter = iflib_get_softc(ctx);
3339 	struct ixgbe_hw *hw = &adapter->hw;
3340 	device_t        dev = iflib_get_dev(ctx);
3341 	bool            result = FALSE;
3342 
3343 	if ((hw->phy.type == ixgbe_phy_nl) &&
3344 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3345 		s32 ret = hw->phy.ops.identify_sfp(hw);
3346 		if (ret)
3347 			goto out;
3348 		ret = hw->phy.ops.reset(hw);
3349 		adapter->sfp_probe = FALSE;
3350 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3351 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3352 			device_printf(dev,
3353 			    "Reload driver with supported module.\n");
3354 			goto out;
3355 		} else
3356 			device_printf(dev, "SFP+ module detected!\n");
3357 		/* We now have supported optics */
3358 		result = TRUE;
3359 	}
3360 out:
3361 
3362 	return (result);
3363 } /* ixgbe_sfp_probe */
3364 
3365 /************************************************************************
3366  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3367  ************************************************************************/
3368 static void
3369 ixgbe_handle_mod(void *context)
3370 {
3371 	if_ctx_t        ctx = context;
3372 	struct adapter  *adapter = iflib_get_softc(ctx);
3373 	struct ixgbe_hw *hw = &adapter->hw;
3374 	device_t        dev = iflib_get_dev(ctx);
3375 	u32             err, cage_full = 0;
3376 
3377 	adapter->sfp_reinit = 1;
3378 	if (adapter->hw.need_crosstalk_fix) {
3379 		switch (hw->mac.type) {
3380 		case ixgbe_mac_82599EB:
3381 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3382 			    IXGBE_ESDP_SDP2;
3383 			break;
3384 		case ixgbe_mac_X550EM_x:
3385 		case ixgbe_mac_X550EM_a:
3386 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3387 			    IXGBE_ESDP_SDP0;
3388 			break;
3389 		default:
3390 			break;
3391 		}
3392 
3393 		if (!cage_full)
3394 			goto handle_mod_out;
3395 	}
3396 
3397 	err = hw->phy.ops.identify_sfp(hw);
3398 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3399 		device_printf(dev,
3400 		    "Unsupported SFP+ module type was detected.\n");
3401 		goto handle_mod_out;
3402 	}
3403 
3404 	if (hw->mac.type == ixgbe_mac_82598EB)
3405 		err = hw->phy.ops.reset(hw);
3406 	else
3407 		err = hw->mac.ops.setup_sfp(hw);
3408 
3409 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3410 		device_printf(dev,
3411 		    "Setup failure - unsupported SFP+ module type.\n");
3412 		goto handle_mod_out;
3413 	}
3414 	GROUPTASK_ENQUEUE(&adapter->msf_task);
3415 	return;
3416 
3417 handle_mod_out:
3418 	adapter->sfp_reinit = 0;
3419 } /* ixgbe_handle_mod */
3420 
3421 
3422 /************************************************************************
3423  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3424  ************************************************************************/
3425 static void
3426 ixgbe_handle_msf(void *context)
3427 {
3428 	if_ctx_t        ctx = context;
3429 	struct adapter  *adapter = iflib_get_softc(ctx);
3430 	struct ixgbe_hw *hw = &adapter->hw;
3431 	u32             autoneg;
3432 	bool            negotiate;
3433 
3434 	if (adapter->sfp_reinit != 1)
3435 		return;
3436 
3437 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3438 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3439 
3440 	autoneg = hw->phy.autoneg_advertised;
3441 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3442 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3443 	if (hw->mac.ops.setup_link)
3444 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3445 
3446 	/* Adjust media types shown in ifconfig */
3447 	ifmedia_removeall(adapter->media);
3448 	ixgbe_add_media_types(adapter->ctx);
3449 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3450 
3451 	adapter->sfp_reinit = 0;
3452 } /* ixgbe_handle_msf */
3453 
3454 /************************************************************************
3455  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3456  ************************************************************************/
3457 static void
3458 ixgbe_handle_phy(void *context)
3459 {
3460 	if_ctx_t        ctx = context;
3461 	struct adapter  *adapter = iflib_get_softc(ctx);
3462 	struct ixgbe_hw *hw = &adapter->hw;
3463 	int             error;
3464 
3465 	error = hw->phy.ops.handle_lasi(hw);
3466 	if (error == IXGBE_ERR_OVERTEMP)
3467 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3468 	else if (error)
3469 		device_printf(adapter->dev,
3470 		    "Error handling LASI interrupt: %d\n", error);
3471 } /* ixgbe_handle_phy */
3472 
3473 /************************************************************************
3474  * ixgbe_if_stop - Stop the hardware
3475  *
3476  *   Disables all traffic on the adapter by issuing a
3477  *   global reset on the MAC and deallocates TX/RX buffers.
3478  ************************************************************************/
3479 static void
3480 ixgbe_if_stop(if_ctx_t ctx)
3481 {
3482 	struct adapter  *adapter = iflib_get_softc(ctx);
3483 	struct ixgbe_hw *hw = &adapter->hw;
3484 
3485 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3486 
3487 	ixgbe_reset_hw(hw);
3488 	hw->adapter_stopped = FALSE;
3489 	ixgbe_stop_adapter(hw);
3490 	if (hw->mac.type == ixgbe_mac_82599EB)
3491 		ixgbe_stop_mac_link_on_d3_82599(hw);
3492 	/* Turn off the laser - noop with no optics */
3493 	ixgbe_disable_tx_laser(hw);
3494 
3495 	/* Update the stack */
3496 	adapter->link_up = FALSE;
3497 	ixgbe_if_update_admin_status(ctx);
3498 
3499 	/* reprogram the RAR[0] in case user changed it. */
3500 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3501 
3502 	return;
3503 } /* ixgbe_if_stop */
3504 
3505 /************************************************************************
3506  * ixgbe_if_update_admin_status - Update OS on link state
3507  *
3508  * Note: Only updates the OS on the cached link state.
3509  *       The real check of the hardware only happens with
3510  *       a link interrupt.
3511  ************************************************************************/
3512 static void
3513 ixgbe_if_update_admin_status(if_ctx_t ctx)
3514 {
3515 	struct adapter *adapter = iflib_get_softc(ctx);
3516 	device_t       dev = iflib_get_dev(ctx);
3517 
3518 	if (adapter->link_up) {
3519 		if (adapter->link_active == FALSE) {
3520 			if (bootverbose)
3521 				device_printf(dev, "Link is up %d Gbps %s\n",
3522 				    ((adapter->link_speed == 128) ? 10 : 1),
3523 				    "Full Duplex");
3524 			adapter->link_active = TRUE;
3525 			/* Update any Flow Control changes */
3526 			ixgbe_fc_enable(&adapter->hw);
3527 			/* Update DMA coalescing config */
3528 			ixgbe_config_dmac(adapter);
3529 			/* should actually be negotiated value */
3530 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3531 
3532 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3533 				ixgbe_ping_all_vfs(adapter);
3534 		}
3535 	} else { /* Link down */
3536 		if (adapter->link_active == TRUE) {
3537 			if (bootverbose)
3538 				device_printf(dev, "Link is Down\n");
3539 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3540 			adapter->link_active = FALSE;
3541 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3542 				ixgbe_ping_all_vfs(adapter);
3543 		}
3544 	}
3545 
3546 	ixgbe_update_stats_counters(adapter);
3547 
3548 	/* Re-enable link interrupts */
3549 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3550 } /* ixgbe_if_update_admin_status */
3551 
3552 /************************************************************************
3553  * ixgbe_config_dmac - Configure DMA Coalescing
3554  ************************************************************************/
3555 static void
3556 ixgbe_config_dmac(struct adapter *adapter)
3557 {
3558 	struct ixgbe_hw          *hw = &adapter->hw;
3559 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3560 
3561 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3562 		return;
3563 
3564 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3565 	    dcfg->link_speed ^ adapter->link_speed) {
3566 		dcfg->watchdog_timer = adapter->dmac;
3567 		dcfg->fcoe_en = FALSE;
3568 		dcfg->link_speed = adapter->link_speed;
3569 		dcfg->num_tcs = 1;
3570 
3571 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3572 		    dcfg->watchdog_timer, dcfg->link_speed);
3573 
3574 		hw->mac.ops.dmac_config(hw);
3575 	}
3576 } /* ixgbe_config_dmac */
3577 
3578 /************************************************************************
3579  * ixgbe_if_enable_intr
3580  ************************************************************************/
3581 void
3582 ixgbe_if_enable_intr(if_ctx_t ctx)
3583 {
3584 	struct adapter     *adapter = iflib_get_softc(ctx);
3585 	struct ixgbe_hw    *hw = &adapter->hw;
3586 	struct ix_rx_queue *que = adapter->rx_queues;
3587 	u32                mask, fwsm;
3588 
3589 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3590 
3591 	switch (adapter->hw.mac.type) {
3592 	case ixgbe_mac_82599EB:
3593 		mask |= IXGBE_EIMS_ECC;
3594 		/* Temperature sensor on some adapters */
3595 		mask |= IXGBE_EIMS_GPI_SDP0;
3596 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3597 		mask |= IXGBE_EIMS_GPI_SDP1;
3598 		mask |= IXGBE_EIMS_GPI_SDP2;
3599 		break;
3600 	case ixgbe_mac_X540:
3601 		/* Detect if Thermal Sensor is enabled */
3602 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3603 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3604 			mask |= IXGBE_EIMS_TS;
3605 		mask |= IXGBE_EIMS_ECC;
3606 		break;
3607 	case ixgbe_mac_X550:
3608 		/* MAC thermal sensor is automatically enabled */
3609 		mask |= IXGBE_EIMS_TS;
3610 		mask |= IXGBE_EIMS_ECC;
3611 		break;
3612 	case ixgbe_mac_X550EM_x:
3613 	case ixgbe_mac_X550EM_a:
3614 		/* Some devices use SDP0 for important information */
3615 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3616 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3617 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3618 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3619 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3620 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3621 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3622 		mask |= IXGBE_EIMS_ECC;
3623 		break;
3624 	default:
3625 		break;
3626 	}
3627 
3628 	/* Enable Fan Failure detection */
3629 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3630 		mask |= IXGBE_EIMS_GPI_SDP1;
3631 	/* Enable SR-IOV */
3632 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3633 		mask |= IXGBE_EIMS_MAILBOX;
3634 	/* Enable Flow Director */
3635 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3636 		mask |= IXGBE_EIMS_FLOW_DIR;
3637 
3638 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3639 
3640 	/* With MSI-X we use auto clear */
3641 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3642 		mask = IXGBE_EIMS_ENABLE_MASK;
3643 		/* Don't autoclear Link */
3644 		mask &= ~IXGBE_EIMS_OTHER;
3645 		mask &= ~IXGBE_EIMS_LSC;
3646 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3647 			mask &= ~IXGBE_EIMS_MAILBOX;
3648 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3649 	}
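	/*
	 * Link, "other" and (with SR-IOV) mailbox causes are deliberately
	 * left out of auto-clear so they stay latched until the admin path
	 * has handled them; ixgbe_if_update_admin_status() re-enables the
	 * link interrupt explicitly once it is done.
	 */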
3650 
3651 	/*
3652 	 * Now enable all queues, this is done separately to
3653 	 * allow for handling the extended (beyond 32) MSI-X
3654 	 * vectors that can be used by 82599
3655 	 */
3656 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3657 		ixgbe_enable_queue(adapter, que->msix);
3658 
3659 	IXGBE_WRITE_FLUSH(hw);
3660 
3661 } /* ixgbe_if_enable_intr */
3662 
3663 /************************************************************************
3664  * ixgbe_disable_intr
3665  ************************************************************************/
3666 static void
3667 ixgbe_if_disable_intr(if_ctx_t ctx)
3668 {
3669 	struct adapter *adapter = iflib_get_softc(ctx);
3670 
3671 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3672 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3673 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3674 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3675 	} else {
3676 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3677 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3678 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3679 	}
3680 	IXGBE_WRITE_FLUSH(&adapter->hw);
3681 
3682 } /* ixgbe_if_disable_intr */
3683 
3684 /************************************************************************
3685  * ixgbe_if_rx_queue_intr_enable
3686  ************************************************************************/
3687 static int
3688 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3689 {
3690 	struct adapter     *adapter = iflib_get_softc(ctx);
3691 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3692 
3693 	ixgbe_enable_queue(adapter, que->rxr.me);
3694 
3695 	return (0);
3696 } /* ixgbe_if_rx_queue_intr_enable */
3697 
3698 /************************************************************************
3699  * ixgbe_enable_queue
3700  ************************************************************************/
3701 static void
3702 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3703 {
3704 	struct ixgbe_hw *hw = &adapter->hw;
3705 	u64             queue = (u64)1 << vector;
3706 	u32             mask;
3707 
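	/*
	 * On 82599 and later the queue interrupt mask is 64 bits wide,
	 * split across EIMS_EX(0) (vectors 0-31) and EIMS_EX(1)
	 * (vectors 32-63); 82598 only has the single 32-bit EIMS.
	 */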
3708 	if (hw->mac.type == ixgbe_mac_82598EB) {
3709 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3710 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3711 	} else {
3712 		mask = (queue & 0xFFFFFFFF);
3713 		if (mask)
3714 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3715 		mask = (queue >> 32);
3716 		if (mask)
3717 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3718 	}
3719 } /* ixgbe_enable_queue */
3720 
3721 /************************************************************************
3722  * ixgbe_disable_queue
3723  ************************************************************************/
3724 static void
3725 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3726 {
3727 	struct ixgbe_hw *hw = &adapter->hw;
3728 	u64             queue = (u64)1 << vector;
3729 	u32             mask;
3730 
3731 	if (hw->mac.type == ixgbe_mac_82598EB) {
3732 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3733 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3734 	} else {
3735 		mask = (queue & 0xFFFFFFFF);
3736 		if (mask)
3737 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3738 		mask = (queue >> 32);
3739 		if (mask)
3740 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3741 	}
3742 } /* ixgbe_disable_queue */
3743 
3744 /************************************************************************
3745  * ixgbe_intr - Legacy Interrupt Service Routine
3746  ************************************************************************/
3747 int
3748 ixgbe_intr(void *arg)
3749 {
3750 	struct adapter     *adapter = arg;
3751 	struct ix_rx_queue *que = adapter->rx_queues;
3752 	struct ixgbe_hw    *hw = &adapter->hw;
3753 	if_ctx_t           ctx = adapter->ctx;
3754 	u32                eicr, eicr_mask;
3755 
3756 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3757 
3758 	++que->irqs;
3759 	if (eicr == 0) {
3760 		ixgbe_if_enable_intr(ctx);
3761 		return (FILTER_HANDLED);
3762 	}
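	/*
	 * From here on the handler only latches causes and defers the real
	 * work: FILTER_SCHEDULE_THREAD asks iflib to run the queue task,
	 * while the spurious-interrupt path above simply re-enables
	 * interrupts and reports FILTER_HANDLED.
	 */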
3763 
3764 	/* Check for fan failure */
3765 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3766 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3767 		device_printf(adapter->dev,
3768 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3769 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3770 	}
3771 
3772 	/* Link status change */
3773 	if (eicr & IXGBE_EICR_LSC) {
3774 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3775 		iflib_admin_intr_deferred(ctx);
3776 	}
3777 
3778 	if (ixgbe_is_sfp(hw)) {
3779 		/* Pluggable optics-related interrupt */
3780 		if (hw->mac.type >= ixgbe_mac_X540)
3781 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3782 		else
3783 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3784 
3785 		if (eicr & eicr_mask) {
3786 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3787 			GROUPTASK_ENQUEUE(&adapter->mod_task);
3788 		}
3789 
3790 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3791 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3792 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3793 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3794 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
3795 				GROUPTASK_ENQUEUE(&adapter->msf_task);
3796 		}
3797 	}
3798 
3799 	/* External PHY interrupt */
3800 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3801 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3802 		GROUPTASK_ENQUEUE(&adapter->phy_task);
3803 
3804 	return (FILTER_SCHEDULE_THREAD);
3805 } /* ixgbe_intr */
3806 
3807 /************************************************************************
3808  * ixgbe_free_pci_resources
3809  ************************************************************************/
3810 static void
3811 ixgbe_free_pci_resources(if_ctx_t ctx)
3812 {
3813 	struct adapter *adapter = iflib_get_softc(ctx);
3814 	struct         ix_rx_queue *que = adapter->rx_queues;
3815 	device_t       dev = iflib_get_dev(ctx);
3816 
3817 	/* Release all msix queue resources */
3818 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3819 		iflib_irq_free(ctx, &adapter->irq);
3820 
3821 	if (que != NULL) {
3822 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3823 			iflib_irq_free(ctx, &que->que_irq);
3824 		}
3825 	}
3826 
3827 	/*
3828 	 * Free link/admin interrupt
3829 	 */
3830 	if (adapter->pci_mem != NULL)
3831 		bus_release_resource(dev, SYS_RES_MEMORY,
3832 		                     PCIR_BAR(0), adapter->pci_mem);
3833 
3834 } /* ixgbe_free_pci_resources */
3835 
3836 /************************************************************************
3837  * ixgbe_sysctl_flowcntl
3838  *
3839  *   SYSCTL wrapper around setting Flow Control
3840  ************************************************************************/
3841 static int
3842 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3843 {
3844 	struct adapter *adapter;
3845 	int            error, fc;
3846 
3847 	adapter = (struct adapter *)arg1;
3848 	fc = adapter->hw.fc.current_mode;
3849 
3850 	error = sysctl_handle_int(oidp, &fc, 0, req);
3851 	if ((error) || (req->newptr == NULL))
3852 		return (error);
3853 
3854 	/* Don't bother if it's not changed */
3855 	if (fc == adapter->hw.fc.current_mode)
3856 		return (0);
3857 
3858 	return ixgbe_set_flowcntl(adapter, fc);
3859 } /* ixgbe_sysctl_flowcntl */
3860 
3861 /************************************************************************
3862  * ixgbe_set_flowcntl - Set flow control
3863  *
3864  *   Flow control values:
3865  *     0 - off
3866  *     1 - rx pause
3867  *     2 - tx pause
3868  *     3 - full
3869  ************************************************************************/
3870 static int
3871 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3872 {
3873 	switch (fc) {
3874 	case ixgbe_fc_rx_pause:
3875 	case ixgbe_fc_tx_pause:
3876 	case ixgbe_fc_full:
3877 		adapter->hw.fc.requested_mode = fc;
3878 		if (adapter->num_rx_queues > 1)
3879 			ixgbe_disable_rx_drop(adapter);
3880 		break;
3881 	case ixgbe_fc_none:
3882 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3883 		if (adapter->num_rx_queues > 1)
3884 			ixgbe_enable_rx_drop(adapter);
3885 		break;
3886 	default:
3887 		return (EINVAL);
3888 	}
3889 
3890 	/* Don't autoneg if forcing a value */
3891 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3892 	ixgbe_fc_enable(&adapter->hw);
3893 
3894 	return (0);
3895 } /* ixgbe_set_flowcntl */
3896 
3897 /************************************************************************
3898  * ixgbe_enable_rx_drop
3899  *
3900  *   Enable the hardware to drop packets when the buffer is
3901  *   full. This is useful with multiqueue, so that no single
3902  *   queue being full stalls the entire RX engine. We only
3903  *   enable this when Multiqueue is enabled AND Flow Control
3904  *   is disabled.
3905  ************************************************************************/
3906 static void
3907 ixgbe_enable_rx_drop(struct adapter *adapter)
3908 {
3909 	struct ixgbe_hw *hw = &adapter->hw;
3910 	struct rx_ring  *rxr;
3911 	u32             srrctl;
3912 
3913 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3914 		rxr = &adapter->rx_queues[i].rxr;
3915 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3916 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3917 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3918 	}
3919 
3920 	/* enable drop for each vf */
3921 	for (int i = 0; i < adapter->num_vfs; i++) {
3922 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3923 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3924 		                IXGBE_QDE_ENABLE));
3925 	}
3926 } /* ixgbe_enable_rx_drop */
3927 
3928 /************************************************************************
3929  * ixgbe_disable_rx_drop
3930  ************************************************************************/
3931 static void
3932 ixgbe_disable_rx_drop(struct adapter *adapter)
3933 {
3934 	struct ixgbe_hw *hw = &adapter->hw;
3935 	struct rx_ring  *rxr;
3936 	u32             srrctl;
3937 
3938 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3939 		rxr = &adapter->rx_queues[i].rxr;
3940 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3941 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3942 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3943 	}
3944 
3945 	/* disable drop for each vf */
3946 	for (int i = 0; i < adapter->num_vfs; i++) {
3947 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3948 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3949 	}
3950 } /* ixgbe_disable_rx_drop */
3951 
3952 /************************************************************************
3953  * ixgbe_sysctl_advertise
3954  *
3955  *   SYSCTL wrapper around setting advertised speed
3956  ************************************************************************/
3957 static int
3958 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3959 {
3960 	struct adapter *adapter;
3961 	int            error, advertise;
3962 
3963 	adapter = (struct adapter *)arg1;
3964 	advertise = adapter->advertise;
3965 
3966 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3967 	if ((error) || (req->newptr == NULL))
3968 		return (error);
3969 
3970 	return ixgbe_set_advertise(adapter, advertise);
3971 } /* ixgbe_sysctl_advertise */
3972 
3973 /************************************************************************
3974  * ixgbe_set_advertise - Control advertised link speed
3975  *
3976  *   Flags:
3977  *     0x1 - advertise 100 Mb
3978  *     0x2 - advertise 1G
3979  *     0x4 - advertise 10G
3980  *     0x8 - advertise 10 Mb (yes, Mb)
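 *
 *   Example: a value of 0x6 advertises 1G and 10G but not 100 Mb or 10 Mb.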
3981  ************************************************************************/
3982 static int
3983 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3984 {
3985 	device_t         dev = iflib_get_dev(adapter->ctx);
3986 	struct ixgbe_hw  *hw;
3987 	ixgbe_link_speed speed = 0;
3988 	ixgbe_link_speed link_caps = 0;
3989 	s32              err = IXGBE_NOT_IMPLEMENTED;
3990 	bool             negotiate = FALSE;
3991 
3992 	/* Checks to validate new value */
3993 	if (adapter->advertise == advertise) /* no change */
3994 		return (0);
3995 
3996 	hw = &adapter->hw;
3997 
3998 	/* No speed changes for backplane media */
3999 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4000 		return (ENODEV);
4001 
4002 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4003 	      (hw->phy.multispeed_fiber))) {
4004 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4005 		return (EINVAL);
4006 	}
4007 
4008 	if (advertise < 0x1 || advertise > 0xF) {
4009 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4010 		return (EINVAL);
4011 	}
4012 
4013 	if (hw->mac.ops.get_link_capabilities) {
4014 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4015 		    &negotiate);
4016 		if (err != IXGBE_SUCCESS) {
4017 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4018 			return (ENODEV);
4019 		}
4020 	}
4021 
4022 	/* Set new value and report new advertised mode */
4023 	if (advertise & 0x1) {
4024 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4025 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4026 			return (EINVAL);
4027 		}
4028 		speed |= IXGBE_LINK_SPEED_100_FULL;
4029 	}
4030 	if (advertise & 0x2) {
4031 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4032 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4033 			return (EINVAL);
4034 		}
4035 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4036 	}
4037 	if (advertise & 0x4) {
4038 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4039 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4040 			return (EINVAL);
4041 		}
4042 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4043 	}
4044 	if (advertise & 0x8) {
4045 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4046 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4047 			return (EINVAL);
4048 		}
4049 		speed |= IXGBE_LINK_SPEED_10_FULL;
4050 	}
4051 
4052 	hw->mac.autotry_restart = TRUE;
4053 	hw->mac.ops.setup_link(hw, speed, TRUE);
4054 	adapter->advertise = advertise;
4055 
4056 	return (0);
4057 } /* ixgbe_set_advertise */
4058 
4059 /************************************************************************
4060  * ixgbe_get_advertise - Get current advertised speed settings
4061  *
4062  *   Formatted for sysctl usage.
4063  *   Flags:
4064  *     0x1 - advertise 100 Mb
4065  *     0x2 - advertise 1G
4066  *     0x4 - advertise 10G
4067  *     0x8 - advertise 10 Mb (yes, Mb)
4068  ************************************************************************/
4069 static int
4070 ixgbe_get_advertise(struct adapter *adapter)
4071 {
4072 	struct ixgbe_hw  *hw = &adapter->hw;
4073 	int              speed;
4074 	ixgbe_link_speed link_caps = 0;
4075 	s32              err;
4076 	bool             negotiate = FALSE;
4077 
4078 	/*
4079 	 * Advertised speed means nothing unless it's copper or
4080 	 * multi-speed fiber
4081 	 */
4082 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4083 	    !(hw->phy.multispeed_fiber))
4084 		return (0);
4085 
4086 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4087 	if (err != IXGBE_SUCCESS)
4088 		return (0);
4089 
4090 	speed =
4091 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4092 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4093 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4094 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4095 
4096 	return speed;
4097 } /* ixgbe_get_advertise */
4098 
4099 /************************************************************************
4100  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4101  *
4102  *   Control values:
4103  *     0/1 - off / on (use default value of 1000)
4104  *
4105  *     Legal timer values are:
4106  *     50,100,250,500,1000,2000,5000,10000
4107  *
4108  *     Turning off interrupt moderation will also turn this off.
4109  ************************************************************************/
4110 static int
4111 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4112 {
4113 	struct adapter *adapter = (struct adapter *)arg1;
4114 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4115 	int            error;
4116 	u16            newval;
4117 
4118 	newval = adapter->dmac;
4119 	error = sysctl_handle_16(oidp, &newval, 0, req);
4120 	if ((error) || (req->newptr == NULL))
4121 		return (error);
4122 
4123 	switch (newval) {
4124 	case 0:
4125 		/* Disabled */
4126 		adapter->dmac = 0;
4127 		break;
4128 	case 1:
4129 		/* Enable and use default */
4130 		adapter->dmac = 1000;
4131 		break;
4132 	case 50:
4133 	case 100:
4134 	case 250:
4135 	case 500:
4136 	case 1000:
4137 	case 2000:
4138 	case 5000:
4139 	case 10000:
4140 		/* Legal values - allow */
4141 		adapter->dmac = newval;
4142 		break;
4143 	default:
4144 		/* Do nothing, illegal value */
4145 		return (EINVAL);
4146 	}
4147 
4148 	/* Re-initialize hardware if it's already running */
4149 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4150 		ifp->if_init(ifp);
4151 
4152 	return (0);
4153 } /* ixgbe_sysctl_dmac */
4154 
4155 #ifdef IXGBE_DEBUG
4156 /************************************************************************
4157  * ixgbe_sysctl_power_state
4158  *
4159  *   Sysctl to test power states
4160  *   Values:
4161  *     0      - set device to D0
4162  *     3      - set device to D3
4163  *     (none) - get current device power state
4164  ************************************************************************/
4165 static int
4166 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4167 {
4168 	struct adapter *adapter = (struct adapter *)arg1;
4169 	device_t       dev = adapter->dev;
4170 	int            curr_ps, new_ps, error = 0;
4171 
4172 	curr_ps = new_ps = pci_get_powerstate(dev);
4173 
4174 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4175 	if ((error) || (req->newptr == NULL))
4176 		return (error);
4177 
4178 	if (new_ps == curr_ps)
4179 		return (0);
4180 
4181 	if (new_ps == 3 && curr_ps == 0)
4182 		error = DEVICE_SUSPEND(dev);
4183 	else if (new_ps == 0 && curr_ps == 3)
4184 		error = DEVICE_RESUME(dev);
4185 	else
4186 		return (EINVAL);
4187 
4188 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4189 
4190 	return (error);
4191 } /* ixgbe_sysctl_power_state */
4192 #endif
4193 
4194 /************************************************************************
4195  * ixgbe_sysctl_wol_enable
4196  *
4197  *   Sysctl to enable/disable the WoL capability,
4198  *   if supported by the adapter.
4199  *
4200  *   Values:
4201  *     0 - disabled
4202  *     1 - enabled
4203  ************************************************************************/
4204 static int
4205 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4206 {
4207 	struct adapter  *adapter = (struct adapter *)arg1;
4208 	struct ixgbe_hw *hw = &adapter->hw;
4209 	int             new_wol_enabled;
4210 	int             error = 0;
4211 
4212 	new_wol_enabled = hw->wol_enabled;
4213 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4214 	if ((error) || (req->newptr == NULL))
4215 		return (error);
4216 	new_wol_enabled = !!(new_wol_enabled);
4217 	if (new_wol_enabled == hw->wol_enabled)
4218 		return (0);
4219 
4220 	if (new_wol_enabled > 0 && !adapter->wol_support)
4221 		return (ENODEV);
4222 	else
4223 		hw->wol_enabled = new_wol_enabled;
4224 
4225 	return (0);
4226 } /* ixgbe_sysctl_wol_enable */
4227 
4228 /************************************************************************
4229  * ixgbe_sysctl_wufc - Wake Up Filter Control
4230  *
4231  *   Sysctl to enable/disable the types of packets that the
4232  *   adapter will wake up on upon receipt.
4233  *   Flags:
4234  *     0x1  - Link Status Change
4235  *     0x2  - Magic Packet
4236  *     0x4  - Direct Exact
4237  *     0x8  - Directed Multicast
4238  *     0x10 - Broadcast
4239  *     0x20 - ARP/IPv4 Request Packet
4240  *     0x40 - Direct IPv4 Packet
4241  *     0x80 - Direct IPv6 Packet
4242  *
4243  *   Settings not listed above will cause the sysctl to return an error.
4244  ************************************************************************/
4245 static int
4246 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4247 {
4248 	struct adapter *adapter = (struct adapter *)arg1;
4249 	int            error = 0;
4250 	u32            new_wufc;
4251 
4252 	new_wufc = adapter->wufc;
4253 
4254 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4255 	if ((error) || (req->newptr == NULL))
4256 		return (error);
4257 	if (new_wufc == adapter->wufc)
4258 		return (0);
4259 
4260 	if (new_wufc & 0xffffff00)
4261 		return (EINVAL);
4262 
4263 	new_wufc &= 0xff;
4264 	new_wufc |= (0xffffff00 & adapter->wufc);
4265 	adapter->wufc = new_wufc;
4266 
4267 	return (0);
4268 } /* ixgbe_sysctl_wufc */
4269 
4270 #ifdef IXGBE_DEBUG
4271 /************************************************************************
4272  * ixgbe_sysctl_print_rss_config
4273  ************************************************************************/
4274 static int
4275 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4276 {
4277 	struct adapter  *adapter = (struct adapter *)arg1;
4278 	struct ixgbe_hw *hw = &adapter->hw;
4279 	device_t        dev = adapter->dev;
4280 	struct sbuf     *buf;
4281 	int             error = 0, reta_size;
4282 	u32             reg;
4283 
4284 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4285 	if (!buf) {
4286 		device_printf(dev, "Could not allocate sbuf for output.\n");
4287 		return (ENOMEM);
4288 	}
4289 
4290 	/* TODO: use sbufs to make a string to print out */
4291 	/* Set multiplier for RETA setup and table size based on MAC */
4292 	switch (adapter->hw.mac.type) {
4293 	case ixgbe_mac_X550:
4294 	case ixgbe_mac_X550EM_x:
4295 	case ixgbe_mac_X550EM_a:
4296 		reta_size = 128;
4297 		break;
4298 	default:
4299 		reta_size = 32;
4300 		break;
4301 	}
4302 
4303 	/* Print out the redirection table */
4304 	sbuf_cat(buf, "\n");
4305 	for (int i = 0; i < reta_size; i++) {
4306 		if (i < 32) {
4307 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4308 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4309 		} else {
4310 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4311 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4312 		}
4313 	}
4314 
4315 	// TODO: print more config
4316 
4317 	error = sbuf_finish(buf);
4318 	if (error)
4319 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4320 
4321 	sbuf_delete(buf);
4322 
4323 	return (0);
4324 } /* ixgbe_sysctl_print_rss_config */
4325 #endif /* IXGBE_DEBUG */
4326 
4327 /************************************************************************
4328  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4329  *
4330  *   For X552/X557-AT devices using an external PHY
4331  ************************************************************************/
4332 static int
4333 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4334 {
4335 	struct adapter  *adapter = (struct adapter *)arg1;
4336 	struct ixgbe_hw *hw = &adapter->hw;
4337 	u16             reg;
4338 
4339 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4340 		device_printf(iflib_get_dev(adapter->ctx),
4341 		    "Device has no supported external thermal sensor.\n");
4342 		return (ENODEV);
4343 	}
4344 
4345 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4346 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4347 		device_printf(iflib_get_dev(adapter->ctx),
4348 		    "Error reading from PHY's current temperature register\n");
4349 		return (EAGAIN);
4350 	}
4351 
4352 	/* Shift temp for output */
4353 	reg = reg >> 8;
4354 
4355 	return (sysctl_handle_16(oidp, NULL, reg, req));
4356 } /* ixgbe_sysctl_phy_temp */
4357 
4358 /************************************************************************
4359  * ixgbe_sysctl_phy_overtemp_occurred
4360  *
4361  *   Reports (directly from the PHY) whether the current PHY
4362  *   temperature is over the overtemp threshold.
4363  ************************************************************************/
4364 static int
4365 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4366 {
4367 	struct adapter  *adapter = (struct adapter *)arg1;
4368 	struct ixgbe_hw *hw = &adapter->hw;
4369 	u16             reg;
4370 
4371 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4372 		device_printf(iflib_get_dev(adapter->ctx),
4373 		    "Device has no supported external thermal sensor.\n");
4374 		return (ENODEV);
4375 	}
4376 
4377 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4378 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4379 		device_printf(iflib_get_dev(adapter->ctx),
4380 		    "Error reading from PHY's temperature status register\n");
4381 		return (EAGAIN);
4382 	}
4383 
4384 	/* Get occurrence bit */
4385 	reg = !!(reg & 0x4000);
4386 
4387 	return (sysctl_handle_16(oidp, NULL, reg, req));
4388 } /* ixgbe_sysctl_phy_overtemp_occurred */
4389 
4390 /************************************************************************
4391  * ixgbe_sysctl_eee_state
4392  *
4393  *   Sysctl to set EEE power saving feature
4394  *   Values:
4395  *     0      - disable EEE
4396  *     1      - enable EEE
4397  *     (none) - get current device EEE state
4398  ************************************************************************/
4399 static int
4400 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4401 {
4402 	struct adapter *adapter = (struct adapter *)arg1;
4403 	device_t       dev = adapter->dev;
4404 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4405 	int            curr_eee, new_eee, error = 0;
4406 	s32            retval;
4407 
4408 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4409 
4410 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4411 	if ((error) || (req->newptr == NULL))
4412 		return (error);
4413 
4414 	/* Nothing to do */
4415 	if (new_eee == curr_eee)
4416 		return (0);
4417 
4418 	/* Not supported */
4419 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4420 		return (EINVAL);
4421 
4422 	/* Bounds checking */
4423 	if ((new_eee < 0) || (new_eee > 1))
4424 		return (EINVAL);
4425 
4426 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4427 	if (retval) {
4428 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4429 		return (EINVAL);
4430 	}
4431 
4432 	/* Restart auto-neg */
4433 	ifp->if_init(ifp);
4434 
4435 	device_printf(dev, "New EEE state: %d\n", new_eee);
4436 
4437 	/* Cache new value */
4438 	if (new_eee)
4439 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4440 	else
4441 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4442 
4443 	return (error);
4444 } /* ixgbe_sysctl_eee_state */
4445 
4446 /************************************************************************
4447  * ixgbe_init_device_features
4448  ************************************************************************/
4449 static void
4450 ixgbe_init_device_features(struct adapter *adapter)
4451 {
4452 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4453 	                  | IXGBE_FEATURE_RSS
4454 	                  | IXGBE_FEATURE_MSI
4455 	                  | IXGBE_FEATURE_MSIX
4456 	                  | IXGBE_FEATURE_LEGACY_IRQ;
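	/*
	 * feat_cap describes what the silicon can do; feat_en, filled in
	 * below, is the subset actually enabled for this boot after
	 * defaults, tunables and dependency checks are applied.
	 */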
4457 
4458 	/* Set capabilities first... */
4459 	switch (adapter->hw.mac.type) {
4460 	case ixgbe_mac_82598EB:
4461 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4462 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4463 		break;
4464 	case ixgbe_mac_X540:
4465 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4466 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4467 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4468 		    (adapter->hw.bus.func == 0))
4469 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4470 		break;
4471 	case ixgbe_mac_X550:
4472 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4473 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4474 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4475 		break;
4476 	case ixgbe_mac_X550EM_x:
4477 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4478 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4479 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4480 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4481 		break;
4482 	case ixgbe_mac_X550EM_a:
4483 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4484 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4485 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4486 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4487 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4488 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4489 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4490 		}
4491 		break;
4492 	case ixgbe_mac_82599EB:
4493 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4494 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4495 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4496 		    (adapter->hw.bus.func == 0))
4497 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4498 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4499 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4500 		break;
4501 	default:
4502 		break;
4503 	}
4504 
4505 	/* Enabled by default... */
4506 	/* Fan failure detection */
4507 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4508 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4509 	/* Netmap */
4510 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4511 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4512 	/* EEE */
4513 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4514 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4515 	/* Thermal Sensor */
4516 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4517 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4518 
4519 	/* Enabled via global sysctl... */
4520 	/* Flow Director */
4521 	if (ixgbe_enable_fdir) {
4522 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4523 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4524 		else
4525 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4526 	}
4527 	/*
4528 	 * Message Signal Interrupts - Extended (MSI-X)
4529 	 * Normal MSI is only enabled if MSI-X calls fail.
4530 	 */
4531 	if (!ixgbe_enable_msix)
4532 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4533 	/* Receive-Side Scaling (RSS) */
4534 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4535 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4536 
4537 	/* Disable features with unmet dependencies... */
4538 	/* No MSI-X */
4539 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4540 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4541 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4542 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4543 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4544 	}
4545 } /* ixgbe_init_device_features */
4546 
4547 /************************************************************************
4548  * ixgbe_check_fan_failure
4549  ************************************************************************/
4550 static void
4551 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4552 {
4553 	u32 mask;
4554 
4555 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4556 	    IXGBE_ESDP_SDP1;
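	/*
	 * The caller passes EICR when invoked from the interrupt path and
	 * the raw ESDP pin state otherwise, hence the two different masks
	 * for the same SDP1 fan-fail signal.
	 */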
4557 
4558 	if (reg & mask)
4559 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4560 } /* ixgbe_check_fan_failure */
4561