xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision 137a344c6341d1469432e9deb3a25593f96672ad)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ixgbe_sriov.h"
42 #include "ifdi_if.h"
43 
44 #include <net/netmap.h>
45 #include <dev/netmap/netmap_kern.h>
46 
47 /************************************************************************
48  * Driver version
49  ************************************************************************/
50 char ixgbe_driver_version[] = "4.0.0-k";
51 
52 
53 /************************************************************************
54  * PCI Device ID Table
55  *
56  *   Used by probe to select devices to load on
57  *   Each entry pairs a device ID with a description string
58  *   Last entry must be PVID_END
59  *
60  *   { Vendor ID, Device ID, Description String }
61  ************************************************************************/
62 static pci_vendor_info_t ixgbe_vendor_info_array[] =
63 {
64   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
65   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
66   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
67   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
68   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
69   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
70   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
71   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
72   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
73   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
74   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
75   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
76   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
77   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
78   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
79   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
80   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
81   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
82   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
83   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
84   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
85   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
86   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
87   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
88   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
89   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
90   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
91   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
92   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
93   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
94   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,  "Intel(R) PRO/10GbE PCI-Express Network Driver"),
95   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
96   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
97   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
98   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
99   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
100   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
101   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
102   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
103   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
104   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
105   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
106   PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) PRO/10GbE PCI-Express Network Driver"),
107 	/* required last entry */
108   PVID_END
109 };
110 
111 static void *ixgbe_register(device_t dev);
112 static int  ixgbe_if_attach_pre(if_ctx_t ctx);
113 static int  ixgbe_if_attach_post(if_ctx_t ctx);
114 static int  ixgbe_if_detach(if_ctx_t ctx);
115 static int  ixgbe_if_shutdown(if_ctx_t ctx);
116 static int  ixgbe_if_suspend(if_ctx_t ctx);
117 static int  ixgbe_if_resume(if_ctx_t ctx);
118 
119 static void ixgbe_if_stop(if_ctx_t ctx);
120 void ixgbe_if_enable_intr(if_ctx_t ctx);
121 static void ixgbe_if_disable_intr(if_ctx_t ctx);
122 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
123 static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
124 static int  ixgbe_if_media_change(if_ctx_t ctx);
125 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
126 static int  ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
127 static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
128 static void ixgbe_if_multi_set(if_ctx_t ctx);
129 static int  ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
130 static int  ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
131                                      uint64_t *paddrs, int ntxqs, int ntxqsets);
132 static int  ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
133                                      uint64_t *paddrs, int nrxqs, int nrxqsets);
134 static void ixgbe_if_queues_free(if_ctx_t ctx);
135 static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
136 static void ixgbe_if_update_admin_status(if_ctx_t ctx);
137 static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
138 static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
139 
140 int ixgbe_intr(void *arg);
141 
142 /************************************************************************
143  * Function prototypes
144  ************************************************************************/
145 #if __FreeBSD_version >= 1100036
146 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
147 #endif
148 
149 static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
150 static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
151 static void ixgbe_add_device_sysctls(if_ctx_t ctx);
152 static int  ixgbe_allocate_pci_resources(if_ctx_t ctx);
153 static int  ixgbe_setup_low_power_mode(if_ctx_t ctx);
154 
155 static void ixgbe_config_dmac(struct adapter *adapter);
156 static void ixgbe_configure_ivars(struct adapter *adapter);
157 static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
158                            s8 type);
159 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
160 static bool ixgbe_sfp_probe(if_ctx_t ctx);
161 
162 static void ixgbe_free_pci_resources(if_ctx_t ctx);
163 
164 static int  ixgbe_msix_link(void *arg);
165 static int  ixgbe_msix_que(void *arg);
166 static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
167 static void ixgbe_initialize_receive_units(if_ctx_t ctx);
168 static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
169 
170 static int  ixgbe_setup_interface(if_ctx_t ctx);
171 static void ixgbe_init_device_features(struct adapter *adapter);
172 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
173 static void ixgbe_add_media_types(if_ctx_t ctx);
174 static void ixgbe_update_stats_counters(struct adapter *adapter);
175 static void ixgbe_config_link(struct adapter *adapter);
176 static void ixgbe_get_slot_info(struct adapter *);
177 static void ixgbe_check_wol_support(struct adapter *adapter);
178 static void ixgbe_enable_rx_drop(struct adapter *);
179 static void ixgbe_disable_rx_drop(struct adapter *);
180 
181 static void ixgbe_add_hw_stats(struct adapter *adapter);
182 static int  ixgbe_set_flowcntl(struct adapter *, int);
183 static int  ixgbe_set_advertise(struct adapter *, int);
184 static int  ixgbe_get_advertise(struct adapter *);
185 static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
186 static void ixgbe_config_gpie(struct adapter *adapter);
187 static void ixgbe_config_delay_values(struct adapter *adapter);
188 
189 /* Sysctl handlers */
190 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
191 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
192 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
193 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
194 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
195 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
196 #ifdef IXGBE_DEBUG
197 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
198 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
199 #endif
200 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
201 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
202 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
203 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
204 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
205 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
206 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
207 
208 /* Deferred interrupt tasklets */
209 static void ixgbe_handle_msf(void *);
210 static void ixgbe_handle_mod(void *);
211 static void ixgbe_handle_phy(void *);
212 
213 /************************************************************************
214  *  FreeBSD Device Interface Entry Points
215  ************************************************************************/
216 static device_method_t ix_methods[] = {
217 	/* Device interface */
218 	DEVMETHOD(device_register, ixgbe_register),
219 	DEVMETHOD(device_probe, iflib_device_probe),
220 	DEVMETHOD(device_attach, iflib_device_attach),
221 	DEVMETHOD(device_detach, iflib_device_detach),
222 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
223 	DEVMETHOD(device_suspend, iflib_device_suspend),
224 	DEVMETHOD(device_resume, iflib_device_resume),
225 #ifdef PCI_IOV
226 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
227 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
228 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
229 #endif /* PCI_IOV */
230 	DEVMETHOD_END
231 };
232 
233 static driver_t ix_driver = {
234 	"ix", ix_methods, sizeof(struct adapter),
235 };
236 
237 devclass_t ix_devclass;
238 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
239 
240 MODULE_DEPEND(ix, pci, 1, 1, 1);
241 MODULE_DEPEND(ix, ether, 1, 1, 1);
242 MODULE_DEPEND(ix, iflib, 1, 1, 1);
243 
244 static device_method_t ixgbe_if_methods[] = {
245 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
246 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
247 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
248 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
249 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
250 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
251 	DEVMETHOD(ifdi_init, ixgbe_if_init),
252 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
253 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
254 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
255 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
256 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
257 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
258 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
259 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
260 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
261 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
262 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
263 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
264 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
265 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
266 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
267 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
268 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
269 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
270 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
271 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
272 #ifdef PCI_IOV
273 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
274 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
275 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
276 #endif /* PCI_IOV */
277 	DEVMETHOD_END
278 };
279 
280 /*
281  * TUNEABLE PARAMETERS:
282  */
283 
284 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
285 static driver_t ixgbe_if_driver = {
286   "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
287 };
288 
289 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
290 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
291     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
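/*
 * For reference (illustrative, assuming IXGBE_LOW_LATENCY is 128 as in
 * ixgbe.h): the default above works out to 4000000 / 128 = 31250
 * interrupts per second.
 */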
292 
293 /* Flow control setting, default to full */
294 static int ixgbe_flow_control = ixgbe_fc_full;
295 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
296     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
297 
298 /* Advertise Speed, default to 0 (auto) */
299 static int ixgbe_advertise_speed = 0;
300 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
301     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
302 
303 /*
304  * Smart speed setting, default to on.
305  * This currently works only as a compile-time
306  * option, since it is applied during attach;
307  * set it to 'ixgbe_smart_speed_off' to
308  * disable.
309  */
310 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
311 
312 /*
313  * MSI-X should be the default for best performance,
314  * but this allows it to be forced off for testing.
315  */
316 static int ixgbe_enable_msix = 1;
317 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
318     "Enable MSI-X interrupts");
319 
320 /*
321  * Setting this to nonzero allows the use
322  * of unsupported SFP+ modules; note that
323  * by doing so you are on your own :)
324  */
325 static int allow_unsupported_sfp = FALSE;
326 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
327     &allow_unsupported_sfp, 0,
328     "Allow unsupported SFP modules...use at your own risk");
329 
330 /*
331  * Not sure if Flow Director is fully baked,
332  * so we'll default to turning it off.
333  */
334 static int ixgbe_enable_fdir = 0;
335 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
336     "Enable Flow Director");
337 
338 /* Receive-Side Scaling */
339 static int ixgbe_enable_rss = 1;
340 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
341     "Enable Receive-Side Scaling (RSS)");
342 
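/*
 * Illustrative only: the CTLFLAG_RDTUN knobs above are normally set as
 * loader tunables.  A hypothetical /boot/loader.conf fragment using the
 * sysctl names defined above (values shown restate the driver defaults):
 *
 *   hw.ix.enable_msix=1
 *   hw.ix.flow_control=3        # ixgbe_fc_full
 *   hw.ix.advertise_speed=0     # auto
 *   hw.ix.unsupported_sfp=0
 */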
343 #if 0
344 /* Keep running tab on them for sanity check */
345 static int ixgbe_total_ports;
346 #endif
347 
348 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
349 
350 /*
351  * For Flow Director: this is the sampling interval for TX packets
352  * added to the filter pool; a rate of 20 means every 20th packet is probed.
353  *
354  * This feature can be disabled by setting this to 0.
355  */
356 static int atr_sample_rate = 20;
357 
358 extern struct if_txrx ixgbe_txrx;
359 
360 static struct if_shared_ctx ixgbe_sctx_init = {
361 	.isc_magic = IFLIB_MAGIC,
362 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
363 	.isc_tx_maxsize = IXGBE_TSO_SIZE,
364 
365 	.isc_tx_maxsegsize = PAGE_SIZE,
366 
367 	.isc_rx_maxsize = PAGE_SIZE*4,
368 	.isc_rx_nsegments = 1,
369 	.isc_rx_maxsegsize = PAGE_SIZE*4,
370 	.isc_nfl = 1,
371 	.isc_ntxqs = 1,
372 	.isc_nrxqs = 1,
373 
374 	.isc_admin_intrcnt = 1,
375 	.isc_vendor_info = ixgbe_vendor_info_array,
376 	.isc_driver_version = ixgbe_driver_version,
377 	.isc_driver = &ixgbe_if_driver,
378 
379 	.isc_nrxd_min = {MIN_RXD},
380 	.isc_ntxd_min = {MIN_TXD},
381 	.isc_nrxd_max = {MAX_RXD},
382 	.isc_ntxd_max = {MAX_TXD},
383 	.isc_nrxd_default = {DEFAULT_RXD},
384 	.isc_ntxd_default = {DEFAULT_TXD},
385 };
386 
387 if_shared_ctx_t ixgbe_sctx = &ixgbe_sctx_init;
388 
389 /************************************************************************
390  * ixgbe_if_tx_queues_alloc
391  ************************************************************************/
392 static int
393 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
394                          int ntxqs, int ntxqsets)
395 {
396 	struct adapter     *adapter = iflib_get_softc(ctx);
397 	if_softc_ctx_t     scctx = adapter->shared;
398 	struct ix_tx_queue *que;
399 	int                i, j, error;
400 
401 	MPASS(adapter->num_tx_queues > 0);
402 	MPASS(adapter->num_tx_queues == ntxqsets);
403 	MPASS(ntxqs == 1);
404 
405 	/* Allocate queue structure memory */
406 	adapter->tx_queues =
407 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
408 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
409 	if (!adapter->tx_queues) {
410 		device_printf(iflib_get_dev(ctx),
411 		    "Unable to allocate TX ring memory\n");
412 		return (ENOMEM);
413 	}
414 
415 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
416 		struct tx_ring *txr = &que->txr;
417 
418 		/* In case SR-IOV is enabled, align the index properly */
419 		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
420 		    i);
421 
422 		txr->adapter = que->adapter = adapter;
423 		adapter->active_queues |= (u64)1 << txr->me;
424 
425 		/* Allocate report status array */
426 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
427 		if (txr->tx_rsq == NULL) {
428 			error = ENOMEM;
429 			goto fail;
430 		}
431 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
432 			txr->tx_rsq[j] = QIDX_INVALID;
433 		/* get the virtual and physical address of the hardware queues */
434 		txr->tail = IXGBE_TDT(txr->me);
435 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
436 		txr->tx_paddr = paddrs[i];
437 
438 		txr->bytes = 0;
439 		txr->total_packets = 0;
440 
441 		/* Set the rate at which we sample packets */
442 		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
443 			txr->atr_sample = atr_sample_rate;
444 
445 	}
446 
447 	iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod,
448 	    "mod_task");
449 	iflib_config_gtask_init(ctx, &adapter->msf_task, ixgbe_handle_msf,
450 	    "msf_task");
451 	iflib_config_gtask_init(ctx, &adapter->phy_task, ixgbe_handle_phy,
452 	    "phy_task");
453 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
454 		iflib_config_gtask_init(ctx, &adapter->mbx_task,
455 		    ixgbe_handle_mbx, "mbx_task");
456 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
457 		iflib_config_gtask_init(ctx, &adapter->fdir_task,
458 		    ixgbe_reinit_fdir, "fdir_task");
459 
460 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
461 	    adapter->num_tx_queues);
462 
463 	return (0);
464 
465 fail:
466 	ixgbe_if_queues_free(ctx);
467 
468 	return (error);
469 } /* ixgbe_if_tx_queues_alloc */
470 
471 /************************************************************************
472  * ixgbe_if_rx_queues_alloc
473  ************************************************************************/
474 static int
475 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
476                          int nrxqs, int nrxqsets)
477 {
478 	struct adapter     *adapter = iflib_get_softc(ctx);
479 	struct ix_rx_queue *que;
480 	int                i;
481 
482 	MPASS(adapter->num_rx_queues > 0);
483 	MPASS(adapter->num_rx_queues == nrxqsets);
484 	MPASS(nrxqs == 1);
485 
486 	/* Allocate queue structure memory */
487 	adapter->rx_queues =
488 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
489 	                                 M_IXGBE, M_NOWAIT | M_ZERO);
490 	if (!adapter->rx_queues) {
491 		device_printf(iflib_get_dev(ctx),
492 		    "Unable to allocate RX ring memory\n");
493 		return (ENOMEM);
494 	}
495 
496 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
497 		struct rx_ring *rxr = &que->rxr;
498 
499 		/* In case SR-IOV is enabled, align the index properly */
500 		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
501 		    i);
502 
503 		rxr->adapter = que->adapter = adapter;
504 
505 		/* get the virtual and physical address of the hw queues */
506 		rxr->tail = IXGBE_RDT(rxr->me);
507 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
508 		rxr->rx_paddr = paddrs[i];
509 		rxr->bytes = 0;
510 		rxr->que = que;
511 	}
512 
513 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
514 	    adapter->num_rx_queues);
515 
516 	return (0);
517 } /* ixgbe_if_rx_queues_alloc */
518 
519 /************************************************************************
520  * ixgbe_if_queues_free
521  ************************************************************************/
522 static void
523 ixgbe_if_queues_free(if_ctx_t ctx)
524 {
525 	struct adapter     *adapter = iflib_get_softc(ctx);
526 	struct ix_tx_queue *tx_que = adapter->tx_queues;
527 	struct ix_rx_queue *rx_que = adapter->rx_queues;
528 	int                i;
529 
530 	if (tx_que != NULL) {
531 		for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
532 			struct tx_ring *txr = &tx_que->txr;
533 			if (txr->tx_rsq == NULL)
534 				break;
535 
536 			free(txr->tx_rsq, M_IXGBE);
537 			txr->tx_rsq = NULL;
538 		}
539 
540 		free(adapter->tx_queues, M_IXGBE);
541 		adapter->tx_queues = NULL;
542 	}
543 	if (rx_que != NULL) {
544 		free(adapter->rx_queues, M_IXGBE);
545 		adapter->rx_queues = NULL;
546 	}
547 } /* ixgbe_if_queues_free */
548 
549 /************************************************************************
550  * ixgbe_initialize_rss_mapping
551  ************************************************************************/
552 static void
553 ixgbe_initialize_rss_mapping(struct adapter *adapter)
554 {
555 	struct ixgbe_hw *hw = &adapter->hw;
556 	u32             reta = 0, mrqc, rss_key[10];
557 	int             queue_id, table_size, index_mult;
558 	int             i, j;
559 	u32             rss_hash_config;
560 
561 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
562 		/* Fetch the configured RSS key */
563 		rss_getkey((uint8_t *)&rss_key);
564 	} else {
565 		/* set up random bits */
566 		arc4rand(&rss_key, sizeof(rss_key), 0);
567 	}
568 
569 	/* Set multiplier for RETA setup and table size based on MAC */
570 	index_mult = 0x1;
571 	table_size = 128;
572 	switch (adapter->hw.mac.type) {
573 	case ixgbe_mac_82598EB:
574 		index_mult = 0x11;
575 		break;
576 	case ixgbe_mac_X550:
577 	case ixgbe_mac_X550EM_x:
578 	case ixgbe_mac_X550EM_a:
579 		table_size = 512;
580 		break;
581 	default:
582 		break;
583 	}
584 
585 	/* Set up the redirection table */
586 	for (i = 0, j = 0; i < table_size; i++, j++) {
587 		if (j == adapter->num_rx_queues)
588 			j = 0;
589 
590 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
591 			/*
592 			 * Fetch the RSS bucket id for the given indirection
593 			 * entry. Cap it at the number of configured buckets
594 			 * (which is num_rx_queues.)
595 			 */
596 			queue_id = rss_get_indirection_to_bucket(i);
597 			queue_id = queue_id % adapter->num_rx_queues;
598 		} else
599 			queue_id = (j * index_mult);
600 
601 		/*
602 		 * The low 8 bits are for hash value (n+0);
603 		 * The next 8 bits are for hash value (n+1), etc.
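		 * For example, with four RX queues and the RSS feature
		 * disabled (index_mult == 1), entries 0..3 pack into the
		 * first RETA register as 0x03020100 (entry 0 in bits 7:0,
		 * entry 3 in bits 31:24).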
604 		 */
605 		reta = reta >> 8;
606 		reta = reta | (((uint32_t)queue_id) << 24);
607 		if ((i & 3) == 3) {
608 			if (i < 128)
609 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
610 			else
611 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
612 				    reta);
613 			reta = 0;
614 		}
615 	}
616 
617 	/* Now fill our hash function seeds */
618 	for (i = 0; i < 10; i++)
619 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
620 
621 	/* Perform hash on these packet types */
622 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
623 		rss_hash_config = rss_gethashconfig();
624 	else {
625 		/*
626 		 * Disable UDP - IP fragments aren't currently being handled
627 		 * and so we end up with a mix of 2-tuple and 4-tuple
628 		 * traffic.
629 		 */
630 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
631 		                | RSS_HASHTYPE_RSS_TCP_IPV4
632 		                | RSS_HASHTYPE_RSS_IPV6
633 		                | RSS_HASHTYPE_RSS_TCP_IPV6
634 		                | RSS_HASHTYPE_RSS_IPV6_EX
635 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
636 	}
637 
638 	mrqc = IXGBE_MRQC_RSSEN;
639 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
640 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
641 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
642 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
643 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
644 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
645 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
646 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
647 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
648 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
649 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
650 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
651 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
652 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
653 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
654 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
655 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
656 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
657 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
658 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
659 } /* ixgbe_initialize_rss_mapping */
660 
661 /************************************************************************
662  * ixgbe_initialize_receive_units - Setup receive registers and features.
663  ************************************************************************/
664 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
665 
666 static void
667 ixgbe_initialize_receive_units(if_ctx_t ctx)
668 {
669 	struct adapter     *adapter = iflib_get_softc(ctx);
670 	if_softc_ctx_t     scctx = adapter->shared;
671 	struct ixgbe_hw    *hw = &adapter->hw;
672 	struct ifnet       *ifp = iflib_get_ifp(ctx);
673 	struct ix_rx_queue *que;
674 	int                i, j;
675 	u32                bufsz, fctrl, srrctl, rxcsum;
676 	u32                hlreg;
677 
678 	/*
679 	 * Make sure receives are disabled while
680 	 * setting up the descriptor ring
681 	 */
682 	ixgbe_disable_rx(hw);
683 
684 	/* Enable broadcasts */
685 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
686 	fctrl |= IXGBE_FCTRL_BAM;
687 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
688 		fctrl |= IXGBE_FCTRL_DPF;
689 		fctrl |= IXGBE_FCTRL_PMCF;
690 	}
691 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
692 
693 	/* Set for Jumbo Frames? */
694 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
695 	if (ifp->if_mtu > ETHERMTU)
696 		hlreg |= IXGBE_HLREG0_JUMBOEN;
697 	else
698 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
699 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
700 
701 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
702 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
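	/*
	 * Worked example (illustrative, assuming a 2K receive buffer and the
	 * usual IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10): (2048 + 1023) >> 10 == 2,
	 * i.e. SRRCTL encodes the packet buffer size in 1KB units.
	 */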
703 
704 	/* Setup the Base and Length of the Rx Descriptor Ring */
705 	for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
706 		struct rx_ring *rxr = &que->rxr;
707 		u64            rdba = rxr->rx_paddr;
708 
709 		j = rxr->me;
710 
711 		/* Setup the Base and Length of the Rx Descriptor Ring */
712 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
713 		    (rdba & 0x00000000ffffffffULL));
714 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
715 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
716 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
717 
718 		/* Set up the SRRCTL register */
719 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
720 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
721 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
722 		srrctl |= bufsz;
723 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
724 
725 		/*
726 		 * Set DROP_EN iff we have no flow control and >1 queue.
727 		 * Note that srrctl was cleared shortly before during reset,
728 		 * so we do not need to clear the bit, but do it just in case
729 		 * this code is moved elsewhere.
730 		 */
731 		if (adapter->num_rx_queues > 1 &&
732 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
733 			srrctl |= IXGBE_SRRCTL_DROP_EN;
734 		} else {
735 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
736 		}
737 
738 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
739 
740 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
741 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
742 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
743 
744 		/* Set the driver rx tail address */
745 		rxr->tail = IXGBE_RDT(rxr->me);
746 	}
747 
748 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
749 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
750 		            | IXGBE_PSRTYPE_UDPHDR
751 		            | IXGBE_PSRTYPE_IPV4HDR
752 		            | IXGBE_PSRTYPE_IPV6HDR;
753 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
754 	}
755 
756 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
757 
758 	ixgbe_initialize_rss_mapping(adapter);
759 
760 	if (adapter->num_rx_queues > 1) {
761 		/* RSS and RX IPP Checksum are mutually exclusive */
762 		rxcsum |= IXGBE_RXCSUM_PCSD;
763 	}
764 
765 	if (ifp->if_capenable & IFCAP_RXCSUM)
766 		rxcsum |= IXGBE_RXCSUM_PCSD;
767 
768 	/* This is useful for calculating UDP/IP fragment checksums */
769 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
770 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
771 
772 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
773 
774 } /* ixgbe_initialize_receive_units */
775 
776 /************************************************************************
777  * ixgbe_initialize_transmit_units - Enable transmit units.
778  ************************************************************************/
779 static void
780 ixgbe_initialize_transmit_units(if_ctx_t ctx)
781 {
782 	struct adapter     *adapter = iflib_get_softc(ctx);
783 	struct ixgbe_hw    *hw = &adapter->hw;
784 	if_softc_ctx_t     scctx = adapter->shared;
785 	struct ix_tx_queue *que;
786 	int i;
787 
788 	/* Setup the Base and Length of the Tx Descriptor Ring */
789 	for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
790 	    i++, que++) {
791 		struct tx_ring	   *txr = &que->txr;
792 		u64 tdba = txr->tx_paddr;
793 		u32 txctrl = 0;
794 		int j = txr->me;
795 
796 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
797 		    (tdba & 0x00000000ffffffffULL));
798 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
799 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
800 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
801 
802 		/* Setup the HW Tx Head and Tail descriptor pointers */
803 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
804 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
805 
806 		/* Reset the ring indices and report status array */
807 		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
808 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
809 			txr->tx_rsq[k] = QIDX_INVALID;
810 
811 		/* Disable Head Writeback */
812 		/*
813 		 * Note: for X550 series devices, these registers are actually
814 		 * prefixed with TPH_ instead of DCA_, but the addresses and
815 		 * fields remain the same.
816 		 */
817 		switch (hw->mac.type) {
818 		case ixgbe_mac_82598EB:
819 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
820 			break;
821 		default:
822 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
823 			break;
824 		}
825 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
826 		switch (hw->mac.type) {
827 		case ixgbe_mac_82598EB:
828 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
829 			break;
830 		default:
831 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
832 			break;
833 		}
834 
835 	}
836 
837 	if (hw->mac.type != ixgbe_mac_82598EB) {
838 		u32 dmatxctl, rttdcs;
839 
840 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
841 		dmatxctl |= IXGBE_DMATXCTL_TE;
842 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
843 		/* Disable arbiter to set MTQC */
844 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
845 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
846 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
847 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
848 		    ixgbe_get_mtqc(adapter->iov_mode));
849 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
850 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
851 	}
852 
853 } /* ixgbe_initialize_transmit_units */
854 
855 /************************************************************************
856  * ixgbe_register
857  ************************************************************************/
858 static void *
859 ixgbe_register(device_t dev)
860 {
861 	return (ixgbe_sctx);
862 } /* ixgbe_register */
863 
864 /************************************************************************
865  * ixgbe_if_attach_pre - Device initialization routine, part 1
866  *
867  *   Called when the driver is being loaded.
868  *   Identifies the type of hardware, initializes the hardware,
869  *   and initializes iflib structures.
870  *
871  *   return 0 on success, positive on failure
872  ************************************************************************/
873 static int
874 ixgbe_if_attach_pre(if_ctx_t ctx)
875 {
876 	struct adapter  *adapter;
877 	device_t        dev;
878 	if_softc_ctx_t  scctx;
879 	struct ixgbe_hw *hw;
880 	int             error = 0;
881 	u32             ctrl_ext;
882 
883 	INIT_DEBUGOUT("ixgbe_attach: begin");
884 
885 	/* Allocate, clear, and link in our adapter structure */
886 	dev = iflib_get_dev(ctx);
887 	adapter = iflib_get_softc(ctx);
888 	adapter->hw.back = adapter;
889 	adapter->ctx = ctx;
890 	adapter->dev = dev;
891 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
892 	adapter->media = iflib_get_media(ctx);
893 	hw = &adapter->hw;
894 
895 	/* Determine hardware revision */
896 	hw->vendor_id = pci_get_vendor(dev);
897 	hw->device_id = pci_get_device(dev);
898 	hw->revision_id = pci_get_revid(dev);
899 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
900 	hw->subsystem_device_id = pci_get_subdevice(dev);
901 
902 	/* Do base PCI setup - map BAR0 */
903 	if (ixgbe_allocate_pci_resources(ctx)) {
904 		device_printf(dev, "Allocation of PCI resources failed\n");
905 		return (ENXIO);
906 	}
907 
908 	/* let hardware know driver is loaded */
909 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
910 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
911 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
912 
913 	/*
914 	 * Initialize the shared code
915 	 */
916 	if (ixgbe_init_shared_code(hw) != 0) {
917 		device_printf(dev, "Unable to initialize the shared code\n");
918 		error = ENXIO;
919 		goto err_pci;
920 	}
921 
922 	if (hw->mbx.ops.init_params)
923 		hw->mbx.ops.init_params(hw);
924 
925 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
926 
927 	if (hw->mac.type != ixgbe_mac_82598EB)
928 		hw->phy.smart_speed = ixgbe_smart_speed;
929 
930 	ixgbe_init_device_features(adapter);
931 
932 	/* Enable WoL (if supported) */
933 	ixgbe_check_wol_support(adapter);
934 
935 	/* Verify adapter fan is still functional (if applicable) */
936 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
937 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
938 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
939 	}
940 
941 	/* Ensure SW/FW semaphore is free */
942 	ixgbe_init_swfw_semaphore(hw);
943 
944 	/* Set an initial default flow control value */
945 	hw->fc.requested_mode = ixgbe_flow_control;
946 
947 	hw->phy.reset_if_overtemp = TRUE;
948 	error = ixgbe_reset_hw(hw);
949 	hw->phy.reset_if_overtemp = FALSE;
950 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
951 		/*
952 		 * No optics in this port, set up
953 		 * so the timer routine will probe
954 		 * for later insertion.
955 		 */
956 		adapter->sfp_probe = TRUE;
957 		error = 0;
958 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
959 		device_printf(dev, "Unsupported SFP+ module detected!\n");
960 		error = EIO;
961 		goto err_pci;
962 	} else if (error) {
963 		device_printf(dev, "Hardware initialization failed\n");
964 		error = EIO;
965 		goto err_pci;
966 	}
967 
968 	/* Make sure we have a good EEPROM before we read from it */
969 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
970 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
971 		error = EIO;
972 		goto err_pci;
973 	}
974 
975 	error = ixgbe_start_hw(hw);
976 	switch (error) {
977 	case IXGBE_ERR_EEPROM_VERSION:
978 		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
979 		break;
980 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
981 		device_printf(dev, "Unsupported SFP+ Module\n");
982 		error = EIO;
983 		goto err_pci;
984 	case IXGBE_ERR_SFP_NOT_PRESENT:
985 		device_printf(dev, "No SFP+ Module found\n");
986 		/* falls thru */
987 	default:
988 		break;
989 	}
990 
991 	/* Most of the iflib initialization... */
992 
993 	iflib_set_mac(ctx, hw->mac.addr);
994 	switch (adapter->hw.mac.type) {
995 	case ixgbe_mac_X550:
996 	case ixgbe_mac_X550EM_x:
997 	case ixgbe_mac_X550EM_a:
998 		scctx->isc_rss_table_size = 512;
999 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1000 		break;
1001 	default:
1002 		scctx->isc_rss_table_size = 128;
1003 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1004 	}
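	/*
	 * Note: RSS table entries beyond index 127 are programmed through the
	 * ERETA registers; see ixgbe_initialize_rss_mapping() above.
	 */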
1005 
1006 	/* Allow legacy interrupts */
1007 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1008 
1009 	scctx->isc_txqsizes[0] =
1010 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1011 	    sizeof(u32), DBA_ALIGN);
1012 	scctx->isc_rxqsizes[0] =
1013 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1014 	    DBA_ALIGN);
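	/*
	 * Rough sanity check (illustrative, assuming 16-byte descriptors and
	 * DBA_ALIGN of 128): 1024 TX descriptors give 1024 * 16 + 4 = 16388
	 * bytes, rounded up to 16512.
	 */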
1015 
1016 	/* XXX */
1017 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1018 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1019 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1020 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1021 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
1022 	} else {
1023 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1024 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1025 		scctx->isc_msix_bar = PCIR_BAR(MSIX_82599_BAR);
1026 	}
1027 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1028 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1029 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1030 
1031 	scctx->isc_txrx = &ixgbe_txrx;
1032 
1033 	scctx->isc_capenable = IXGBE_CAPS;
1034 
1035 	return (0);
1036 
1037 err_pci:
1038 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1039 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1040 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1041 	ixgbe_free_pci_resources(ctx);
1042 
1043 	return (error);
1044 } /* ixgbe_if_attach_pre */
1045 
1046 /*********************************************************************
1047  * ixgbe_if_attach_post - Device initialization routine, part 2
1048  *
1049  *   Called during driver load, but after interrupts and
1050  *   resources have been allocated and configured.
1051  *   Sets up some data structures not relevant to iflib.
1052  *
1053  *   return 0 on success, positive on failure
1054  *********************************************************************/
1055 static int
1056 ixgbe_if_attach_post(if_ctx_t ctx)
1057 {
1058 	device_t dev;
1059 	struct adapter  *adapter;
1060 	struct ixgbe_hw *hw;
1061 	int             error = 0;
1062 
1063 	dev = iflib_get_dev(ctx);
1064 	adapter = iflib_get_softc(ctx);
1065 	hw = &adapter->hw;
1066 
1067 
1068 	if (adapter->intr_type == IFLIB_INTR_LEGACY &&
1069 		(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1070 		device_printf(dev, "Device does not support legacy interrupts\n");
1071 		error = ENXIO;
1072 		goto err;
1073 	}
1074 
1075 	/* Allocate multicast array memory. */
1076 	adapter->mta = malloc(sizeof(*adapter->mta) *
1077 	                      MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
1078 	if (adapter->mta == NULL) {
1079 		device_printf(dev, "Can not allocate multicast setup array\n");
1080 		error = ENOMEM;
1081 		goto err;
1082 	}
1083 
1084 	/* hw.ix defaults init */
1085 	ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
1086 
1087 	/* Enable the optics for 82599 SFP+ fiber */
1088 	ixgbe_enable_tx_laser(hw);
1089 
1090 	/* Enable power to the phy. */
1091 	ixgbe_set_phy_power(hw, TRUE);
1092 
1093 	ixgbe_initialize_iov(adapter);
1094 
1095 	error = ixgbe_setup_interface(ctx);
1096 	if (error) {
1097 		device_printf(dev, "Interface setup failed: %d\n", error);
1098 		goto err;
1099 	}
1100 
1101 	ixgbe_if_update_admin_status(ctx);
1102 
1103 	/* Initialize statistics */
1104 	ixgbe_update_stats_counters(adapter);
1105 	ixgbe_add_hw_stats(adapter);
1106 
1107 	/* Check PCIE slot type/speed/width */
1108 	ixgbe_get_slot_info(adapter);
1109 
1110 	/*
1111 	 * Do time init and sysctl init here, but
1112 	 * only on the first port of a bypass adapter.
1113 	 */
1114 	ixgbe_bypass_init(adapter);
1115 
1116 	/* Set an initial dmac value */
1117 	adapter->dmac = 0;
1118 	/* Set initial advertised speeds (if applicable) */
1119 	adapter->advertise = ixgbe_get_advertise(adapter);
1120 
1121 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1122 		ixgbe_define_iov_schemas(dev, &error);
1123 
1124 	/* Add sysctls */
1125 	ixgbe_add_device_sysctls(ctx);
1126 
1127 	return (0);
1128 err:
1129 	return (error);
1130 } /* ixgbe_if_attach_post */
1131 
1132 /************************************************************************
1133  * ixgbe_check_wol_support
1134  *
1135  *   Checks whether the adapter's ports are capable of
1136  *   Wake On LAN by reading the adapter's NVM.
1137  *
1138  *   Sets each port's hw->wol_enabled value depending
1139  *   on the value read here.
1140  ************************************************************************/
1141 static void
1142 ixgbe_check_wol_support(struct adapter *adapter)
1143 {
1144 	struct ixgbe_hw *hw = &adapter->hw;
1145 	u16             dev_caps = 0;
1146 
1147 	/* Find out WoL support for port */
1148 	adapter->wol_support = hw->wol_enabled = 0;
1149 	ixgbe_get_device_caps(hw, &dev_caps);
1150 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1151 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1152 	     hw->bus.func == 0))
1153 		adapter->wol_support = hw->wol_enabled = 1;
1154 
1155 	/* Save initial wake up filter configuration */
1156 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1157 
1158 	return;
1159 } /* ixgbe_check_wol_support */
1160 
1161 /************************************************************************
1162  * ixgbe_setup_interface
1163  *
1164  *   Setup networking device structure and register an interface.
1165  ************************************************************************/
1166 static int
1167 ixgbe_setup_interface(if_ctx_t ctx)
1168 {
1169 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1170 	struct adapter *adapter = iflib_get_softc(ctx);
1171 
1172 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1173 
1174 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
1175 	if_setbaudrate(ifp, IF_Gbps(10));
1176 
1177 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1178 
1179 	/*
1180 	 * Don't turn this on by default: if vlans are
1181 	 * created on another pseudo device (e.g. lagg),
1182 	 * vlan events are not passed through, breaking
1183 	 * operation, whereas with HW FILTER off it works. If
1184 	 * you use vlans directly on the ixgbe driver, you can
1185 	 * enable this and get full hardware tag filtering.
1186 	 */
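	/*
	 * The capability can also be toggled at runtime, e.g. (assuming the
	 * interface is named ix0):
	 *
	 *   ifconfig ix0 vlanhwfilter
	 *   ifconfig ix0 -vlanhwfilter
	 */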
1187 	if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWFILTER);
1188 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1189 
1190 	ixgbe_add_media_types(ctx);
1191 
1192 	/* Autoselect media by default */
1193 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1194 
1195 	return (0);
1196 } /* ixgbe_setup_interface */
1197 
1198 /************************************************************************
1199  * ixgbe_if_get_counter
1200  ************************************************************************/
1201 static uint64_t
1202 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1203 {
1204 	struct adapter *adapter = iflib_get_softc(ctx);
1205 	if_t           ifp = iflib_get_ifp(ctx);
1206 
1207 	switch (cnt) {
1208 	case IFCOUNTER_IPACKETS:
1209 		return (adapter->ipackets);
1210 	case IFCOUNTER_OPACKETS:
1211 		return (adapter->opackets);
1212 	case IFCOUNTER_IBYTES:
1213 		return (adapter->ibytes);
1214 	case IFCOUNTER_OBYTES:
1215 		return (adapter->obytes);
1216 	case IFCOUNTER_IMCASTS:
1217 		return (adapter->imcasts);
1218 	case IFCOUNTER_OMCASTS:
1219 		return (adapter->omcasts);
1220 	case IFCOUNTER_COLLISIONS:
1221 		return (0);
1222 	case IFCOUNTER_IQDROPS:
1223 		return (adapter->iqdrops);
1224 	case IFCOUNTER_OQDROPS:
1225 		return (0);
1226 	case IFCOUNTER_IERRORS:
1227 		return (adapter->ierrors);
1228 	default:
1229 		return (if_get_counter_default(ifp, cnt));
1230 	}
1231 } /* ixgbe_if_get_counter */
1232 
1233 /************************************************************************
1234  * ixgbe_add_media_types
1235  ************************************************************************/
1236 static void
1237 ixgbe_add_media_types(if_ctx_t ctx)
1238 {
1239 	struct adapter  *adapter = iflib_get_softc(ctx);
1240 	struct ixgbe_hw *hw = &adapter->hw;
1241 	device_t        dev = iflib_get_dev(ctx);
1242 	u64             layer;
1243 
1244 	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
1245 
1246 	/* Media types with matching FreeBSD media defines */
1247 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1248 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1249 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1250 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1251 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1252 		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1253 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1254 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1255 
1256 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1257 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1258 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1259 		    NULL);
1260 
1261 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1262 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1263 		if (hw->phy.multispeed_fiber)
1264 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1265 			    NULL);
1266 	}
1267 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1268 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1269 		if (hw->phy.multispeed_fiber)
1270 			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1271 			    NULL);
1272 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1273 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1274 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1275 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1276 
1277 #ifdef IFM_ETH_XTYPE
1278 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1279 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1280 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1281 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1282 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1283 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1284 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1285 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1286 #else
1287 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1288 		device_printf(dev, "Media supported: 10GbaseKR\n");
1289 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1290 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1291 	}
1292 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1293 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1294 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1295 		ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1296 	}
1297 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1298 		device_printf(dev, "Media supported: 1000baseKX\n");
1299 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1300 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1301 	}
1302 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1303 		device_printf(dev, "Media supported: 2500baseKX\n");
1304 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1305 		ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1306 	}
1307 #endif
1308 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1309 		device_printf(dev, "Media supported: 1000baseBX\n");
1310 
1311 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1312 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1313 		    0, NULL);
1314 		ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1315 	}
1316 
1317 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1318 } /* ixgbe_add_media_types */
1319 
1320 /************************************************************************
1321  * ixgbe_is_sfp
1322  ************************************************************************/
1323 static inline bool
1324 ixgbe_is_sfp(struct ixgbe_hw *hw)
1325 {
1326 	switch (hw->mac.type) {
1327 	case ixgbe_mac_82598EB:
1328 		if (hw->phy.type == ixgbe_phy_nl)
1329 			return (TRUE);
1330 		return (FALSE);
1331 	case ixgbe_mac_82599EB:
1332 		switch (hw->mac.ops.get_media_type(hw)) {
1333 		case ixgbe_media_type_fiber:
1334 		case ixgbe_media_type_fiber_qsfp:
1335 			return (TRUE);
1336 		default:
1337 			return (FALSE);
1338 		}
1339 	case ixgbe_mac_X550EM_x:
1340 	case ixgbe_mac_X550EM_a:
1341 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1342 			return (TRUE);
1343 		return (FALSE);
1344 	default:
1345 		return (FALSE);
1346 	}
1347 } /* ixgbe_is_sfp */
1348 
1349 /************************************************************************
1350  * ixgbe_config_link
1351  ************************************************************************/
1352 static void
1353 ixgbe_config_link(struct adapter *adapter)
1354 {
1355 	struct ixgbe_hw *hw = &adapter->hw;
1356 	u32             autoneg, err = 0;
1357 	bool            sfp, negotiate;
1358 
1359 	sfp = ixgbe_is_sfp(hw);
1360 
1361 	if (sfp) {
1362 		GROUPTASK_ENQUEUE(&adapter->mod_task);
1363 	} else {
1364 		if (hw->mac.ops.check_link)
1365 			err = ixgbe_check_link(hw, &adapter->link_speed,
1366 			    &adapter->link_up, FALSE);
1367 		if (err)
1368 			return;
1369 		autoneg = hw->phy.autoneg_advertised;
1370 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1371 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1372 			    &negotiate);
1373 		if (err)
1374 			return;
1375 		if (hw->mac.ops.setup_link)
1376 			err = hw->mac.ops.setup_link(hw, autoneg,
1377 			    adapter->link_up);
1378 	}
1379 
1380 } /* ixgbe_config_link */
1381 
1382 /************************************************************************
1383  * ixgbe_update_stats_counters - Update board statistics counters.
1384  ************************************************************************/
1385 static void
1386 ixgbe_update_stats_counters(struct adapter *adapter)
1387 {
1388 	struct ixgbe_hw       *hw = &adapter->hw;
1389 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1390 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1391 	u64                   total_missed_rx = 0;
1392 
1393 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1394 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1395 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1396 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1397 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1398 
1399 	for (int i = 0; i < 16; i++) {
1400 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1401 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1402 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1403 	}
1404 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1405 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1406 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1407 
1408 	/* Hardware workaround, gprc counts missed packets */
1409 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1410 	stats->gprc -= missed_rx;
1411 
1412 	if (hw->mac.type != ixgbe_mac_82598EB) {
1413 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1414 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1415 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1416 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1417 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1418 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1419 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1420 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1421 	} else {
1422 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1423 		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1424 		/* 82598 only has a counter in the high register */
1425 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1426 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1427 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1428 	}
1429 
1430 	/*
1431 	 * Workaround: mprc hardware is incorrectly counting
1432 	 * broadcasts, so for now we subtract those.
1433 	 */
1434 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1435 	stats->bprc += bprc;
1436 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1437 	if (hw->mac.type == ixgbe_mac_82598EB)
1438 		stats->mprc -= bprc;
1439 
1440 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1441 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1442 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1443 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1444 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1445 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1446 
1447 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1448 	stats->lxontxc += lxon;
1449 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1450 	stats->lxofftxc += lxoff;
1451 	total = lxon + lxoff;
1452 
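	/*
	 * Flow control (XON/XOFF) pause frames are also counted by the
	 * good/size-binned transmit counters, so back them (and their
	 * minimum-length byte counts) out below.
	 */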
1453 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1454 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1455 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1456 	stats->gptc -= total;
1457 	stats->mptc -= total;
1458 	stats->ptc64 -= total;
1459 	stats->gotc -= total * ETHER_MIN_LEN;
1460 
1461 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1462 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1463 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1464 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1465 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1466 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1467 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1468 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1469 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1470 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1471 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1472 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1473 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1474 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1475 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1476 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1477 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1478 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1479 	/* FCoE counters only exist on 82599 and later */
1480 	if (hw->mac.type != ixgbe_mac_82598EB) {
1481 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1482 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1483 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1484 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1485 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1486 	}
1487 
1488 	/* Fill out the OS statistics structure */
1489 	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1490 	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1491 	IXGBE_SET_IBYTES(adapter, stats->gorc);
1492 	IXGBE_SET_OBYTES(adapter, stats->gotc);
1493 	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1494 	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1495 	IXGBE_SET_COLLISIONS(adapter, 0);
1496 	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1497 	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1498 } /* ixgbe_update_stats_counters */
1499 
1500 /************************************************************************
1501  * ixgbe_add_hw_stats
1502  *
1503  *   Add sysctl variables, one per statistic, to the system.
1504  ************************************************************************/
1505 static void
1506 ixgbe_add_hw_stats(struct adapter *adapter)
1507 {
1508 	device_t               dev = iflib_get_dev(adapter->ctx);
1509 	struct ix_rx_queue     *rx_que;
1510 	struct ix_tx_queue     *tx_que;
1511 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1512 	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1513 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1514 	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1515 	struct sysctl_oid      *stat_node, *queue_node;
1516 	struct sysctl_oid_list *stat_list, *queue_list;
1517 	int                    i;
1518 
1519 #define QUEUE_NAME_LEN 32
1520 	char                   namebuf[QUEUE_NAME_LEN];
1521 
1522 	/* Driver Statistics */
1523 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1524 	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1525 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1526 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1527 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1528 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1529 
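	/*
	 * The per-queue nodes added below show up under the device's
	 * sysctl tree, e.g. dev.ix.<unit>.queue<N>.tx_packets.
	 */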
1530 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
1531 		struct tx_ring *txr = &tx_que->txr;
1532 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1533 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1534 		    CTLFLAG_RD, NULL, "Queue Name");
1535 		queue_list = SYSCTL_CHILDREN(queue_node);
1536 
1537 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1538 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1539 		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1540 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1541 		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1542 		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1543 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1544 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1545 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1546 		    CTLFLAG_RD, &txr->total_packets,
1547 		    "Queue Packets Transmitted");
1548 	}
1549 
1550 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
1551 		struct rx_ring *rxr = &rx_que->rxr;
1552 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1553 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1554 		    CTLFLAG_RD, NULL, "Queue Name");
1555 		queue_list = SYSCTL_CHILDREN(queue_node);
1556 
1557 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1558 		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->rx_queues[i],
1559 		    sizeof(&adapter->rx_queues[i]),
1560 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1561 		    "Interrupt Rate");
1562 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1563 		    CTLFLAG_RD, &(adapter->rx_queues[i].irqs),
1564 		    "irqs on this queue");
1565 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1566 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1567 		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1568 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1569 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1570 		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1571 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1572 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1573 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1574 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1575 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1576 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1577 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1578 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1579 	}
1580 
1581 	/* MAC stats get their own sub node */
1582 
1583 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1584 	    CTLFLAG_RD, NULL, "MAC Statistics");
1585 	stat_list = SYSCTL_CHILDREN(stat_node);
1586 
1587 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1588 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1589 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1590 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1591 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1592 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1593 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1594 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1595 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1596 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1597 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1598 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1599 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1600 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1601 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1602 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1603 
1604 	/* Flow Control stats */
1605 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1606 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1607 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1608 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1609 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1610 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1611 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1612 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1613 
1614 	/* Packet Reception Stats */
1615 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1616 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1617 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1618 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1619 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1620 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1621 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1622 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1623 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1624 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1625 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1626 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1627 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1628 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1629 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1630 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1631 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1632 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1633 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1634 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1635 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1636 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1637 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1638 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1639 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1640 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1641 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1642 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1643 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1644 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1645 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1646 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1647 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1648 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1649 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1650 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1651 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1652 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1653 
1654 	/* Packet Transmission Stats */
1655 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1656 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1657 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1658 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1659 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1660 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1661 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1662 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1663 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1664 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1665 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1666 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1667 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1668 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1669 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1670 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1671 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1672 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1673 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1674 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1675 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1676 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1677 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1678 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1679 } /* ixgbe_add_hw_stats */
1680 
1681 /************************************************************************
1682  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1683  *
1684  *   Retrieves the TDH value from the hardware
1685  ************************************************************************/
1686 static int
1687 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1688 {
1689 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1690 	int            error;
1691 	unsigned int   val;
1692 
1693 	if (!txr)
1694 		return (0);
1695 
1696 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1697 	error = sysctl_handle_int(oidp, &val, 0, req);
1698 	if (error || !req->newptr)
1699 		return error;
1700 
1701 	return (0);
1702 } /* ixgbe_sysctl_tdh_handler */
1703 
1704 /************************************************************************
1705  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1706  *
1707  *   Retrieves the TDT value from the hardware
1708  ************************************************************************/
1709 static int
1710 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1711 {
1712 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1713 	int            error;
1714 	unsigned int   val;
1715 
1716 	if (!txr)
1717 		return (0);
1718 
1719 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1720 	error = sysctl_handle_int(oidp, &val, 0, req);
1721 	if (error || !req->newptr)
1722 		return error;
1723 
1724 	return (0);
1725 } /* ixgbe_sysctl_tdt_handler */
1726 
1727 /************************************************************************
1728  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1729  *
1730  *   Retrieves the RDH value from the hardware
1731  ************************************************************************/
1732 static int
1733 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1734 {
1735 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1736 	int            error;
1737 	unsigned int   val;
1738 
1739 	if (!rxr)
1740 		return (0);
1741 
1742 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1743 	error = sysctl_handle_int(oidp, &val, 0, req);
1744 	if (error || !req->newptr)
1745 		return error;
1746 
1747 	return (0);
1748 } /* ixgbe_sysctl_rdh_handler */
1749 
1750 /************************************************************************
1751  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1752  *
1753  *   Retrieves the RDT value from the hardware
1754  ************************************************************************/
1755 static int
1756 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1757 {
1758 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1759 	int            error;
1760 	unsigned int   val;
1761 
1762 	if (!rxr)
1763 		return (0);
1764 
1765 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1766 	error = sysctl_handle_int(oidp, &val, 0, req);
1767 	if (error || !req->newptr)
1768 		return error;
1769 
1770 	return (0);
1771 } /* ixgbe_sysctl_rdt_handler */
1772 
1773 /************************************************************************
1774  * ixgbe_if_vlan_register
1775  *
1776  *   Run via vlan config EVENT, it enables us to use the
1777  *   HW Filter table since we can get the vlan id. This
1778  *   just creates the entry in the soft version of the
1779  *   VFTA, init will repopulate the real table.
1780  ************************************************************************/
1781 static void
1782 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1783 {
1784 	struct adapter *adapter = iflib_get_softc(ctx);
1785 	u16            index, bit;
1786 
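	/*
	 * The VLAN Filter Table Array is an array of 32-bit words
	 * covering all 4096 VLAN IDs: bits 11:5 of the tag select the
	 * word and bits 4:0 select the bit within it.
	 */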
1787 	index = (vtag >> 5) & 0x7F;
1788 	bit = vtag & 0x1F;
1789 	adapter->shadow_vfta[index] |= (1 << bit);
1790 	++adapter->num_vlans;
1791 	ixgbe_setup_vlan_hw_support(ctx);
1792 } /* ixgbe_if_vlan_register */
1793 
1794 /************************************************************************
1795  * ixgbe_if_vlan_unregister
1796  *
1797  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1798  ************************************************************************/
1799 static void
1800 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1801 {
1802 	struct adapter *adapter = iflib_get_softc(ctx);
1803 	u16            index, bit;
1804 
1805 	index = (vtag >> 5) & 0x7F;
1806 	bit = vtag & 0x1F;
1807 	adapter->shadow_vfta[index] &= ~(1 << bit);
1808 	--adapter->num_vlans;
1809 	/* Re-init to load the changes */
1810 	ixgbe_setup_vlan_hw_support(ctx);
1811 } /* ixgbe_if_vlan_unregister */
1812 
1813 /************************************************************************
1814  * ixgbe_setup_vlan_hw_support
1815  ************************************************************************/
1816 static void
1817 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1818 {
1819 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1820 	struct adapter  *adapter = iflib_get_softc(ctx);
1821 	struct ixgbe_hw *hw = &adapter->hw;
1822 	struct rx_ring  *rxr;
1823 	int             i;
1824 	u32             ctrl;
1825 
1827 	/*
1828 	 * We get here through the init path, meaning a soft
1829 	 * reset has already cleared the VFTA and other state,
1830 	 * so if no VLANs have been registered there is
1831 	 * nothing to do.
1832 	 */
1833 	if (adapter->num_vlans == 0)
1834 		return;
1835 
1836 	/* Setup the queues for vlans */
1837 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1838 		for (i = 0; i < adapter->num_rx_queues; i++) {
1839 			rxr = &adapter->rx_queues[i].rxr;
1840 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
1841 			if (hw->mac.type != ixgbe_mac_82598EB) {
1842 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1843 				ctrl |= IXGBE_RXDCTL_VME;
1844 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1845 			}
1846 			rxr->vtag_strip = TRUE;
1847 		}
1848 	}
1849 
1850 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1851 		return;
1852 	/*
1853 	 * A soft reset zeroes out the VFTA, so
1854 	 * we need to repopulate it now.
1855 	 */
1856 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1857 		if (adapter->shadow_vfta[i] != 0)
1858 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1859 			    adapter->shadow_vfta[i]);
1860 
1861 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1862 	/* Enable the VLAN filter table if HW VLAN filtering is enabled */
1863 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1864 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1865 		ctrl |= IXGBE_VLNCTRL_VFE;
1866 	}
1867 	if (hw->mac.type == ixgbe_mac_82598EB)
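	/*
	 * On 82598 VLAN tag stripping is a global VLNCTRL setting;
	 * newer MACs enabled it per queue via RXDCTL above.
	 */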
1868 		ctrl |= IXGBE_VLNCTRL_VME;
1869 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1870 } /* ixgbe_setup_vlan_hw_support */
1871 
1872 /************************************************************************
1873  * ixgbe_get_slot_info
1874  *
1875  *   Get the width and transaction speed of
1876  *   the slot this adapter is plugged into.
1877  ************************************************************************/
1878 static void
1879 ixgbe_get_slot_info(struct adapter *adapter)
1880 {
1881 	device_t        dev = iflib_get_dev(adapter->ctx);
1882 	struct ixgbe_hw *hw = &adapter->hw;
1883 	int             bus_info_valid = TRUE;
1884 	u32             offset;
1885 	u16             link;
1886 
1887 	/* Some devices are behind an internal bridge */
1888 	switch (hw->device_id) {
1889 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
1890 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1891 		goto get_parent_info;
1892 	default:
1893 		break;
1894 	}
1895 
1896 	ixgbe_get_bus_info(hw);
1897 
1898 	/*
1899 	 * Some devices don't attach over PCIe; for those there is
1900 	 * no point in displaying "Unknown" bus speed and width.
1901 	 */
1902 	switch (hw->mac.type) {
1903 	case ixgbe_mac_X550EM_x:
1904 	case ixgbe_mac_X550EM_a:
1905 		return;
1906 	default:
1907 		goto display;
1908 	}
1909 
1910 get_parent_info:
1911 	/*
1912 	 * For the Quad port adapter we need to parse back
1913 	 * up the PCI tree to find the speed of the expansion
1914 	 * slot into which this adapter is plugged. A bit more work.
1915 	 */
1916 	dev = device_get_parent(device_get_parent(dev));
1917 #ifdef IXGBE_DEBUG
1918 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1919 	    pci_get_slot(dev), pci_get_function(dev));
1920 #endif
1921 	dev = device_get_parent(device_get_parent(dev));
1922 #ifdef IXGBE_DEBUG
1923 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1924 	    pci_get_slot(dev), pci_get_function(dev));
1925 #endif
1926 	/* Now get the PCI Express Capabilities offset */
1927 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1928 		/*
1929 		 * Hmm...can't get PCI-Express capabilities.
1930 		 * Falling back to default method.
1931 		 */
1932 		bus_info_valid = FALSE;
1933 		ixgbe_get_bus_info(hw);
1934 		goto display;
1935 	}
1936 	/* ...and read the Link Status Register */
1937 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1938 	ixgbe_set_pci_config_data_generic(hw, link);
1939 
1940 display:
1941 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1942 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1943 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1944 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1945 	     "Unknown"),
1946 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1947 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1948 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1949 	     "Unknown"));
1950 
1951 	if (bus_info_valid) {
1952 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1953 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1954 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
1955 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1956 			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1957 		}
1958 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1959 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1960 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
1961 			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1962 			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1963 		}
1964 	} else
1965 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1966 
1967 	return;
1968 } /* ixgbe_get_slot_info */
1969 
1970 /************************************************************************
1971  * ixgbe_if_msix_intr_assign
1972  *
1973  *   Setup MSI-X Interrupt resources and handlers
1974  ************************************************************************/
1975 static int
1976 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
1977 {
1978 	struct adapter     *adapter = iflib_get_softc(ctx);
1979 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1980 	struct ix_tx_queue *tx_que;
1981 	int                error, rid, vector = 0;
1982 	int                cpu_id = 0;
1983 	char               buf[16];
1984 
1985 	/* The admin/link interrupt uses vector 0; MSI-X rids are 1-based */
1986 	rid = vector + 1;
1987 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1988 		rid = vector + 1;
1989 
1990 		snprintf(buf, sizeof(buf), "rxq%d", i);
1991 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1992 		    IFLIB_INTR_RX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
1993 
1994 		if (error) {
1995 			device_printf(iflib_get_dev(ctx),
1996 			    "Failed to allocate queue interrupt %d, error: %d\n", i, error);
1997 			adapter->num_rx_queues = i + 1;
1998 			goto fail;
1999 		}
2000 
2001 		rx_que->msix = vector;
2002 		adapter->active_queues |= (u64)(1 << rx_que->msix);
2003 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2004 			/*
2005 			 * The queue ID is used as the RSS layer bucket ID.
2006 			 * We look up the queue ID -> RSS CPU ID and select
2007 			 * that.
2008 			 */
2009 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
2010 		} else {
2011 			/*
2012 			 * Bind the msix vector, and thus the
2013 			 * rings to the corresponding cpu.
2014 			 *
2015 			 * This just happens to match the default RSS
2016 			 * round-robin bucket -> queue -> CPU allocation.
2017 			 */
2018 			if (adapter->num_rx_queues > 1)
2019 				cpu_id = i;
2020 		}
2021 
2022 	}
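	/*
	 * TX queues are not given vectors of their own; each one shares
	 * the MSI-X vector (and softirq) of an RX queue, assigned
	 * round-robin below.
	 */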
2023 	for (int i = 0; i < adapter->num_tx_queues; i++) {
2024 		snprintf(buf, sizeof(buf), "txq%d", i);
2025 		tx_que = &adapter->tx_queues[i];
2026 		tx_que->msix = i % adapter->num_rx_queues;
2027 		iflib_softirq_alloc_generic(ctx,
2028 		    &adapter->rx_queues[tx_que->msix].que_irq,
2029 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2030 	}
2031 	rid = vector + 1;
2032 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
2033 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq");
2034 	if (error) {
2035 		device_printf(iflib_get_dev(ctx),
2036 		    "Failed to register admin handler");
2037 		return (error);
2038 	}
2039 
2040 	adapter->vector = vector;
2041 
2042 	return (0);
2043 fail:
2044 	iflib_irq_free(ctx, &adapter->irq);
2045 	rx_que = adapter->rx_queues;
2046 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
2047 		iflib_irq_free(ctx, &rx_que->que_irq);
2048 
2049 	return (error);
2050 } /* ixgbe_if_msix_intr_assign */
2051 
2052 /*********************************************************************
2053  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2054  **********************************************************************/
2055 static int
2056 ixgbe_msix_que(void *arg)
2057 {
2058 	struct ix_rx_queue *que = arg;
2059 	struct adapter     *adapter = que->adapter;
2060 	struct ifnet       *ifp = iflib_get_ifp(que->adapter->ctx);
2061 
2062 	/* Protect against spurious interrupts */
2063 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2064 		return 0;
2065 
2066 	ixgbe_disable_queue(adapter, que->msix);
2067 	++que->irqs;
2068 
2069 	return (FILTER_SCHEDULE_THREAD);
2070 } /* ixgbe_msix_que */
2071 
2072 /************************************************************************
2073  * ixgbe_if_media_status - Media Ioctl callback
2074  *
2075  *   Called whenever the user queries the status of
2076  *   the interface using ifconfig.
2077  ************************************************************************/
2078 static void
2079 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2080 {
2081 	struct adapter  *adapter = iflib_get_softc(ctx);
2082 	struct ixgbe_hw *hw = &adapter->hw;
2083 	int             layer;
2084 
2085 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2086 
2087 	iflib_admin_intr_deferred(ctx);
2088 
2089 	ifmr->ifm_status = IFM_AVALID;
2090 	ifmr->ifm_active = IFM_ETHER;
2091 
2092 	if (!adapter->link_active)
2093 		return;
2094 
2095 	ifmr->ifm_status |= IFM_ACTIVE;
2096 	layer = adapter->phy_layer;
2097 
2098 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2099 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2100 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2101 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2102 		switch (adapter->link_speed) {
2103 		case IXGBE_LINK_SPEED_10GB_FULL:
2104 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2105 			break;
2106 		case IXGBE_LINK_SPEED_1GB_FULL:
2107 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2108 			break;
2109 		case IXGBE_LINK_SPEED_100_FULL:
2110 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2111 			break;
2112 		case IXGBE_LINK_SPEED_10_FULL:
2113 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2114 			break;
2115 		}
2116 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2117 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2118 		switch (adapter->link_speed) {
2119 		case IXGBE_LINK_SPEED_10GB_FULL:
2120 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2121 			break;
2122 		}
2123 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2124 		switch (adapter->link_speed) {
2125 		case IXGBE_LINK_SPEED_10GB_FULL:
2126 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2127 			break;
2128 		case IXGBE_LINK_SPEED_1GB_FULL:
2129 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2130 			break;
2131 		}
2132 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2133 		switch (adapter->link_speed) {
2134 		case IXGBE_LINK_SPEED_10GB_FULL:
2135 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2136 			break;
2137 		case IXGBE_LINK_SPEED_1GB_FULL:
2138 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2139 			break;
2140 		}
2141 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2142 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2143 		switch (adapter->link_speed) {
2144 		case IXGBE_LINK_SPEED_10GB_FULL:
2145 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2146 			break;
2147 		case IXGBE_LINK_SPEED_1GB_FULL:
2148 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2149 			break;
2150 		}
2151 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2152 		switch (adapter->link_speed) {
2153 		case IXGBE_LINK_SPEED_10GB_FULL:
2154 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2155 			break;
2156 		}
2157 	/*
2158 	 * XXX: These need to use the proper media types once
2159 	 * they're added.
2160 	 */
2161 #ifndef IFM_ETH_XTYPE
2162 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2163 		switch (adapter->link_speed) {
2164 		case IXGBE_LINK_SPEED_10GB_FULL:
2165 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2166 			break;
2167 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2168 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2169 			break;
2170 		case IXGBE_LINK_SPEED_1GB_FULL:
2171 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2172 			break;
2173 		}
2174 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2175 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2176 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2177 		switch (adapter->link_speed) {
2178 		case IXGBE_LINK_SPEED_10GB_FULL:
2179 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2180 			break;
2181 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2182 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2183 			break;
2184 		case IXGBE_LINK_SPEED_1GB_FULL:
2185 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2186 			break;
2187 		}
2188 #else
2189 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2190 		switch (adapter->link_speed) {
2191 		case IXGBE_LINK_SPEED_10GB_FULL:
2192 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2193 			break;
2194 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2195 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2196 			break;
2197 		case IXGBE_LINK_SPEED_1GB_FULL:
2198 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2199 			break;
2200 		}
2201 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2202 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2203 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2204 		switch (adapter->link_speed) {
2205 		case IXGBE_LINK_SPEED_10GB_FULL:
2206 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2207 			break;
2208 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2209 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2210 			break;
2211 		case IXGBE_LINK_SPEED_1GB_FULL:
2212 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2213 			break;
2214 		}
2215 #endif
2216 
2217 	/* If nothing is recognized... */
2218 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2219 		ifmr->ifm_active |= IFM_UNKNOWN;
2220 
2221 	/* Display current flow control setting used on link */
2222 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2223 	    hw->fc.current_mode == ixgbe_fc_full)
2224 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2225 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2226 	    hw->fc.current_mode == ixgbe_fc_full)
2227 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2228 } /* ixgbe_if_media_status */
2229 
2230 /************************************************************************
2231  * ixgbe_if_media_change - Media Ioctl callback
2232  *
2233  *   Called when the user changes speed/duplex using
2234  *   the media/mediaopt options with ifconfig.
2235  ************************************************************************/
2236 static int
2237 ixgbe_if_media_change(if_ctx_t ctx)
2238 {
2239 	struct adapter   *adapter = iflib_get_softc(ctx);
2240 	struct ifmedia   *ifm = iflib_get_media(ctx);
2241 	struct ixgbe_hw  *hw = &adapter->hw;
2242 	ixgbe_link_speed speed = 0;
2243 
2244 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2245 
2246 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2247 		return (EINVAL);
2248 
2249 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2250 		return (EPERM);
2251 
2252 	/*
2253 	 * We don't actually need to check against the supported
2254 	 * media types of the adapter; ifmedia will take care of
2255 	 * that for us.
2256 	 */
2257 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2258 	case IFM_AUTO:
2259 	case IFM_10G_T:
2260 		speed |= IXGBE_LINK_SPEED_100_FULL;
2261 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2262 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2263 		break;
2264 	case IFM_10G_LRM:
2265 	case IFM_10G_LR:
2266 #ifndef IFM_ETH_XTYPE
2267 	case IFM_10G_SR: /* KR, too */
2268 	case IFM_10G_CX4: /* KX4 */
2269 #else
2270 	case IFM_10G_KR:
2271 	case IFM_10G_KX4:
2272 #endif
2273 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2274 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2275 		break;
2276 #ifndef IFM_ETH_XTYPE
2277 	case IFM_1000_CX: /* KX */
2278 #else
2279 	case IFM_1000_KX:
2280 #endif
2281 	case IFM_1000_LX:
2282 	case IFM_1000_SX:
2283 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2284 		break;
2285 	case IFM_1000_T:
2286 		speed |= IXGBE_LINK_SPEED_100_FULL;
2287 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2288 		break;
2289 	case IFM_10G_TWINAX:
2290 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2291 		break;
2292 	case IFM_100_TX:
2293 		speed |= IXGBE_LINK_SPEED_100_FULL;
2294 		break;
2295 	case IFM_10_T:
2296 		speed |= IXGBE_LINK_SPEED_10_FULL;
2297 		break;
2298 	default:
2299 		goto invalid;
2300 	}
2301 
2302 	hw->mac.autotry_restart = TRUE;
2303 	hw->mac.ops.setup_link(hw, speed, TRUE);
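	/*
	 * Record the selection using the same bitmask encoding as the
	 * advertise_speed sysctl: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G,
	 * 0x8 = 10M.
	 */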
2304 	adapter->advertise =
2305 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2306 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2307 	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2308 	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2309 
2310 	return (0);
2311 
2312 invalid:
2313 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2314 
2315 	return (EINVAL);
2316 } /* ixgbe_if_media_change */
2317 
2318 /************************************************************************
2319  * ixgbe_if_promisc_set
2320  ************************************************************************/
2321 static int
2322 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2323 {
2324 	struct adapter *adapter = iflib_get_softc(ctx);
2325 	struct ifnet   *ifp = iflib_get_ifp(ctx);
2326 	u32            rctl;
2327 	int            mcnt = 0;
2328 
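	/*
	 * FCTRL.UPE enables unicast promiscuous mode and FCTRL.MPE
	 * multicast promiscuous mode; start from the current value with
	 * unicast promiscuous cleared and recompute both below.
	 */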
2329 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2330 	rctl &= (~IXGBE_FCTRL_UPE);
2331 	if (ifp->if_flags & IFF_ALLMULTI)
2332 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2333 	else {
2334 		mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2335 	}
2336 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2337 		rctl &= (~IXGBE_FCTRL_MPE);
2338 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2339 
2340 	if (ifp->if_flags & IFF_PROMISC) {
2341 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2342 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2343 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2344 		rctl |= IXGBE_FCTRL_MPE;
2345 		rctl &= ~IXGBE_FCTRL_UPE;
2346 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2347 	}
2348 	return (0);
2349 } /* ixgbe_if_promisc_set */
2350 
2351 /************************************************************************
2352  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2353  ************************************************************************/
2354 static int
2355 ixgbe_msix_link(void *arg)
2356 {
2357 	struct adapter  *adapter = arg;
2358 	struct ixgbe_hw *hw = &adapter->hw;
2359 	u32             eicr, eicr_mask;
2360 	s32             retval;
2361 
2362 	++adapter->link_irq;
2363 
2364 	/* Pause other interrupts */
2365 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2366 
2367 	/* First get the cause */
2368 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2369 	/* Be sure the queue bits are not cleared */
2370 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2371 	/* Clear interrupt with write */
2372 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2373 
2374 	/* Link status change */
2375 	if (eicr & IXGBE_EICR_LSC) {
2376 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2377 		iflib_admin_intr_deferred(adapter->ctx);
2378 	}
2379 
2380 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2381 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2382 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2383 			/* This is probably overkill :) */
2384 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2385 				return (FILTER_HANDLED);
2386 			/* Disable the interrupt */
2387 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2388 			GROUPTASK_ENQUEUE(&adapter->fdir_task);
2389 		} else
2390 			if (eicr & IXGBE_EICR_ECC) {
2391 				device_printf(iflib_get_dev(adapter->ctx),
2392 				   "\nCRITICAL: ECC ERROR!! Please Reboot!!\n");
2393 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2394 			}
2395 
2396 		/* Check for over temp condition */
2397 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2398 			switch (adapter->hw.mac.type) {
2399 			case ixgbe_mac_X550EM_a:
2400 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2401 					break;
2402 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2403 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2404 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2405 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2406 				retval = hw->phy.ops.check_overtemp(hw);
2407 				if (retval != IXGBE_ERR_OVERTEMP)
2408 					break;
2409 				device_printf(iflib_get_dev(adapter->ctx),
2410 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2411 				device_printf(iflib_get_dev(adapter->ctx),
2412 				    "System shutdown required!\n");
2413 				break;
2414 			default:
2415 				if (!(eicr & IXGBE_EICR_TS))
2416 					break;
2417 				retval = hw->phy.ops.check_overtemp(hw);
2418 				if (retval != IXGBE_ERR_OVERTEMP)
2419 					break;
2420 				device_printf(iflib_get_dev(adapter->ctx),
2421 				    "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2422 				device_printf(iflib_get_dev(adapter->ctx),
2423 				    "System shutdown required!\n");
2424 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2425 				break;
2426 			}
2427 		}
2428 
2429 		/* Check for VF message */
2430 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2431 		    (eicr & IXGBE_EICR_MAILBOX))
2432 			GROUPTASK_ENQUEUE(&adapter->mbx_task);
2433 	}
2434 
2435 	if (ixgbe_is_sfp(hw)) {
2436 		/* Pluggable optics-related interrupt */
2437 		if (hw->mac.type >= ixgbe_mac_X540)
2438 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2439 		else
2440 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2441 
2442 		if (eicr & eicr_mask) {
2443 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2444 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2445 				GROUPTASK_ENQUEUE(&adapter->mod_task);
2446 		}
2447 
2448 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2449 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2450 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2451 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2452 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
2453 				GROUPTASK_ENQUEUE(&adapter->msf_task);
2454 		}
2455 	}
2456 
2457 	/* Check for fan failure */
2458 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2459 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
2460 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2461 	}
2462 
2463 	/* External PHY interrupt */
2464 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2465 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2466 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2467 		GROUPTASK_ENQUEUE(&adapter->phy_task);
2468 	}
2469 
2470 	/* Re-enable other interrupts */
2471 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2472 
2473 	return (FILTER_HANDLED);
2474 } /* ixgbe_msix_link */
2475 
2476 /************************************************************************
2477  * ixgbe_sysctl_interrupt_rate_handler
2478  ************************************************************************/
2479 static int
2480 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2481 {
2482 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2483 	int                error;
2484 	unsigned int       reg, usec, rate;
2485 
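	/*
	 * The EITR interval field occupies bits 3..11 and is counted in
	 * (roughly) 2-microsecond units, so the effective interrupt rate
	 * is about 500000 / interval per second.
	 */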
2486 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2487 	usec = ((reg & 0x0FF8) >> 3);
2488 	if (usec > 0)
2489 		rate = 500000 / usec;
2490 	else
2491 		rate = 0;
2492 	error = sysctl_handle_int(oidp, &rate, 0, req);
2493 	if (error || !req->newptr)
2494 		return error;
2495 	reg &= ~0xfff; /* default, no limitation */
2496 	ixgbe_max_interrupt_rate = 0;
2497 	if (rate > 0 && rate < 500000) {
2498 		if (rate < 1000)
2499 			rate = 1000;
2500 		ixgbe_max_interrupt_rate = rate;
2501 		reg |= ((4000000/rate) & 0xff8);
2502 	}
2503 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2504 
2505 	return (0);
2506 } /* ixgbe_sysctl_interrupt_rate_handler */
2507 
2508 /************************************************************************
2509  * ixgbe_add_device_sysctls
2510  ************************************************************************/
2511 static void
2512 ixgbe_add_device_sysctls(if_ctx_t ctx)
2513 {
2514 	struct adapter         *adapter = iflib_get_softc(ctx);
2515 	device_t               dev = iflib_get_dev(ctx);
2516 	struct ixgbe_hw        *hw = &adapter->hw;
2517 	struct sysctl_oid_list *child;
2518 	struct sysctl_ctx_list *ctx_list;
2519 
2520 	ctx_list = device_get_sysctl_ctx(dev);
2521 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2522 
2523 	/* Sysctls for all devices */
2524 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2525 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_flowcntl, "I",
2526 	    IXGBE_SYSCTL_DESC_SET_FC);
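	/*
	 * Example usage (assuming unit 0): "sysctl dev.ix.0.fc=3"
	 * typically selects full rx/tx pause; see the value map in
	 * IXGBE_SYSCTL_DESC_SET_FC.
	 */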
2527 
2528 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2529 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2530 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2531 
2532 #ifdef IXGBE_DEBUG
2533 	/* testing sysctls (for all devices) */
2534 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2535 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2536 	    "I", "PCI Power State");
2537 
2538 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2539 	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2540 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2541 #endif
2542 	/* for X550 series devices */
2543 	if (hw->mac.type >= ixgbe_mac_X550)
2544 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2545 		    CTLTYPE_U16 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2546 		    "I", "DMA Coalesce");
2547 
2548 	/* for WoL-capable devices */
2549 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2550 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2551 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2552 		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2553 
2554 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2555 		    CTLTYPE_U32 | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2556 		    "I", "Enable/Disable Wake Up Filters");
2557 	}
2558 
2559 	/* for X552/X557-AT devices */
2560 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2561 		struct sysctl_oid *phy_node;
2562 		struct sysctl_oid_list *phy_list;
2563 
2564 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2565 		    CTLFLAG_RD, NULL, "External PHY sysctls");
2566 		phy_list = SYSCTL_CHILDREN(phy_node);
2567 
2568 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2569 		    CTLTYPE_U16 | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2570 		    "I", "Current External PHY Temperature (Celsius)");
2571 
2572 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2573 		    "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD, adapter, 0,
2574 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2575 		    "External PHY High Temperature Event Occurred");
2576 	}
2577 
2578 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2579 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2580 		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2581 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2582 	}
2583 } /* ixgbe_add_device_sysctls */
2584 
2585 /************************************************************************
2586  * ixgbe_allocate_pci_resources
2587  ************************************************************************/
2588 static int
2589 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2590 {
2591 	struct adapter *adapter = iflib_get_softc(ctx);
2592 	device_t        dev = iflib_get_dev(ctx);
2593 	int             rid;
2594 
2595 	rid = PCIR_BAR(0);
2596 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2597 	    RF_ACTIVE);
2598 
2599 	if (!(adapter->pci_mem)) {
2600 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2601 		return (ENXIO);
2602 	}
2603 
2604 	/* Save bus_space values for READ/WRITE_REG macros */
2605 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2606 	adapter->osdep.mem_bus_space_handle =
2607 	    rman_get_bushandle(adapter->pci_mem);
2608 	/* Set hw values for shared code */
2609 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2610 
2611 	return (0);
2612 } /* ixgbe_allocate_pci_resources */
2613 
2614 /************************************************************************
2615  * ixgbe_if_detach - Device removal routine
2616  *
2617  *   Called when the driver is being removed.
2618  *   Stops the adapter and deallocates all the resources
2619  *   that were allocated for driver operation.
2620  *
2621  *   return 0 on success, positive on failure
2622  ************************************************************************/
2623 static int
2624 ixgbe_if_detach(if_ctx_t ctx)
2625 {
2626 	struct adapter *adapter = iflib_get_softc(ctx);
2627 	device_t       dev = iflib_get_dev(ctx);
2628 	u32            ctrl_ext;
2629 
2630 	INIT_DEBUGOUT("ixgbe_detach: begin");
2631 
2632 	if (ixgbe_pci_iov_detach(dev) != 0) {
2633 		device_printf(dev, "SR-IOV in use; detach first.\n");
2634 		return (EBUSY);
2635 	}
2636 
2637 	iflib_config_gtask_deinit(&adapter->mod_task);
2638 	iflib_config_gtask_deinit(&adapter->msf_task);
2639 	iflib_config_gtask_deinit(&adapter->phy_task);
2640 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2641 		iflib_config_gtask_deinit(&adapter->mbx_task);
2642 
2643 	ixgbe_setup_low_power_mode(ctx);
2644 
2645 	/* let hardware know driver is unloading */
2646 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2647 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2648 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2649 
2650 	ixgbe_free_pci_resources(ctx);
2651 	free(adapter->mta, M_IXGBE);
2652 
2653 	return (0);
2654 } /* ixgbe_if_detach */
2655 
2656 /************************************************************************
2657  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2658  *
2659  *   Prepare the adapter/port for LPLU and/or WoL
2660  ************************************************************************/
2661 static int
2662 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2663 {
2664 	struct adapter  *adapter = iflib_get_softc(ctx);
2665 	struct ixgbe_hw *hw = &adapter->hw;
2666 	device_t        dev = iflib_get_dev(ctx);
2667 	s32             error = 0;
2668 
2669 	if (!hw->wol_enabled)
2670 		ixgbe_set_phy_power(hw, FALSE);
2671 
2672 	/* Limit power management flow to X550EM baseT */
2673 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2674 	    hw->phy.ops.enter_lplu) {
2675 		/* Turn off support for APM wakeup. (Using ACPI instead) */
2676 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
2677 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2678 
2679 		/*
2680 		 * Clear Wake Up Status register to prevent any previous wakeup
2681 		 * events from waking us up immediately after we suspend.
2682 		 */
2683 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2684 
2685 		/*
2686 		 * Program the Wakeup Filter Control register with user filter
2687 		 * settings
2688 		 */
2689 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2690 
2691 		/* Enable wakeups and power management in Wakeup Control */
2692 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
2693 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2694 
2695 		/* X550EM baseT adapters need a special LPLU flow */
2696 		hw->phy.reset_disable = TRUE;
2697 		ixgbe_if_stop(ctx);
2698 		error = hw->phy.ops.enter_lplu(hw);
2699 		if (error)
2700 			device_printf(dev, "Error entering LPLU: %d\n", error);
2701 		hw->phy.reset_disable = FALSE;
2702 	} else {
2703 		/* Just stop for other adapters */
2704 		ixgbe_if_stop(ctx);
2705 	}
2706 
2707 	return error;
2708 } /* ixgbe_setup_low_power_mode */
2709 
2710 /************************************************************************
2711  * ixgbe_if_shutdown - Shutdown entry point
2712  ************************************************************************/
2713 static int
2714 ixgbe_if_shutdown(if_ctx_t ctx)
2715 {
2716 	int error = 0;
2717 
2718 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2719 
2720 	error = ixgbe_setup_low_power_mode(ctx);
2721 
2722 	return (error);
2723 } /* ixgbe_if_shutdown */
2724 
2725 /************************************************************************
2726  * ixgbe_if_suspend
2727  *
2728  *   From D0 to D3
2729  ************************************************************************/
2730 static int
2731 ixgbe_if_suspend(if_ctx_t ctx)
2732 {
2733 	int error = 0;
2734 
2735 	INIT_DEBUGOUT("ixgbe_suspend: begin");
2736 
2737 	error = ixgbe_setup_low_power_mode(ctx);
2738 
2739 	return (error);
2740 } /* ixgbe_if_suspend */
2741 
2742 /************************************************************************
2743  * ixgbe_if_resume
2744  *
2745  *   From D3 to D0
2746  ************************************************************************/
2747 static int
2748 ixgbe_if_resume(if_ctx_t ctx)
2749 {
2750 	struct adapter  *adapter = iflib_get_softc(ctx);
2751 	device_t        dev = iflib_get_dev(ctx);
2752 	struct ifnet    *ifp = iflib_get_ifp(ctx);
2753 	struct ixgbe_hw *hw = &adapter->hw;
2754 	u32             wus;
2755 
2756 	INIT_DEBUGOUT("ixgbe_resume: begin");
2757 
2758 	/* Read & clear WUS register */
2759 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2760 	if (wus)
2761 		device_printf(dev, "Woken up by (WUS): %#010x\n",
2762 		    wus);
2763 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2764 	/* And clear WUFC until next low-power transition */
2765 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2766 
2767 	/*
2768 	 * Required after D3->D0 transition;
2769 	 * will re-advertise all previous advertised speeds
2770 	 */
2771 	if (ifp->if_flags & IFF_UP)
2772 		ixgbe_if_init(ctx);
2773 
2774 	return (0);
2775 } /* ixgbe_if_resume */
2776 
2777 /************************************************************************
2778  * ixgbe_if_mtu_set - Ioctl mtu entry point
2779  *
2780  *   Return 0 on success, EINVAL on failure
2781  ************************************************************************/
2782 static int
2783 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
2784 {
2785 	struct adapter *adapter = iflib_get_softc(ctx);
2786 	int error = 0;
2787 
2788 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
2789 
2790 	if (mtu > IXGBE_MAX_MTU) {
2791 		error = EINVAL;
2792 	} else {
2793 		adapter->max_frame_size = mtu + IXGBE_MTU_HDR;
2794 	}
2795 
2796 	return error;
2797 } /* ixgbe_if_mtu_set */
2798 
2799 /************************************************************************
2800  * ixgbe_if_crcstrip_set
2801  ************************************************************************/
2802 static void
2803 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
2804 {
2805 	struct adapter *sc = iflib_get_softc(ctx);
2806 	struct ixgbe_hw *hw = &sc->hw;
2807 	/* crc stripping is set in two places:
2808 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
2809 	 * IXGBE_RDRXCTL (set by the original driver in
2810 	 *	ixgbe_setup_hw_rsc() called in init_locked.
2811 	 *	We disable the setting when netmap is compiled in).
2812 	 * We update the values here, but also in ixgbe.c because
2813 	 * init_locked sometimes is called outside our control.
2814 	 */
2815 	uint32_t hl, rxc;
2816 
2817 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2818 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2819 #ifdef NETMAP
2820 	if (netmap_verbose)
2821 		D("%s read  HLREG 0x%x rxc 0x%x",
2822 			onoff ? "enter" : "exit", hl, rxc);
2823 #endif
2824 	/* hw requirements ... */
2825 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2826 	rxc |= IXGBE_RDRXCTL_RSCACKC;
2827 	if (onoff && !crcstrip) {
2828 		/* keep the crc. Fast rx */
2829 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
2830 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
2831 	} else {
2832 		/* reset default mode */
2833 		hl |= IXGBE_HLREG0_RXCRCSTRP;
2834 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
2835 	}
2836 #ifdef NETMAP
2837 	if (netmap_verbose)
2838 		D("%s write HLREG 0x%x rxc 0x%x",
2839 			onoff ? "enter" : "exit", hl, rxc);
2840 #endif
2841 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
2842 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
2843 } /* ixgbe_if_crcstrip_set */
2844 
2845 /*********************************************************************
2846  * ixgbe_if_init - Init entry point
2847  *
2848  *   Used in two ways: It is used by the stack as an init
2849  *   entry point in network interface structure. It is also
2850  *   used by the driver as a hw/sw initialization routine to
2851  *   get to a consistent state.
2854  **********************************************************************/
2855 void
2856 ixgbe_if_init(if_ctx_t ctx)
2857 {
2858 	struct adapter     *adapter = iflib_get_softc(ctx);
2859 	struct ifnet       *ifp = iflib_get_ifp(ctx);
2860 	device_t           dev = iflib_get_dev(ctx);
2861 	struct ixgbe_hw *hw = &adapter->hw;
2862 	struct ix_rx_queue *rx_que;
2863 	struct ix_tx_queue *tx_que;
2864 	u32             txdctl, mhadd;
2865 	u32             rxdctl, rxctrl;
2866 	u32             ctrl_ext;
2867 
2868 	int             i, j, err;
2869 
2870 	INIT_DEBUGOUT("ixgbe_if_init: begin");
2871 
2872 	/* Queue indices may change with IOV mode */
2873 	ixgbe_align_all_queue_indices(adapter);
2874 
2875 	/* reprogram the RAR[0] in case user changed it. */
2876 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2877 
2878 	/* Get the latest mac address, User can use a LAA */
2879 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2880 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2881 	hw->addr_ctrl.rar_used_count = 1;
2882 
2883 	ixgbe_init_hw(hw);
2884 
2885 	ixgbe_initialize_iov(adapter);
2886 
2887 	ixgbe_initialize_transmit_units(ctx);
2888 
2889 	/* Setup Multicast table */
2890 	ixgbe_if_multi_set(ctx);
2891 
2892 	/* Determine the correct mbuf pool, based on frame size */
2893 	if (adapter->max_frame_size <= MCLBYTES)
2894 		adapter->rx_mbuf_sz = MCLBYTES;
2895 	else
2896 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
2897 
2898 	/* Configure RX settings */
2899 	ixgbe_initialize_receive_units(ctx);
2900 
2901 	/* Enable SDP & MSI-X interrupts based on adapter */
2902 	ixgbe_config_gpie(adapter);
2903 
2904 	/* Set MTU size */
2905 	if (ifp->if_mtu > ETHERMTU) {
2906 		/* aka IXGBE_MAXFRS on 82599 and newer */
2907 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2908 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
2909 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2910 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2911 	}
2912 
2913 	/* Now enable all the queues */
2914 	for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) {
2915 		struct tx_ring *txr = &tx_que->txr;
2916 
2917 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2918 		txdctl |= IXGBE_TXDCTL_ENABLE;
2919 		/* Set WTHRESH to 8, burst writeback */
2920 		txdctl |= (8 << 16);
2921 		/*
2922 		 * When the internal queue falls below PTHRESH (32),
2923 		 * start prefetching as long as there are at least
2924 		 * HTHRESH (1) buffers ready. The values are taken
2925 		 * from the Intel linux driver 3.8.21.
2926 		 * Prefetching enables tx line rate even with 1 queue.
2927 		 */
2928 		txdctl |= (32 << 0) | (1 << 8);
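		/*
		 * Editor's note (arithmetic illustration): per the shifts
		 * above, the composed value adds PTHRESH = 32, HTHRESH = 1
		 * and WTHRESH = 8, i.e.
		 *   (32 << 0) | (1 << 8) | (8 << 16) = 0x00080120
		 * on top of the ENABLE bit already set in txdctl.
		 */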
2929 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2930 	}
2931 
2932 	for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) {
2933 		struct rx_ring *rxr = &rx_que->rxr;
2934 
2935 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2936 		if (hw->mac.type == ixgbe_mac_82598EB) {
2937 			/*
2938 			 * PTHRESH = 21
2939 			 * HTHRESH = 4
2940 			 * WTHRESH = 8
2941 			 */
2942 			rxdctl &= ~0x3FFFFF;
2943 			rxdctl |= 0x080420;
2944 		}
2945 		rxdctl |= IXGBE_RXDCTL_ENABLE;
2946 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2947 		for (j = 0; j < 10; j++) {
2948 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2949 			    IXGBE_RXDCTL_ENABLE)
2950 				break;
2951 			else
2952 				msec_delay(1);
2953 		}
2954 		wmb();
2955 	}
2956 
2957 	/* Enable Receive engine */
2958 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2959 	if (hw->mac.type == ixgbe_mac_82598EB)
2960 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
2961 	rxctrl |= IXGBE_RXCTRL_RXEN;
2962 	ixgbe_enable_rx_dma(hw, rxctrl);
2963 
2964 	/* Set up MSI/MSI-X routing */
2965 	if (ixgbe_enable_msix)  {
2966 		ixgbe_configure_ivars(adapter);
2967 		/* Set up auto-mask */
2968 		if (hw->mac.type == ixgbe_mac_82598EB)
2969 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2970 		else {
2971 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2972 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2973 		}
2974 	} else {  /* Simple settings for Legacy/MSI */
2975 		ixgbe_set_ivar(adapter, 0, 0, 0);
2976 		ixgbe_set_ivar(adapter, 0, 0, 1);
2977 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2978 	}
2979 
2980 	ixgbe_init_fdir(adapter);
2981 
2982 	/*
2983 	 * Check on any SFP devices that
2984 	 * need to be kick-started
2985 	 */
2986 	if (hw->phy.type == ixgbe_phy_none) {
2987 		err = hw->phy.ops.identify(hw);
2988 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2989 			device_printf(dev,
2990 			    "Unsupported SFP+ module type was detected.\n");
2991 			return;
2992 		}
2993 	}
2994 
2995 	/* Set moderation on the Link interrupt */
2996 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
2997 
2998 	/* Enable power to the phy. */
2999 	ixgbe_set_phy_power(hw, TRUE);
3000 
3001 	/* Config/Enable Link */
3002 	ixgbe_config_link(adapter);
3003 
3004 	/* Hardware Packet Buffer & Flow Control setup */
3005 	ixgbe_config_delay_values(adapter);
3006 
3007 	/* Initialize the FC settings */
3008 	ixgbe_start_hw(hw);
3009 
3010 	/* Set up VLAN support and filter */
3011 	ixgbe_setup_vlan_hw_support(ctx);
3012 
3013 	/* Setup DMA Coalescing */
3014 	ixgbe_config_dmac(adapter);
3015 
3016 	/* And now turn on interrupts */
3017 	ixgbe_if_enable_intr(ctx);
3018 
3019 	/* Enable the use of the MBX by the VFs */
3020 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3021 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3022 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3023 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3024 	}
3025 
3026 } /* ixgbe_if_init */
3027 
3028 /************************************************************************
3029  * ixgbe_set_ivar
3030  *
3031  *   Setup the correct IVAR register for a particular MSI-X interrupt
3032  *     (yes this is all very magic and confusing :)
3033  *    - entry is the register array entry
3034  *    - vector is the MSI-X vector for this queue
3035  *    - type is RX/TX/MISC
3036  ************************************************************************/
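/*
 * Illustrative example (editorial note): on the 82599-class MACs each
 * IVAR register packs four 8-bit entries.  Mapping RX queue 5 (entry = 5,
 * type = 0) to MSI-X vector 3 computes
 *     index = (16 * (5 & 1)) + (8 * 0) = 16
 * and writes (3 | IXGBE_IVAR_ALLOC_VAL) into bits 23:16 of IVAR(5 >> 1),
 * i.e. IVAR(2), which is exactly what the RX/TX branch below does.
 */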
3037 static void
3038 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3039 {
3040 	struct ixgbe_hw *hw = &adapter->hw;
3041 	u32 ivar, index;
3042 
3043 	vector |= IXGBE_IVAR_ALLOC_VAL;
3044 
3045 	switch (hw->mac.type) {
3046 	case ixgbe_mac_82598EB:
3047 		if (type == -1)
3048 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3049 		else
3050 			entry += (type * 64);
3051 		index = (entry >> 2) & 0x1F;
3052 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3053 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3054 		ivar |= (vector << (8 * (entry & 0x3)));
3055 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3056 		break;
3057 	case ixgbe_mac_82599EB:
3058 	case ixgbe_mac_X540:
3059 	case ixgbe_mac_X550:
3060 	case ixgbe_mac_X550EM_x:
3061 	case ixgbe_mac_X550EM_a:
3062 		if (type == -1) { /* MISC IVAR */
3063 			index = (entry & 1) * 8;
3064 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3065 			ivar &= ~(0xFF << index);
3066 			ivar |= (vector << index);
3067 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3068 		} else {          /* RX/TX IVARS */
3069 			index = (16 * (entry & 1)) + (8 * type);
3070 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3071 			ivar &= ~(0xFF << index);
3072 			ivar |= (vector << index);
3073 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3074 		}
3075 	default:
3076 		break;
3077 	}
3078 } /* ixgbe_set_ivar */
3079 
3080 /************************************************************************
3081  * ixgbe_configure_ivars
3082  ************************************************************************/
3083 static void
3084 ixgbe_configure_ivars(struct adapter *adapter)
3085 {
3086 	struct ix_rx_queue *rx_que = adapter->rx_queues;
3087 	struct ix_tx_queue *tx_que = adapter->tx_queues;
3088 	u32                newitr;
3089 
3090 	if (ixgbe_max_interrupt_rate > 0)
3091 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3092 	else {
3093 		/*
3094 		 * Disable DMA coalescing if interrupt moderation is
3095 		 * disabled.
3096 		 */
3097 		adapter->dmac = 0;
3098 		newitr = 0;
3099 	}
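	/*
	 * Editor's note (worked example): if the cap were 31250
	 * interrupts/s, newitr = 4000000 / 31250 = 128; the & 0x0FF8 mask
	 * simply keeps the value aligned to the EITR interval field, whose
	 * low bits are not used.
	 */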
3100 
3101 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
3102 		struct rx_ring *rxr = &rx_que->rxr;
3103 
3104 		/* First the RX queue entry */
3105 		ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0);
3106 
3107 		/* Set an Initial EITR value */
3108 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr);
3109 	}
3110 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
3111 		struct tx_ring *txr = &tx_que->txr;
3112 
3113 		/* ... and the TX */
3114 		ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1);
3115 	}
3116 	/* For the Link interrupt */
3117 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3118 } /* ixgbe_configure_ivars */
3119 
3120 /************************************************************************
3121  * ixgbe_config_gpie
3122  ************************************************************************/
3123 static void
3124 ixgbe_config_gpie(struct adapter *adapter)
3125 {
3126 	struct ixgbe_hw *hw = &adapter->hw;
3127 	u32             gpie;
3128 
3129 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3130 
3131 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3132 		/* Enable Enhanced MSI-X mode */
3133 		gpie |= IXGBE_GPIE_MSIX_MODE
3134 		     |  IXGBE_GPIE_EIAME
3135 		     |  IXGBE_GPIE_PBA_SUPPORT
3136 		     |  IXGBE_GPIE_OCD;
3137 	}
3138 
3139 	/* Fan Failure Interrupt */
3140 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3141 		gpie |= IXGBE_SDP1_GPIEN;
3142 
3143 	/* Thermal Sensor Interrupt */
3144 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3145 		gpie |= IXGBE_SDP0_GPIEN_X540;
3146 
3147 	/* Link detection */
3148 	switch (hw->mac.type) {
3149 	case ixgbe_mac_82599EB:
3150 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3151 		break;
3152 	case ixgbe_mac_X550EM_x:
3153 	case ixgbe_mac_X550EM_a:
3154 		gpie |= IXGBE_SDP0_GPIEN_X540;
3155 		break;
3156 	default:
3157 		break;
3158 	}
3159 
3160 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3161 
3162 } /* ixgbe_config_gpie */
3163 
3164 /************************************************************************
3165  * ixgbe_config_delay_values
3166  *
3167  *   Requires adapter->max_frame_size to be set.
3168  ************************************************************************/
3169 static void
3170 ixgbe_config_delay_values(struct adapter *adapter)
3171 {
3172 	struct ixgbe_hw *hw = &adapter->hw;
3173 	u32             rxpb, frame, size, tmp;
3174 
3175 	frame = adapter->max_frame_size;
3176 
3177 	/* Calculate High Water */
3178 	switch (hw->mac.type) {
3179 	case ixgbe_mac_X540:
3180 	case ixgbe_mac_X550:
3181 	case ixgbe_mac_X550EM_x:
3182 	case ixgbe_mac_X550EM_a:
3183 		tmp = IXGBE_DV_X540(frame, frame);
3184 		break;
3185 	default:
3186 		tmp = IXGBE_DV(frame, frame);
3187 		break;
3188 	}
3189 	size = IXGBE_BT2KB(tmp);
3190 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3191 	hw->fc.high_water[0] = rxpb - size;
3192 
3193 	/* Now calculate Low Water */
3194 	switch (hw->mac.type) {
3195 	case ixgbe_mac_X540:
3196 	case ixgbe_mac_X550:
3197 	case ixgbe_mac_X550EM_x:
3198 	case ixgbe_mac_X550EM_a:
3199 		tmp = IXGBE_LOW_DV_X540(frame);
3200 		break;
3201 	default:
3202 		tmp = IXGBE_LOW_DV(frame);
3203 		break;
3204 	}
3205 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3206 
3207 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3208 	hw->fc.send_xon = TRUE;
3209 } /* ixgbe_config_delay_values */
3210 
3211 /************************************************************************
3212  * ixgbe_if_multi_set - Multicast Update
3213  *
3214  *   Called whenever the multicast address list is updated.
3215  ************************************************************************/
3216 static int
3217 ixgbe_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count)
3218 {
3219 	struct adapter *adapter = arg;
3220 	struct ixgbe_mc_addr *mta = adapter->mta;
3221 
3222 	if (ifma->ifma_addr->sa_family != AF_LINK)
3223 		return (0);
3224 	if (count == MAX_NUM_MULTICAST_ADDRESSES)
3225 		return (0);
3226 	bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3227 	    mta[count].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3228 	mta[count].vmdq = adapter->pool;
3229 
3230 	return (1);
3231 } /* ixgbe_mc_filter_apply */
3232 
3233 static void
3234 ixgbe_if_multi_set(if_ctx_t ctx)
3235 {
3236 	struct adapter       *adapter = iflib_get_softc(ctx);
3237 	struct ixgbe_mc_addr *mta;
3238 	struct ifnet         *ifp = iflib_get_ifp(ctx);
3239 	u8                   *update_ptr;
3240 	int                  mcnt = 0;
3241 	u32                  fctrl;
3242 
3243 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3244 
3245 	mta = adapter->mta;
3246 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3247 
3248 	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter);
3249 
3250 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3252 	if (ifp->if_flags & IFF_PROMISC)
3253 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3254 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3255 	    ifp->if_flags & IFF_ALLMULTI) {
3256 		fctrl |= IXGBE_FCTRL_MPE;
3257 		fctrl &= ~IXGBE_FCTRL_UPE;
3258 	} else
3259 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3260 
3261 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3262 
3263 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3264 		update_ptr = (u8 *)mta;
3265 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3266 		    ixgbe_mc_array_itr, TRUE);
3267 	}
3268 
3269 } /* ixgbe_if_multi_set */
3270 
3271 /************************************************************************
3272  * ixgbe_mc_array_itr
3273  *
3274  *   An iterator function needed by the multicast shared code.
3275  *   It feeds the shared code routine the addresses in the
3276  *   array of ixgbe_set_multi() one by one.
3277  ************************************************************************/
3278 static u8 *
3279 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3280 {
3281 	struct ixgbe_mc_addr *mta;
3282 
3283 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3284 	*vmdq = mta->vmdq;
3285 
3286 	*update_ptr = (u8*)(mta + 1);
3287 
3288 	return (mta->addr);
3289 } /* ixgbe_mc_array_itr */
3290 
3291 /************************************************************************
3292  * ixgbe_if_timer - Timer routine
3293  *
3294  *   Checks for link status, updates statistics,
3295  *   and runs the watchdog check.
3296  ************************************************************************/
3297 static void
3298 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3299 {
3300 	struct adapter *adapter = iflib_get_softc(ctx);
3301 
3302 	if (qid != 0)
3303 		return;
3304 
3305 	/* Check for pluggable optics */
3306 	if (adapter->sfp_probe)
3307 		if (!ixgbe_sfp_probe(ctx))
3308 			return; /* Nothing to do */
3309 
3310 	ixgbe_check_link(&adapter->hw, &adapter->link_speed,
3311 	    &adapter->link_up, 0);
3312 
3313 	/* Fire off the adminq task */
3314 	iflib_admin_intr_deferred(ctx);
3315 
3316 } /* ixgbe_if_timer */
3317 
3318 /************************************************************************
3319  * ixgbe_sfp_probe
3320  *
3321  *   Determine if a port had optics inserted.
3322  ************************************************************************/
3323 static bool
3324 ixgbe_sfp_probe(if_ctx_t ctx)
3325 {
3326 	struct adapter  *adapter = iflib_get_softc(ctx);
3327 	struct ixgbe_hw *hw = &adapter->hw;
3328 	device_t        dev = iflib_get_dev(ctx);
3329 	bool            result = FALSE;
3330 
3331 	if ((hw->phy.type == ixgbe_phy_nl) &&
3332 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3333 		s32 ret = hw->phy.ops.identify_sfp(hw);
3334 		if (ret)
3335 			goto out;
3336 		ret = hw->phy.ops.reset(hw);
3337 		adapter->sfp_probe = FALSE;
3338 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3339 			device_printf(dev, "Unsupported SFP+ module detected!\n");
3340 			device_printf(dev,
3341 			    "Reload driver with supported module.\n");
3342 			goto out;
3343 		} else
3344 			device_printf(dev, "SFP+ module detected!\n");
3345 		/* We now have supported optics */
3346 		result = TRUE;
3347 	}
3348 out:
3349 
3350 	return (result);
3351 } /* ixgbe_sfp_probe */
3352 
3353 /************************************************************************
3354  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3355  ************************************************************************/
3356 static void
3357 ixgbe_handle_mod(void *context)
3358 {
3359 	if_ctx_t        ctx = context;
3360 	struct adapter  *adapter = iflib_get_softc(ctx);
3361 	struct ixgbe_hw *hw = &adapter->hw;
3362 	device_t        dev = iflib_get_dev(ctx);
3363 	u32             err, cage_full = 0;
3364 
3365 	adapter->sfp_reinit = 1;
3366 	if (adapter->hw.need_crosstalk_fix) {
3367 		switch (hw->mac.type) {
3368 		case ixgbe_mac_82599EB:
3369 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3370 			    IXGBE_ESDP_SDP2;
3371 			break;
3372 		case ixgbe_mac_X550EM_x:
3373 		case ixgbe_mac_X550EM_a:
3374 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3375 			    IXGBE_ESDP_SDP0;
3376 			break;
3377 		default:
3378 			break;
3379 		}
3380 
3381 		if (!cage_full)
3382 			goto handle_mod_out;
3383 	}
3384 
3385 	err = hw->phy.ops.identify_sfp(hw);
3386 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3387 		device_printf(dev,
3388 		    "Unsupported SFP+ module type was detected.\n");
3389 		goto handle_mod_out;
3390 	}
3391 
3392 	if (hw->mac.type == ixgbe_mac_82598EB)
3393 		err = hw->phy.ops.reset(hw);
3394 	else
3395 		err = hw->mac.ops.setup_sfp(hw);
3396 
3397 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3398 		device_printf(dev,
3399 		    "Setup failure - unsupported SFP+ module type.\n");
3400 		goto handle_mod_out;
3401 	}
3402 	GROUPTASK_ENQUEUE(&adapter->msf_task);
3403 	return;
3404 
3405 handle_mod_out:
3406 	adapter->sfp_reinit = 0;
3407 } /* ixgbe_handle_mod */
3408 
3409 
3410 /************************************************************************
3411  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3412  ************************************************************************/
3413 static void
3414 ixgbe_handle_msf(void *context)
3415 {
3416 	if_ctx_t        ctx = context;
3417 	struct adapter  *adapter = iflib_get_softc(ctx);
3418 	struct ixgbe_hw *hw = &adapter->hw;
3419 	u32             autoneg;
3420 	bool            negotiate;
3421 
3422 	if (adapter->sfp_reinit != 1)
3423 		return;
3424 
3425 	/* ixgbe_get_supported_physical_layer() will call hw->phy.ops.identify_sfp() */
3426 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3427 
3428 	autoneg = hw->phy.autoneg_advertised;
3429 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3430 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3431 	if (hw->mac.ops.setup_link)
3432 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3433 
3434 	/* Adjust media types shown in ifconfig */
3435 	ifmedia_removeall(adapter->media);
3436 	ixgbe_add_media_types(adapter->ctx);
3437 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3438 
3439 	adapter->sfp_reinit = 0;
3440 } /* ixgbe_handle_msf */
3441 
3442 /************************************************************************
3443  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3444  ************************************************************************/
3445 static void
3446 ixgbe_handle_phy(void *context)
3447 {
3448 	if_ctx_t        ctx = context;
3449 	struct adapter  *adapter = iflib_get_softc(ctx);
3450 	struct ixgbe_hw *hw = &adapter->hw;
3451 	int             error;
3452 
3453 	error = hw->phy.ops.handle_lasi(hw);
3454 	if (error == IXGBE_ERR_OVERTEMP)
3455 		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3456 	else if (error)
3457 		device_printf(adapter->dev,
3458 		    "Error handling LASI interrupt: %d\n", error);
3459 } /* ixgbe_handle_phy */
3460 
3461 /************************************************************************
3462  * ixgbe_if_stop - Stop the hardware
3463  *
3464  *   Disables all traffic on the adapter by issuing a
3465  *   global reset on the MAC and deallocates TX/RX buffers.
3466  ************************************************************************/
3467 static void
3468 ixgbe_if_stop(if_ctx_t ctx)
3469 {
3470 	struct adapter  *adapter = iflib_get_softc(ctx);
3471 	struct ixgbe_hw *hw = &adapter->hw;
3472 
3473 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3474 
3475 	ixgbe_reset_hw(hw);
3476 	hw->adapter_stopped = FALSE;
3477 	ixgbe_stop_adapter(hw);
3478 	if (hw->mac.type == ixgbe_mac_82599EB)
3479 		ixgbe_stop_mac_link_on_d3_82599(hw);
3480 	/* Turn off the laser - noop with no optics */
3481 	ixgbe_disable_tx_laser(hw);
3482 
3483 	/* Update the stack */
3484 	adapter->link_up = FALSE;
3485 	ixgbe_if_update_admin_status(ctx);
3486 
3487 	/* reprogram the RAR[0] in case user changed it. */
3488 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3489 
3490 	return;
3491 } /* ixgbe_if_stop */
3492 
3493 /************************************************************************
3494  * ixgbe_if_update_admin_status - Update OS on link state
3495  *
3496  * Note: Only updates the OS on the cached link state.
3497  *       The real check of the hardware only happens with
3498  *       a link interrupt.
3499  ************************************************************************/
3500 static void
3501 ixgbe_if_update_admin_status(if_ctx_t ctx)
3502 {
3503 	struct adapter *adapter = iflib_get_softc(ctx);
3504 	device_t       dev = iflib_get_dev(ctx);
3505 
3506 	if (adapter->link_up) {
3507 		if (adapter->link_active == FALSE) {
3508 			if (bootverbose)
3509 				device_printf(dev, "Link is up %d Gbps %s \n",
3510 				    ((adapter->link_speed == 128) ? 10 : 1),
3511 				    "Full Duplex");
3512 			adapter->link_active = TRUE;
3513 			/* Update any Flow Control changes */
3514 			ixgbe_fc_enable(&adapter->hw);
3515 			/* Update DMA coalescing config */
3516 			ixgbe_config_dmac(adapter);
3517 			/* should actually be negotiated value */
3518 			iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10));
3519 
3520 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3521 				ixgbe_ping_all_vfs(adapter);
3522 		}
3523 	} else { /* Link down */
3524 		if (adapter->link_active == TRUE) {
3525 			if (bootverbose)
3526 				device_printf(dev, "Link is Down\n");
3527 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3528 			adapter->link_active = FALSE;
3529 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3530 				ixgbe_ping_all_vfs(adapter);
3531 		}
3532 	}
3533 
3534 	ixgbe_update_stats_counters(adapter);
3535 
3536 	/* Re-enable link interrupts */
3537 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3538 } /* ixgbe_if_update_admin_status */
3539 
3540 /************************************************************************
3541  * ixgbe_config_dmac - Configure DMA Coalescing
3542  ************************************************************************/
3543 static void
3544 ixgbe_config_dmac(struct adapter *adapter)
3545 {
3546 	struct ixgbe_hw          *hw = &adapter->hw;
3547 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3548 
3549 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3550 		return;
3551 
3552 	if (dcfg->watchdog_timer ^ adapter->dmac ||
3553 	    dcfg->link_speed ^ adapter->link_speed) {
3554 		dcfg->watchdog_timer = adapter->dmac;
3555 		dcfg->fcoe_en = FALSE;
3556 		dcfg->link_speed = adapter->link_speed;
3557 		dcfg->num_tcs = 1;
3558 
3559 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3560 		    dcfg->watchdog_timer, dcfg->link_speed);
3561 
3562 		hw->mac.ops.dmac_config(hw);
3563 	}
3564 } /* ixgbe_config_dmac */
3565 
3566 /************************************************************************
3567  * ixgbe_if_enable_intr
3568  ************************************************************************/
3569 void
3570 ixgbe_if_enable_intr(if_ctx_t ctx)
3571 {
3572 	struct adapter     *adapter = iflib_get_softc(ctx);
3573 	struct ixgbe_hw    *hw = &adapter->hw;
3574 	struct ix_rx_queue *que = adapter->rx_queues;
3575 	u32                mask, fwsm;
3576 
3577 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3578 
3579 	switch (adapter->hw.mac.type) {
3580 	case ixgbe_mac_82599EB:
3581 		mask |= IXGBE_EIMS_ECC;
3582 		/* Temperature sensor on some adapters */
3583 		mask |= IXGBE_EIMS_GPI_SDP0;
3584 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3585 		mask |= IXGBE_EIMS_GPI_SDP1;
3586 		mask |= IXGBE_EIMS_GPI_SDP2;
3587 		break;
3588 	case ixgbe_mac_X540:
3589 		/* Detect if Thermal Sensor is enabled */
3590 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3591 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3592 			mask |= IXGBE_EIMS_TS;
3593 		mask |= IXGBE_EIMS_ECC;
3594 		break;
3595 	case ixgbe_mac_X550:
3596 		/* MAC thermal sensor is automatically enabled */
3597 		mask |= IXGBE_EIMS_TS;
3598 		mask |= IXGBE_EIMS_ECC;
3599 		break;
3600 	case ixgbe_mac_X550EM_x:
3601 	case ixgbe_mac_X550EM_a:
3602 		/* Some devices use SDP0 for important information */
3603 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3604 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3605 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3606 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3607 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3608 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3609 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3610 		mask |= IXGBE_EIMS_ECC;
3611 		break;
3612 	default:
3613 		break;
3614 	}
3615 
3616 	/* Enable Fan Failure detection */
3617 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3618 		mask |= IXGBE_EIMS_GPI_SDP1;
3619 	/* Enable SR-IOV */
3620 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3621 		mask |= IXGBE_EIMS_MAILBOX;
3622 	/* Enable Flow Director */
3623 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3624 		mask |= IXGBE_EIMS_FLOW_DIR;
3625 
3626 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3627 
3628 	/* With MSI-X we use auto clear */
3629 	if (adapter->intr_type == IFLIB_INTR_MSIX) {
3630 		mask = IXGBE_EIMS_ENABLE_MASK;
3631 		/* Don't autoclear Link */
3632 		mask &= ~IXGBE_EIMS_OTHER;
3633 		mask &= ~IXGBE_EIMS_LSC;
3634 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3635 			mask &= ~IXGBE_EIMS_MAILBOX;
3636 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3637 	}
3638 
3639 	/*
3640 	 * Now enable all queues, this is done separately to
3641 	 * allow for handling the extended (beyond 32) MSI-X
3642 	 * vectors that can be used by 82599
3643 	 */
3644 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
3645 		ixgbe_enable_queue(adapter, que->msix);
3646 
3647 	IXGBE_WRITE_FLUSH(hw);
3648 
3649 } /* ixgbe_if_enable_intr */
3650 
3651 /************************************************************************
3652  * ixgbe_if_disable_intr
3653  ************************************************************************/
3654 static void
3655 ixgbe_if_disable_intr(if_ctx_t ctx)
3656 {
3657 	struct adapter *adapter = iflib_get_softc(ctx);
3658 
3659 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3660 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3661 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3662 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3663 	} else {
3664 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3665 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3666 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3667 	}
3668 	IXGBE_WRITE_FLUSH(&adapter->hw);
3669 
3670 } /* ixgbe_if_disable_intr */
3671 
3672 /************************************************************************
3673  * ixgbe_if_rx_queue_intr_enable
3674  ************************************************************************/
3675 static int
3676 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3677 {
3678 	struct adapter     *adapter = iflib_get_softc(ctx);
3679 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
3680 
3681 	ixgbe_enable_queue(adapter, que->rxr.me);
3682 
3683 	return (0);
3684 } /* ixgbe_if_rx_queue_intr_enable */
3685 
3686 /************************************************************************
3687  * ixgbe_enable_queue
3688  ************************************************************************/
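/*
 * Worked example (editorial note): on a non-82598 MAC, MSI-X vector 35
 * yields queue = 1ULL << 35, so the low 32 bits are zero and only
 * EIMS_EX(1) is written, with bit 3 (35 - 32) set; vector 5 would instead
 * set bit 5 of EIMS_EX(0).
 */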
3689 static void
3690 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
3691 {
3692 	struct ixgbe_hw *hw = &adapter->hw;
3693 	u64             queue = 1ULL << vector;
3694 	u32             mask;
3695 
3696 	if (hw->mac.type == ixgbe_mac_82598EB) {
3697 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3698 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3699 	} else {
3700 		mask = (queue & 0xFFFFFFFF);
3701 		if (mask)
3702 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3703 		mask = (queue >> 32);
3704 		if (mask)
3705 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3706 	}
3707 } /* ixgbe_enable_queue */
3708 
3709 /************************************************************************
3710  * ixgbe_disable_queue
3711  ************************************************************************/
3712 static void
3713 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
3714 {
3715 	struct ixgbe_hw *hw = &adapter->hw;
3716 	u64             queue = 1ULL << vector;
3717 	u32             mask;
3718 
3719 	if (hw->mac.type == ixgbe_mac_82598EB) {
3720 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3721 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3722 	} else {
3723 		mask = (queue & 0xFFFFFFFF);
3724 		if (mask)
3725 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3726 		mask = (queue >> 32);
3727 		if (mask)
3728 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3729 	}
3730 } /* ixgbe_disable_queue */
3731 
3732 /************************************************************************
3733  * ixgbe_intr - Legacy Interrupt Service Routine
3734  ************************************************************************/
3735 int
3736 ixgbe_intr(void *arg)
3737 {
3738 	struct adapter     *adapter = arg;
3739 	struct ix_rx_queue *que = adapter->rx_queues;
3740 	struct ixgbe_hw    *hw = &adapter->hw;
3741 	if_ctx_t           ctx = adapter->ctx;
3742 	u32                eicr, eicr_mask;
3743 
3744 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3745 
3746 	++que->irqs;
3747 	if (eicr == 0) {
3748 		ixgbe_if_enable_intr(ctx);
3749 		return (FILTER_HANDLED);
3750 	}
3751 
3752 	/* Check for fan failure */
3753 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
3754 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3755 		device_printf(adapter->dev,
3756 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
3757 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3758 	}
3759 
3760 	/* Link status change */
3761 	if (eicr & IXGBE_EICR_LSC) {
3762 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3763 		iflib_admin_intr_deferred(ctx);
3764 	}
3765 
3766 	if (ixgbe_is_sfp(hw)) {
3767 		/* Pluggable optics-related interrupt */
3768 		if (hw->mac.type >= ixgbe_mac_X540)
3769 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3770 		else
3771 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3772 
3773 		if (eicr & eicr_mask) {
3774 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3775 			GROUPTASK_ENQUEUE(&adapter->mod_task);
3776 		}
3777 
3778 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3779 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3780 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3781 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3782 			if (atomic_cmpset_acq_int(&adapter->sfp_reinit, 0, 1))
3783 				GROUPTASK_ENQUEUE(&adapter->msf_task);
3784 		}
3785 	}
3786 
3787 	/* External PHY interrupt */
3788 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3789 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3790 		GROUPTASK_ENQUEUE(&adapter->phy_task);
3791 
3792 	return (FILTER_SCHEDULE_THREAD);
3793 } /* ixgbe_intr */
3794 
3795 /************************************************************************
3796  * ixgbe_free_pci_resources
3797  ************************************************************************/
3798 static void
3799 ixgbe_free_pci_resources(if_ctx_t ctx)
3800 {
3801 	struct adapter *adapter = iflib_get_softc(ctx);
3802 	struct         ix_rx_queue *que = adapter->rx_queues;
3803 	device_t       dev = iflib_get_dev(ctx);
3804 
3805 	/* Release the admin/link interrupt */
3806 	if (adapter->intr_type == IFLIB_INTR_MSIX)
3807 		iflib_irq_free(ctx, &adapter->irq);
3808 
3809 	if (que != NULL) {
3810 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
3811 			iflib_irq_free(ctx, &que->que_irq);
3812 		}
3813 	}
3814 
3815 	/*
3816 	 * Release the PCI BAR 0 memory resource
3817 	 */
3818 	if (adapter->pci_mem != NULL)
3819 		bus_release_resource(dev, SYS_RES_MEMORY,
3820 		                     PCIR_BAR(0), adapter->pci_mem);
3821 
3822 } /* ixgbe_free_pci_resources */
3823 
3824 /************************************************************************
3825  * ixgbe_sysctl_flowcntl
3826  *
3827  *   SYSCTL wrapper around setting Flow Control
3828  ************************************************************************/
3829 static int
3830 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3831 {
3832 	struct adapter *adapter;
3833 	int            error, fc;
3834 
3835 	adapter = (struct adapter *)arg1;
3836 	fc = adapter->hw.fc.current_mode;
3837 
3838 	error = sysctl_handle_int(oidp, &fc, 0, req);
3839 	if ((error) || (req->newptr == NULL))
3840 		return (error);
3841 
3842 	/* Don't bother if it's not changed */
3843 	if (fc == adapter->hw.fc.current_mode)
3844 		return (0);
3845 
3846 	return ixgbe_set_flowcntl(adapter, fc);
3847 } /* ixgbe_sysctl_flowcntl */
3848 
3849 /************************************************************************
3850  * ixgbe_set_flowcntl - Set flow control
3851  *
3852  *   Flow control values:
3853  *     0 - off
3854  *     1 - rx pause
3855  *     2 - tx pause
3856  *     3 - full
3857  ************************************************************************/
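/*
 * Usage sketch (editorial note; assumes the OID is attached under the
 * per-device tree as "fc", e.g. dev.ix.0.fc -- the actual node name is
 * defined elsewhere in this file):
 *
 *     # sysctl dev.ix.0.fc=3      (request full flow control)
 *     # sysctl dev.ix.0.fc        (read back the current mode)
 */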
3858 static int
3859 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3860 {
3861 	switch (fc) {
3862 	case ixgbe_fc_rx_pause:
3863 	case ixgbe_fc_tx_pause:
3864 	case ixgbe_fc_full:
3865 		adapter->hw.fc.requested_mode = fc;
3866 		if (adapter->num_rx_queues > 1)
3867 			ixgbe_disable_rx_drop(adapter);
3868 		break;
3869 	case ixgbe_fc_none:
3870 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3871 		if (adapter->num_rx_queues > 1)
3872 			ixgbe_enable_rx_drop(adapter);
3873 		break;
3874 	default:
3875 		return (EINVAL);
3876 	}
3877 
3878 	/* Don't autoneg if forcing a value */
3879 	adapter->hw.fc.disable_fc_autoneg = TRUE;
3880 	ixgbe_fc_enable(&adapter->hw);
3881 
3882 	return (0);
3883 } /* ixgbe_set_flowcntl */
3884 
3885 /************************************************************************
3886  * ixgbe_enable_rx_drop
3887  *
3888  *   Enable the hardware to drop packets when the buffer is
3889  *   full. This is useful with multiqueue, so that no single
3890  *   queue being full stalls the entire RX engine. We only
3891  *   enable this when Multiqueue is enabled AND Flow Control
3892  *   is disabled.
3893  ************************************************************************/
3894 static void
3895 ixgbe_enable_rx_drop(struct adapter *adapter)
3896 {
3897 	struct ixgbe_hw *hw = &adapter->hw;
3898 	struct rx_ring  *rxr;
3899 	u32             srrctl;
3900 
3901 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3902 		rxr = &adapter->rx_queues[i].rxr;
3903 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3904 		srrctl |= IXGBE_SRRCTL_DROP_EN;
3905 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3906 	}
3907 
3908 	/* enable drop for each vf */
3909 	for (int i = 0; i < adapter->num_vfs; i++) {
3910 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3911 		                (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
3912 		                IXGBE_QDE_ENABLE));
3913 	}
3914 } /* ixgbe_enable_rx_drop */
3915 
3916 /************************************************************************
3917  * ixgbe_disable_rx_drop
3918  ************************************************************************/
3919 static void
3920 ixgbe_disable_rx_drop(struct adapter *adapter)
3921 {
3922 	struct ixgbe_hw *hw = &adapter->hw;
3923 	struct rx_ring  *rxr;
3924 	u32             srrctl;
3925 
3926 	for (int i = 0; i < adapter->num_rx_queues; i++) {
3927 		rxr = &adapter->rx_queues[i].rxr;
3928 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
3929 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3930 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
3931 	}
3932 
3933 	/* disable drop for each vf */
3934 	for (int i = 0; i < adapter->num_vfs; i++) {
3935 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
3936 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
3937 	}
3938 } /* ixgbe_disable_rx_drop */
3939 
3940 /************************************************************************
3941  * ixgbe_sysctl_advertise
3942  *
3943  *   SYSCTL wrapper around setting advertised speed
3944  ************************************************************************/
3945 static int
3946 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
3947 {
3948 	struct adapter *adapter;
3949 	int            error, advertise;
3950 
3951 	adapter = (struct adapter *)arg1;
3952 	advertise = adapter->advertise;
3953 
3954 	error = sysctl_handle_int(oidp, &advertise, 0, req);
3955 	if ((error) || (req->newptr == NULL))
3956 		return (error);
3957 
3958 	return ixgbe_set_advertise(adapter, advertise);
3959 } /* ixgbe_sysctl_advertise */
3960 
3961 /************************************************************************
3962  * ixgbe_set_advertise - Control advertised link speed
3963  *
3964  *   Flags:
3965  *     0x1 - advertise 100 Mb
3966  *     0x2 - advertise 1G
3967  *     0x4 - advertise 10G
3968  *     0x8 - advertise 10 Mb (yes, Mb)
3969  ************************************************************************/
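/*
 * Example (editorial note): a mask of 0x6 requests 1G + 10G, while 0x4
 * restricts advertisement to 10G only.  Assuming the OID is exposed as
 * dev.ix.<unit>.advertise_speed (defined elsewhere in this file):
 *
 *     # sysctl dev.ix.0.advertise_speed=6
 */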
3970 static int
3971 ixgbe_set_advertise(struct adapter *adapter, int advertise)
3972 {
3973 	device_t         dev = iflib_get_dev(adapter->ctx);
3974 	struct ixgbe_hw  *hw;
3975 	ixgbe_link_speed speed = 0;
3976 	ixgbe_link_speed link_caps = 0;
3977 	s32              err = IXGBE_NOT_IMPLEMENTED;
3978 	bool             negotiate = FALSE;
3979 
3980 	/* Checks to validate new value */
3981 	if (adapter->advertise == advertise) /* no change */
3982 		return (0);
3983 
3984 	hw = &adapter->hw;
3985 
3986 	/* No speed changes for backplane media */
3987 	if (hw->phy.media_type == ixgbe_media_type_backplane)
3988 		return (ENODEV);
3989 
3990 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
3991 	      (hw->phy.multispeed_fiber))) {
3992 		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
3993 		return (EINVAL);
3994 	}
3995 
3996 	if (advertise < 0x1 || advertise > 0xF) {
3997 		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
3998 		return (EINVAL);
3999 	}
4000 
4001 	if (hw->mac.ops.get_link_capabilities) {
4002 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4003 		    &negotiate);
4004 		if (err != IXGBE_SUCCESS) {
4005 			device_printf(dev, "Unable to determine supported advertise speeds\n");
4006 			return (ENODEV);
4007 		}
4008 	}
4009 
4010 	/* Set new value and report new advertised mode */
4011 	if (advertise & 0x1) {
4012 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4013 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4014 			return (EINVAL);
4015 		}
4016 		speed |= IXGBE_LINK_SPEED_100_FULL;
4017 	}
4018 	if (advertise & 0x2) {
4019 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4020 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4021 			return (EINVAL);
4022 		}
4023 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4024 	}
4025 	if (advertise & 0x4) {
4026 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4027 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4028 			return (EINVAL);
4029 		}
4030 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4031 	}
4032 	if (advertise & 0x8) {
4033 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4034 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4035 			return (EINVAL);
4036 		}
4037 		speed |= IXGBE_LINK_SPEED_10_FULL;
4038 	}
4039 
4040 	hw->mac.autotry_restart = TRUE;
4041 	hw->mac.ops.setup_link(hw, speed, TRUE);
4042 	adapter->advertise = advertise;
4043 
4044 	return (0);
4045 } /* ixgbe_set_advertise */
4046 
4047 /************************************************************************
4048  * ixgbe_get_advertise - Get current advertised speed settings
4049  *
4050  *   Formatted for sysctl usage.
4051  *   Flags:
4052  *     0x1 - advertise 100 Mb
4053  *     0x2 - advertise 1G
4054  *     0x4 - advertise 10G
4055  *     0x8 - advertise 10 Mb (yes, Mb)
4056  ************************************************************************/
4057 static int
4058 ixgbe_get_advertise(struct adapter *adapter)
4059 {
4060 	struct ixgbe_hw  *hw = &adapter->hw;
4061 	int              speed;
4062 	ixgbe_link_speed link_caps = 0;
4063 	s32              err;
4064 	bool             negotiate = FALSE;
4065 
4066 	/*
4067 	 * Advertised speed means nothing unless it's copper or
4068 	 * multi-speed fiber
4069 	 */
4070 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4071 	    !(hw->phy.multispeed_fiber))
4072 		return (0);
4073 
4074 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4075 	if (err != IXGBE_SUCCESS)
4076 		return (0);
4077 
4078 	speed =
4079 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4080 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4081 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4082 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4083 
4084 	return (speed);
4085 } /* ixgbe_get_advertise */
4086 
4087 /************************************************************************
4088  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4089  *
4090  *   Control values:
4091  *     0/1 - off / on (use default value of 1000)
4092  *
4093  *     Legal timer values are:
4094  *     50,100,250,500,1000,2000,5000,10000
4095  *
4096  *     Turning off interrupt moderation will also turn this off.
4097  ************************************************************************/
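/*
 * Usage sketch (editorial note; assumes the OID is exposed as
 * dev.ix.<unit>.dmac):
 *
 *     # sysctl dev.ix.0.dmac=1      (enable with the default value, 1000)
 *     # sysctl dev.ix.0.dmac=250    (enable with a timer value of 250)
 *     # sysctl dev.ix.0.dmac=0      (disable DMA coalescing)
 */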
4098 static int
4099 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4100 {
4101 	struct adapter *adapter = (struct adapter *)arg1;
4102 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4103 	int            error;
4104 	u16            newval;
4105 
4106 	newval = adapter->dmac;
4107 	error = sysctl_handle_16(oidp, &newval, 0, req);
4108 	if ((error) || (req->newptr == NULL))
4109 		return (error);
4110 
4111 	switch (newval) {
4112 	case 0:
4113 		/* Disabled */
4114 		adapter->dmac = 0;
4115 		break;
4116 	case 1:
4117 		/* Enable and use default */
4118 		adapter->dmac = 1000;
4119 		break;
4120 	case 50:
4121 	case 100:
4122 	case 250:
4123 	case 500:
4124 	case 1000:
4125 	case 2000:
4126 	case 5000:
4127 	case 10000:
4128 		/* Legal values - allow */
4129 		adapter->dmac = newval;
4130 		break;
4131 	default:
4132 		/* Do nothing, illegal value */
4133 		return (EINVAL);
4134 	}
4135 
4136 	/* Re-initialize hardware if it's already running */
4137 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4138 		ifp->if_init(ifp);
4139 
4140 	return (0);
4141 } /* ixgbe_sysctl_dmac */
4142 
4143 #ifdef IXGBE_DEBUG
4144 /************************************************************************
4145  * ixgbe_sysctl_power_state
4146  *
4147  *   Sysctl to test power states
4148  *   Values:
4149  *     0      - set device to D0
4150  *     3      - set device to D3
4151  *     (none) - get current device power state
4152  ************************************************************************/
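/*
 * Usage sketch (editorial note; debug builds only, assumes the OID is
 * exposed as dev.ix.<unit>.power_state):
 *
 *     # sysctl dev.ix.0.power_state=3    (suspend the device to D3)
 *     # sysctl dev.ix.0.power_state=0    (resume the device to D0)
 *     # sysctl dev.ix.0.power_state      (query the current state)
 */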
4153 static int
4154 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4155 {
4156 	struct adapter *adapter = (struct adapter *)arg1;
4157 	device_t       dev = adapter->dev;
4158 	int            curr_ps, new_ps, error = 0;
4159 
4160 	curr_ps = new_ps = pci_get_powerstate(dev);
4161 
4162 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4163 	if ((error) || (req->newptr == NULL))
4164 		return (error);
4165 
4166 	if (new_ps == curr_ps)
4167 		return (0);
4168 
4169 	if (new_ps == 3 && curr_ps == 0)
4170 		error = DEVICE_SUSPEND(dev);
4171 	else if (new_ps == 0 && curr_ps == 3)
4172 		error = DEVICE_RESUME(dev);
4173 	else
4174 		return (EINVAL);
4175 
4176 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4177 
4178 	return (error);
4179 } /* ixgbe_sysctl_power_state */
4180 #endif
4181 
4182 /************************************************************************
4183  * ixgbe_sysctl_wol_enable
4184  *
4185  *   Sysctl to enable/disable the WoL capability,
4186  *   if supported by the adapter.
4187  *
4188  *   Values:
4189  *     0 - disabled
4190  *     1 - enabled
4191  ************************************************************************/
4192 static int
4193 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4194 {
4195 	struct adapter  *adapter = (struct adapter *)arg1;
4196 	struct ixgbe_hw *hw = &adapter->hw;
4197 	int             new_wol_enabled;
4198 	int             error = 0;
4199 
4200 	new_wol_enabled = hw->wol_enabled;
4201 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4202 	if ((error) || (req->newptr == NULL))
4203 		return (error);
4204 	new_wol_enabled = !!(new_wol_enabled);
4205 	if (new_wol_enabled == hw->wol_enabled)
4206 		return (0);
4207 
4208 	if (new_wol_enabled > 0 && !adapter->wol_support)
4209 		return (ENODEV);
4210 	else
4211 		hw->wol_enabled = new_wol_enabled;
4212 
4213 	return (0);
4214 } /* ixgbe_sysctl_wol_enable */
4215 
4216 /************************************************************************
4217  * ixgbe_sysctl_wufc - Wake Up Filter Control
4218  *
4219  *   Sysctl to enable/disable the types of packets that the
4220  *   adapter will wake up on upon receipt.
4221  *   Flags:
4222  *     0x1  - Link Status Change
4223  *     0x2  - Magic Packet
4224  *     0x4  - Direct Exact
4225  *     0x8  - Directed Multicast
4226  *     0x10 - Broadcast
4227  *     0x20 - ARP/IPv4 Request Packet
4228  *     0x40 - Direct IPv4 Packet
4229  *     0x80 - Direct IPv6 Packet
4230  *
4231  *   Settings not listed above will cause the sysctl to return an error.
4232  ************************************************************************/
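/*
 * Example (editorial note): a value of 0x3 arms wake-up on Link Status
 * Change plus Magic Packet, while 0x12 combines Magic Packet (0x2) with
 * Broadcast (0x10).
 */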
4233 static int
4234 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4235 {
4236 	struct adapter *adapter = (struct adapter *)arg1;
4237 	int            error = 0;
4238 	u32            new_wufc;
4239 
4240 	new_wufc = adapter->wufc;
4241 
4242 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4243 	if ((error) || (req->newptr == NULL))
4244 		return (error);
4245 	if (new_wufc == adapter->wufc)
4246 		return (0);
4247 
4248 	if (new_wufc & 0xffffff00)
4249 		return (EINVAL);
4250 
4251 	new_wufc &= 0xff;
4252 	new_wufc |= (0xffffff00 & adapter->wufc);
4253 	adapter->wufc = new_wufc;
4254 
4255 	return (0);
4256 } /* ixgbe_sysctl_wufc */
4257 
4258 #ifdef IXGBE_DEBUG
4259 /************************************************************************
4260  * ixgbe_sysctl_print_rss_config
4261  ************************************************************************/
4262 static int
4263 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4264 {
4265 	struct adapter  *adapter = (struct adapter *)arg1;
4266 	struct ixgbe_hw *hw = &adapter->hw;
4267 	device_t        dev = adapter->dev;
4268 	struct sbuf     *buf;
4269 	int             error = 0, reta_size;
4270 	u32             reg;
4271 
4272 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4273 	if (!buf) {
4274 		device_printf(dev, "Could not allocate sbuf for output.\n");
4275 		return (ENOMEM);
4276 	}
4277 
4278 	/* TODO: use sbufs to make a string to print out */
4279 	/* Set multiplier for RETA setup and table size based on MAC */
4280 	switch (adapter->hw.mac.type) {
4281 	case ixgbe_mac_X550:
4282 	case ixgbe_mac_X550EM_x:
4283 	case ixgbe_mac_X550EM_a:
4284 		reta_size = 128;
4285 		break;
4286 	default:
4287 		reta_size = 32;
4288 		break;
4289 	}
4290 
4291 	/* Print out the redirection table */
4292 	sbuf_cat(buf, "\n");
4293 	for (int i = 0; i < reta_size; i++) {
4294 		if (i < 32) {
4295 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4296 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4297 		} else {
4298 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4299 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4300 		}
4301 	}
4302 
4303 	/* TODO: print more config */
4304 
4305 	error = sbuf_finish(buf);
4306 	if (error)
4307 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4308 
4309 	sbuf_delete(buf);
4310 
4311 	return (0);
4312 } /* ixgbe_sysctl_print_rss_config */
4313 #endif /* IXGBE_DEBUG */
4314 
4315 /************************************************************************
4316  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4317  *
4318  *   For X552/X557-AT devices using an external PHY
4319  ************************************************************************/
4320 static int
4321 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4322 {
4323 	struct adapter  *adapter = (struct adapter *)arg1;
4324 	struct ixgbe_hw *hw = &adapter->hw;
4325 	u16             reg;
4326 
4327 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4328 		device_printf(iflib_get_dev(adapter->ctx),
4329 		    "Device has no supported external thermal sensor.\n");
4330 		return (ENODEV);
4331 	}
4332 
4333 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4334 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4335 		device_printf(iflib_get_dev(adapter->ctx),
4336 		    "Error reading from PHY's current temperature register\n");
4337 		return (EAGAIN);
4338 	}
4339 
4340 	/* Shift temp for output */
4341 	reg = reg >> 8;
4342 
4343 	return (sysctl_handle_16(oidp, NULL, reg, req));
4344 } /* ixgbe_sysctl_phy_temp */
4345 
4346 /************************************************************************
4347  * ixgbe_sysctl_phy_overtemp_occurred
4348  *
4349  *   Reports (directly from the PHY) whether the current PHY
4350  *   temperature is over the overtemp threshold.
4351  ************************************************************************/
4352 static int
4353 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4354 {
4355 	struct adapter  *adapter = (struct adapter *)arg1;
4356 	struct ixgbe_hw *hw = &adapter->hw;
4357 	u16             reg;
4358 
4359 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4360 		device_printf(iflib_get_dev(adapter->ctx),
4361 		    "Device has no supported external thermal sensor.\n");
4362 		return (ENODEV);
4363 	}
4364 
4365 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4366 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4367 		device_printf(iflib_get_dev(adapter->ctx),
4368 		    "Error reading from PHY's temperature status register\n");
4369 		return (EAGAIN);
4370 	}
4371 
4372 	/* Get occurrence bit */
4373 	reg = !!(reg & 0x4000);
4374 
4375 	return (sysctl_handle_16(oidp, NULL, reg, req));
4376 } /* ixgbe_sysctl_phy_overtemp_occurred */
4377 
4378 /************************************************************************
4379  * ixgbe_sysctl_eee_state
4380  *
4381  *   Sysctl to set EEE power saving feature
4382  *   Values:
4383  *     0      - disable EEE
4384  *     1      - enable EEE
4385  *     (none) - get current device EEE state
4386  ************************************************************************/
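/*
 * Usage sketch (editorial note; assumes the OID is exposed as
 * dev.ix.<unit>.eee_state):
 *
 *     # sysctl dev.ix.0.eee_state=1    (enable EEE; restarts autoneg)
 *     # sysctl dev.ix.0.eee_state      (query the current state)
 */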
4387 static int
4388 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4389 {
4390 	struct adapter *adapter = (struct adapter *)arg1;
4391 	device_t       dev = adapter->dev;
4392 	struct ifnet   *ifp = iflib_get_ifp(adapter->ctx);
4393 	int            curr_eee, new_eee, error = 0;
4394 	s32            retval;
4395 
4396 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4397 
4398 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4399 	if ((error) || (req->newptr == NULL))
4400 		return (error);
4401 
4402 	/* Nothing to do */
4403 	if (new_eee == curr_eee)
4404 		return (0);
4405 
4406 	/* Not supported */
4407 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4408 		return (EINVAL);
4409 
4410 	/* Bounds checking */
4411 	if ((new_eee < 0) || (new_eee > 1))
4412 		return (EINVAL);
4413 
4414 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4415 	if (retval) {
4416 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4417 		return (EINVAL);
4418 	}
4419 
4420 	/* Restart auto-neg */
4421 	ifp->if_init(ifp);
4422 
4423 	device_printf(dev, "New EEE state: %d\n", new_eee);
4424 
4425 	/* Cache new value */
4426 	if (new_eee)
4427 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4428 	else
4429 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4430 
4431 	return (error);
4432 } /* ixgbe_sysctl_eee_state */
4433 
4434 /************************************************************************
4435  * ixgbe_init_device_features
4436  ************************************************************************/
4437 static void
4438 ixgbe_init_device_features(struct adapter *adapter)
4439 {
4440 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4441 	                  | IXGBE_FEATURE_RSS
4442 	                  | IXGBE_FEATURE_MSI
4443 	                  | IXGBE_FEATURE_MSIX
4444 	                  | IXGBE_FEATURE_LEGACY_IRQ;
4445 
4446 	/* Set capabilities first... */
4447 	switch (adapter->hw.mac.type) {
4448 	case ixgbe_mac_82598EB:
4449 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4450 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4451 		break;
4452 	case ixgbe_mac_X540:
4453 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4454 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4455 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4456 		    (adapter->hw.bus.func == 0))
4457 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4458 		break;
4459 	case ixgbe_mac_X550:
4460 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4461 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4462 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4463 		break;
4464 	case ixgbe_mac_X550EM_x:
4465 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4466 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4467 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4468 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4469 		break;
4470 	case ixgbe_mac_X550EM_a:
4471 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4472 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4473 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4474 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4475 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4476 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4477 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4478 		}
4479 		break;
4480 	case ixgbe_mac_82599EB:
4481 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4482 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4483 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4484 		    (adapter->hw.bus.func == 0))
4485 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4486 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4487 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4488 		break;
4489 	default:
4490 		break;
4491 	}
4492 
4493 	/* Enabled by default... */
4494 	/* Fan failure detection */
4495 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4496 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4497 	/* Netmap */
4498 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4499 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4500 	/* EEE */
4501 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4502 		adapter->feat_en |= IXGBE_FEATURE_EEE;
4503 	/* Thermal Sensor */
4504 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4505 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4506 
4507 	/* Enabled via global sysctl... */
4508 	/* Flow Director */
4509 	if (ixgbe_enable_fdir) {
4510 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4511 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4512 		else
4513 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4514 	}
4515 	/*
4516 	 * Message Signal Interrupts - Extended (MSI-X)
4517 	 * Normal MSI is only enabled if MSI-X calls fail.
4518 	 */
4519 	if (!ixgbe_enable_msix)
4520 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4521 	/* Receive-Side Scaling (RSS) */
4522 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4523 		adapter->feat_en |= IXGBE_FEATURE_RSS;
4524 
4525 	/* Disable features with unmet dependencies... */
4526 	/* No MSI-X */
4527 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4528 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4529 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4530 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4531 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4532 	}
4533 } /* ixgbe_init_device_features */
4534 
4535 /************************************************************************
4536  * ixgbe_check_fan_failure
4537  ************************************************************************/
4538 static void
4539 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4540 {
4541 	u32 mask;
4542 
4543 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4544 	    IXGBE_ESDP_SDP1;
4545 
4546 	if (reg & mask)
4547 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4548 } /* ixgbe_check_fan_failure */
4549 
4550