xref: /freebsd/sys/dev/ixgbe/if_ix.c (revision c58d34dd67a419866ee50f152044e49cecbae261)
1 /*****************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 *****************************************************************************/
33 
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #include "opt_rss.h"
37 
38 #include "ixgbe.h"
39 #include "ixgbe_sriov.h"
40 #include "ifdi_if.h"
41 
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
44 
45 /************************************************************************
46  * Driver version
47  ************************************************************************/
48 static const char ixgbe_driver_version[] = "4.0.1-k";
49 
50 /************************************************************************
51  * PCI Device ID Table
52  *
53  *   Used by probe to select devices to load on
54  *   Last field stores an index into ixgbe_strings
55  *   Last entry must be all 0s
56  *
57  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58  ************************************************************************/
59 static const pci_vendor_info_t ixgbe_vendor_info_array[] =
60 {
61 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,
62     "Intel(R) 82598EB AF (Dual Fiber)"),
63 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,
64     "Intel(R) 82598EB AF (Fiber)"),
65 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,
66     "Intel(R) 82598EB AT (CX4)"),
67 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,
68     "Intel(R) 82598EB AT"),
69 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,
70     "Intel(R) 82598EB AT2"),
71 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
72 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,
73     "Intel(R) 82598EB AF DA (Dual Fiber)"),
74 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,
75     "Intel(R) 82598EB AT (Dual CX4)"),
76 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,
77     "Intel(R) 82598EB AF (Dual Fiber LR)"),
78 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,
79     "Intel(R) 82598EB AF (Dual Fiber SR)"),
80 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,
81     "Intel(R) 82598EB LOM"),
82 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,
83     "Intel(R) X520 82599 (KX4)"),
84 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,
85     "Intel(R) X520 82599 (KX4 Mezzanine)"),
86 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,
87     "Intel(R) X520 82599ES (SFI/SFP+)"),
88 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,
89     "Intel(R) X520 82599 (XAUI/BX4)"),
90 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,
91     "Intel(R) X520 82599 (Dual CX4)"),
92 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,
93     "Intel(R) X520-T 82599 LOM"),
94 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS,
95     "Intel(R) X520 82599 LS"),
96 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,
97     "Intel(R) X520 82599 (Combined Backplane)"),
98 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,
99     "Intel(R) X520 82599 (Backplane w/FCoE)"),
100 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,
101     "Intel(R) X520 82599 (Dual SFP+)"),
102 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,
103     "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
104 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,
105     "Intel(R) X520-1 82599EN (SFP+)"),
106 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,
107     "Intel(R) X520-4 82599 (Quad SFP+)"),
108 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,
109     "Intel(R) X520-Q1 82599 (QSFP+)"),
110 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,
111     "Intel(R) X540-AT2"),
112 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1,  "Intel(R) X540-T1"),
113 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T,  "Intel(R) X550-T2"),
114 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
115 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,
116     "Intel(R) X552 (KR Backplane)"),
117 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,
118     "Intel(R) X552 (KX4 Backplane)"),
119 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,
120     "Intel(R) X552/X557-AT (10GBASE-T)"),
121 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,
122     "Intel(R) X552 (1000BASE-T)"),
123 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP,
124     "Intel(R) X552 (SFP+)"),
125 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR,
126     "Intel(R) X553 (KR Backplane)"),
127 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L,
128     "Intel(R) X553 L (KR Backplane)"),
129 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP,
130     "Intel(R) X553 (SFP+)"),
131 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N,
132     "Intel(R) X553 N (SFP+)"),
133 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII,
134     "Intel(R) X553 (1GbE SGMII)"),
135 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L,
136     "Intel(R) X553 L (1GbE SGMII)"),
137 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T,
138     "Intel(R) X553/X557-AT (10GBASE-T)"),
139 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T,
140     "Intel(R) X553 (1GbE)"),
141 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L,
142     "Intel(R) X553 L (1GbE)"),
143 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS,
144     "Intel(R) X540-T2 (Bypass)"),
145 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
146     "Intel(R) X520 82599 (Bypass)"),
147 	/* required last entry */
148 	PVID_END
149 };
150 
151 static void *ixgbe_register(device_t);
152 static int  ixgbe_if_attach_pre(if_ctx_t);
153 static int  ixgbe_if_attach_post(if_ctx_t);
154 static int  ixgbe_if_detach(if_ctx_t);
155 static int  ixgbe_if_shutdown(if_ctx_t);
156 static int  ixgbe_if_suspend(if_ctx_t);
157 static int  ixgbe_if_resume(if_ctx_t);
158 
159 static void ixgbe_if_stop(if_ctx_t);
160 void ixgbe_if_enable_intr(if_ctx_t);
161 static void ixgbe_if_disable_intr(if_ctx_t);
162 static void ixgbe_link_intr_enable(if_ctx_t);
163 static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
164 static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
165 static int  ixgbe_if_media_change(if_ctx_t);
166 static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
167 static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
168 static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
169 static void ixgbe_if_multi_set(if_ctx_t);
170 static int  ixgbe_if_promisc_set(if_ctx_t, int);
171 static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
172     int);
173 static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
174    int);
175 static void ixgbe_if_queues_free(if_ctx_t);
176 static void ixgbe_if_timer(if_ctx_t, uint16_t);
177 static void ixgbe_if_update_admin_status(if_ctx_t);
178 static void ixgbe_if_vlan_register(if_ctx_t, u16);
179 static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
180 static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
181 static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
182 int ixgbe_intr(void *);
183 
184 /************************************************************************
185  * Function prototypes
186  ************************************************************************/
187 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
188 
189 static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
190 static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
191 static void ixgbe_add_device_sysctls(if_ctx_t);
192 static int  ixgbe_allocate_pci_resources(if_ctx_t);
193 static int  ixgbe_setup_low_power_mode(if_ctx_t);
194 
195 static void ixgbe_config_dmac(struct ixgbe_softc *);
196 static void ixgbe_configure_ivars(struct ixgbe_softc *);
197 static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
198 static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
199 static bool ixgbe_sfp_probe(if_ctx_t);
200 
201 static void ixgbe_free_pci_resources(if_ctx_t);
202 
203 static int  ixgbe_msix_link(void *);
204 static int  ixgbe_msix_que(void *);
205 static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
206 static void ixgbe_initialize_receive_units(if_ctx_t);
207 static void ixgbe_initialize_transmit_units(if_ctx_t);
208 
209 static int  ixgbe_setup_interface(if_ctx_t);
210 static void ixgbe_init_device_features(struct ixgbe_softc *);
211 static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
212 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
213 static void ixgbe_print_fw_version(if_ctx_t);
214 static void ixgbe_add_media_types(if_ctx_t);
215 static void ixgbe_update_stats_counters(struct ixgbe_softc *);
216 static void ixgbe_config_link(if_ctx_t);
217 static void ixgbe_get_slot_info(struct ixgbe_softc *);
218 static void ixgbe_fw_mode_timer(void *);
219 static void ixgbe_check_wol_support(struct ixgbe_softc *);
220 static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
221 static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
222 
223 static void ixgbe_add_hw_stats(struct ixgbe_softc *);
224 static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
225 static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
226 static int  ixgbe_get_default_advertise(struct ixgbe_softc *);
227 static void ixgbe_setup_vlan_hw_support(if_ctx_t);
228 static void ixgbe_config_gpie(struct ixgbe_softc *);
229 static void ixgbe_config_delay_values(struct ixgbe_softc *);
230 
231 /* Sysctl handlers */
232 static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
233 static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
234 static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
235 static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
236 static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
237 static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
238 static int  ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
239 #ifdef IXGBE_DEBUG
240 static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
241 static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
242 #endif
243 static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
244 static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
245 static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
246 static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
247 static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
248 static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
249 static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
250 static int  ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
251 
252 /* Deferred interrupt tasklets */
253 static void ixgbe_handle_msf(void *);
254 static void ixgbe_handle_mod(void *);
255 static void ixgbe_handle_phy(void *);
256 
257 /************************************************************************
258  *  FreeBSD Device Interface Entry Points
259  ************************************************************************/
260 static device_method_t ix_methods[] = {
261 	/* Device interface */
262 	DEVMETHOD(device_register, ixgbe_register),
263 	DEVMETHOD(device_probe, iflib_device_probe),
264 	DEVMETHOD(device_attach, iflib_device_attach),
265 	DEVMETHOD(device_detach, iflib_device_detach),
266 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
267 	DEVMETHOD(device_suspend, iflib_device_suspend),
268 	DEVMETHOD(device_resume, iflib_device_resume),
269 #ifdef PCI_IOV
270 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
271 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
272 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
273 #endif /* PCI_IOV */
274 	DEVMETHOD_END
275 };
276 
277 static driver_t ix_driver = {
278 	"ix", ix_methods, sizeof(struct ixgbe_softc),
279 };
280 
281 DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
282 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
283 MODULE_DEPEND(ix, pci, 1, 1, 1);
284 MODULE_DEPEND(ix, ether, 1, 1, 1);
285 MODULE_DEPEND(ix, iflib, 1, 1, 1);
286 
287 static device_method_t ixgbe_if_methods[] = {
288 	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
289 	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
290 	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
291 	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
292 	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
293 	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
294 	DEVMETHOD(ifdi_init, ixgbe_if_init),
295 	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
296 	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
297 	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
298 	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
299 	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
300 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
301 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
302 	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
303 	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
304 	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
305 	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
306 	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
307 	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
308 	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
309 	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
310 	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
311 	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
312 	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
313 	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
314 	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
315 	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
316 	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
317 	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
318 #ifdef PCI_IOV
319 	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
320 	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
321 	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
322 #endif /* PCI_IOV */
323 	DEVMETHOD_END
324 };
325 
326 /*
327  * TUNEABLE PARAMETERS:
328  */
329 
330 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
331     "IXGBE driver parameters");
332 static driver_t ixgbe_if_driver = {
333   "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
334 };
335 
336 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
337 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
338     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
339 
340 /* Flow control setting, default to full */
341 static int ixgbe_flow_control = ixgbe_fc_full;
342 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
343     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
344 
345 /* Advertise Speed, default to 0 (auto) */
346 static int ixgbe_advertise_speed = 0;
347 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
348     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
349 
350 /*
351  * Smart speed setting, default to on
352  * this only works as a compile option
353  * right now as its during attach, set
354  * this to 'ixgbe_smart_speed_off' to
355  * disable.
356  */
357 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
358 
359 /*
360  * MSI-X should be the default for best performance,
361  * but this allows it to be forced off for testing.
362  */
363 static int ixgbe_enable_msix = 1;
364 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix,
365     0,
366     "Enable MSI-X interrupts");
367 
368 /*
369  * Defining this on will allow the use
370  * of unsupported SFP+ modules, note that
371  * doing so you are on your own :)
372  */
373 static int allow_unsupported_sfp = false;
374 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
375     &allow_unsupported_sfp, 0,
376     "Allow unsupported SFP modules...use at your own risk");
377 
378 /*
379  * Not sure if Flow Director is fully baked,
380  * so we'll default to turning it off.
381  */
382 static int ixgbe_enable_fdir = 0;
383 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir,
384     0,
385     "Enable Flow Director");
386 
387 /* Receive-Side Scaling */
388 static int ixgbe_enable_rss = 1;
389 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss,
390     0,
391     "Enable Receive-Side Scaling (RSS)");
392 
393 /*
394  * AIM: Adaptive Interrupt Moderation
395  * which means that the interrupt rate
396  * is varied over time based on the
397  * traffic for that interrupt vector
398  */
399 static int ixgbe_enable_aim = false;
400 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim,
401     0,
402     "Enable adaptive interrupt moderation");
403 
404 #if 0
405 /* Keep running tab on them for sanity check */
406 static int ixgbe_total_ports;
407 #endif
408 
409 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
410 
411 /*
412  * For Flow Director: this is the number of TX packets we sample
413  * for the filter pool, this means every 20th packet will be probed.
414  *
415  * This feature can be disabled by setting this to 0.
416  */
417 static int atr_sample_rate = 20;
418 
419 extern struct if_txrx ixgbe_txrx;
420 
421 static struct if_shared_ctx ixgbe_sctx_init = {
422 	.isc_magic = IFLIB_MAGIC,
423 	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
424 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
425 	.isc_tx_maxsegsize = PAGE_SIZE,
426 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
427 	.isc_tso_maxsegsize = PAGE_SIZE,
428 	.isc_rx_maxsize = PAGE_SIZE*4,
429 	.isc_rx_nsegments = 1,
430 	.isc_rx_maxsegsize = PAGE_SIZE*4,
431 	.isc_nfl = 1,
432 	.isc_ntxqs = 1,
433 	.isc_nrxqs = 1,
434 
435 	.isc_admin_intrcnt = 1,
436 	.isc_vendor_info = ixgbe_vendor_info_array,
437 	.isc_driver_version = ixgbe_driver_version,
438 	.isc_driver = &ixgbe_if_driver,
439 	.isc_flags = IFLIB_TSO_INIT_IP,
440 
441 	.isc_nrxd_min = {MIN_RXD},
442 	.isc_ntxd_min = {MIN_TXD},
443 	.isc_nrxd_max = {MAX_RXD},
444 	.isc_ntxd_max = {MAX_TXD},
445 	.isc_nrxd_default = {DEFAULT_RXD},
446 	.isc_ntxd_default = {DEFAULT_TXD},
447 };
448 
449 /************************************************************************
450  * ixgbe_if_tx_queues_alloc
451  ************************************************************************/
452 static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int ntxqs,int ntxqsets)453 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
454     int ntxqs, int ntxqsets)
455 {
456 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
457 	if_softc_ctx_t scctx = sc->shared;
458 	struct ix_tx_queue *que;
459 	int i, j, error;
460 
461 	MPASS(sc->num_tx_queues > 0);
462 	MPASS(sc->num_tx_queues == ntxqsets);
463 	MPASS(ntxqs == 1);
464 
465 	/* Allocate queue structure memory */
466 	sc->tx_queues =
467 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
468 	    ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
469 	if (!sc->tx_queues) {
470 		device_printf(iflib_get_dev(ctx),
471 		    "Unable to allocate TX ring memory\n");
472 		return (ENOMEM);
473 	}
474 
475 	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
476 		struct tx_ring *txr = &que->txr;
477 
478 		/* In case SR-IOV is enabled, align the index properly */
479 		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
480 
481 		txr->sc = que->sc = sc;
482 
483 		/* Allocate report status array */
484 		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
485 		    scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
486 		if (txr->tx_rsq == NULL) {
487 			error = ENOMEM;
488 			goto fail;
489 		}
490 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
491 			txr->tx_rsq[j] = QIDX_INVALID;
492 		/* get virtual and physical address of the hardware queues */
493 		txr->tail = IXGBE_TDT(txr->me);
494 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
495 		txr->tx_paddr = paddrs[i];
496 
497 		txr->bytes = 0;
498 		txr->total_packets = 0;
499 
500 		/* Set the rate at which we sample packets */
501 		if (sc->feat_en & IXGBE_FEATURE_FDIR)
502 			txr->atr_sample = atr_sample_rate;
503 
504 	}
505 
506 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
507 	    sc->num_tx_queues);
508 
509 	return (0);
510 
511 fail:
512 	ixgbe_if_queues_free(ctx);
513 
514 	return (error);
515 } /* ixgbe_if_tx_queues_alloc */
516 
517 /************************************************************************
518  * ixgbe_if_rx_queues_alloc
519  ************************************************************************/
520 static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)521 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
522     int nrxqs, int nrxqsets)
523 {
524 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
525 	struct ix_rx_queue *que;
526 	int i;
527 
528 	MPASS(sc->num_rx_queues > 0);
529 	MPASS(sc->num_rx_queues == nrxqsets);
530 	MPASS(nrxqs == 1);
531 
532 	/* Allocate queue structure memory */
533 	sc->rx_queues =
534 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
535 	    M_IXGBE, M_NOWAIT | M_ZERO);
536 	if (!sc->rx_queues) {
537 		device_printf(iflib_get_dev(ctx),
538 		    "Unable to allocate TX ring memory\n");
539 		return (ENOMEM);
540 	}
541 
542 	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
543 		struct rx_ring *rxr = &que->rxr;
544 
545 		/* In case SR-IOV is enabled, align the index properly */
546 		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
547 
548 		rxr->sc = que->sc = sc;
549 
550 		/* get the virtual and physical address of the hw queues */
551 		rxr->tail = IXGBE_RDT(rxr->me);
552 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
553 		rxr->rx_paddr = paddrs[i];
554 		rxr->bytes = 0;
555 		rxr->que = que;
556 	}
557 
558 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
559 	    sc->num_rx_queues);
560 
561 	return (0);
562 } /* ixgbe_if_rx_queues_alloc */
563 
564 /************************************************************************
565  * ixgbe_if_queues_free
566  ************************************************************************/
567 static void
ixgbe_if_queues_free(if_ctx_t ctx)568 ixgbe_if_queues_free(if_ctx_t ctx)
569 {
570 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
571 	struct ix_tx_queue *tx_que = sc->tx_queues;
572 	struct ix_rx_queue *rx_que = sc->rx_queues;
573 	int i;
574 
575 	if (tx_que != NULL) {
576 		for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
577 			struct tx_ring *txr = &tx_que->txr;
578 			if (txr->tx_rsq == NULL)
579 				break;
580 
581 			free(txr->tx_rsq, M_IXGBE);
582 			txr->tx_rsq = NULL;
583 		}
584 
585 		free(sc->tx_queues, M_IXGBE);
586 		sc->tx_queues = NULL;
587 	}
588 	if (rx_que != NULL) {
589 		free(sc->rx_queues, M_IXGBE);
590 		sc->rx_queues = NULL;
591 	}
592 } /* ixgbe_if_queues_free */
593 
594 /************************************************************************
595  * ixgbe_initialize_rss_mapping
596  ************************************************************************/
597 static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc * sc)598 ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
599 {
600 	struct ixgbe_hw *hw = &sc->hw;
601 	u32 reta = 0, mrqc, rss_key[10];
602 	int queue_id, table_size, index_mult;
603 	int i, j;
604 	u32 rss_hash_config;
605 
606 	if (sc->feat_en & IXGBE_FEATURE_RSS) {
607 		/* Fetch the configured RSS key */
608 		rss_getkey((uint8_t *)&rss_key);
609 	} else {
610 		/* set up random bits */
611 		arc4rand(&rss_key, sizeof(rss_key), 0);
612 	}
613 
614 	/* Set multiplier for RETA setup and table size based on MAC */
615 	index_mult = 0x1;
616 	table_size = 128;
617 	switch (sc->hw.mac.type) {
618 	case ixgbe_mac_82598EB:
619 		index_mult = 0x11;
620 		break;
621 	case ixgbe_mac_X550:
622 	case ixgbe_mac_X550EM_x:
623 	case ixgbe_mac_X550EM_a:
624 		table_size = 512;
625 		break;
626 	default:
627 		break;
628 	}
629 
630 	/* Set up the redirection table */
631 	for (i = 0, j = 0; i < table_size; i++, j++) {
632 		if (j == sc->num_rx_queues)
633 			j = 0;
634 
635 		if (sc->feat_en & IXGBE_FEATURE_RSS) {
636 			/*
637 			 * Fetch the RSS bucket id for the given indirection
638 			 * entry. Cap it at the number of configured buckets
639 			 * (which is num_rx_queues.)
640 			 */
641 			queue_id = rss_get_indirection_to_bucket(i);
642 			queue_id = queue_id % sc->num_rx_queues;
643 		} else
644 			queue_id = (j * index_mult);
645 
646 		/*
647 		 * The low 8 bits are for hash value (n+0);
648 		 * The next 8 bits are for hash value (n+1), etc.
649 		 */
650 		reta = reta >> 8;
651 		reta = reta | (((uint32_t)queue_id) << 24);
652 		if ((i & 3) == 3) {
653 			if (i < 128)
654 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
655 			else
656 				IXGBE_WRITE_REG(hw,
657 				    IXGBE_ERETA((i >> 2) - 32), reta);
658 			reta = 0;
659 		}
660 	}
661 
662 	/* Now fill our hash function seeds */
663 	for (i = 0; i < 10; i++)
664 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
665 
666 	/* Perform hash on these packet types */
667 	if (sc->feat_en & IXGBE_FEATURE_RSS)
668 		rss_hash_config = rss_gethashconfig();
669 	else {
670 		/*
671 		 * Disable UDP - IP fragments aren't currently being handled
672 		 * and so we end up with a mix of 2-tuple and 4-tuple
673 		 * traffic.
674 		 */
675 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4 |
676 		    RSS_HASHTYPE_RSS_TCP_IPV4 |
677 		    RSS_HASHTYPE_RSS_IPV6 |
678 		    RSS_HASHTYPE_RSS_TCP_IPV6 |
679 		    RSS_HASHTYPE_RSS_IPV6_EX |
680 		    RSS_HASHTYPE_RSS_TCP_IPV6_EX;
681 	}
682 
683 	mrqc = IXGBE_MRQC_RSSEN;
684 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
685 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
686 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
687 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
688 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
689 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
690 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
691 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
692 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
693 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
694 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
695 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
696 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
697 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
698 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
699 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
700 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
701 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
702 	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
703 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
704 } /* ixgbe_initialize_rss_mapping */
705 
706 /************************************************************************
707  * ixgbe_initialize_receive_units - Setup receive registers and features.
708  ************************************************************************/
709 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
710 
711 static void
ixgbe_initialize_receive_units(if_ctx_t ctx)712 ixgbe_initialize_receive_units(if_ctx_t ctx)
713 {
714 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
715 	if_softc_ctx_t scctx = sc->shared;
716 	struct ixgbe_hw *hw = &sc->hw;
717 	if_t ifp = iflib_get_ifp(ctx);
718 	struct ix_rx_queue *que;
719 	int i, j;
720 	u32 bufsz, fctrl, srrctl, rxcsum;
721 	u32 hlreg;
722 
723 	/*
724 	 * Make sure receives are disabled while
725 	 * setting up the descriptor ring
726 	 */
727 	ixgbe_disable_rx(hw);
728 
729 	/* Enable broadcasts */
730 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
731 	fctrl |= IXGBE_FCTRL_BAM;
732 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
733 		fctrl |= IXGBE_FCTRL_DPF;
734 		fctrl |= IXGBE_FCTRL_PMCF;
735 	}
736 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
737 
738 	/* Set for Jumbo Frames? */
739 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
740 	if (if_getmtu(ifp) > ETHERMTU)
741 		hlreg |= IXGBE_HLREG0_JUMBOEN;
742 	else
743 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
744 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
745 
746 	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
747 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
748 
749 	/* Setup the Base and Length of the Rx Descriptor Ring */
750 	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
751 		struct rx_ring *rxr = &que->rxr;
752 		u64 rdba = rxr->rx_paddr;
753 
754 		j = rxr->me;
755 
756 		/* Setup the Base and Length of the Rx Descriptor Ring */
757 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
758 		    (rdba & 0x00000000ffffffffULL));
759 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
760 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
761 		     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
762 
763 		/* Set up the SRRCTL register */
764 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
765 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
766 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
767 		srrctl |= bufsz;
768 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
769 
770 		/*
771 		 * Set DROP_EN iff we have no flow control and >1 queue.
772 		 * Note that srrctl was cleared shortly before during reset,
773 		 * so we do not need to clear the bit, but do it just in case
774 		 * this code is moved elsewhere.
775 		 */
776 		if (sc->num_rx_queues > 1 &&
777 		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
778 			srrctl |= IXGBE_SRRCTL_DROP_EN;
779 		} else {
780 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
781 		}
782 
783 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
784 
785 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
786 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
787 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
788 
789 		/* Set the driver rx tail address */
790 		rxr->tail =  IXGBE_RDT(rxr->me);
791 	}
792 
793 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
794 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
795 		    IXGBE_PSRTYPE_UDPHDR |
796 		    IXGBE_PSRTYPE_IPV4HDR |
797 		    IXGBE_PSRTYPE_IPV6HDR;
798 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
799 	}
800 
801 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
802 
803 	ixgbe_initialize_rss_mapping(sc);
804 
805 	if (sc->feat_en & IXGBE_FEATURE_RSS) {
806 		/* RSS and RX IPP Checksum are mutually exclusive */
807 		rxcsum |= IXGBE_RXCSUM_PCSD;
808 	}
809 
810 	if (if_getcapenable(ifp) & IFCAP_RXCSUM)
811 		rxcsum |= IXGBE_RXCSUM_PCSD;
812 
813 	/* This is useful for calculating UDP/IP fragment checksums */
814 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
815 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
816 
817 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
818 
819 } /* ixgbe_initialize_receive_units */
820 
821 /************************************************************************
822  * ixgbe_initialize_transmit_units - Enable transmit units.
823  ************************************************************************/
824 static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)825 ixgbe_initialize_transmit_units(if_ctx_t ctx)
826 {
827 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
828 	struct ixgbe_hw *hw = &sc->hw;
829 	if_softc_ctx_t scctx = sc->shared;
830 	struct ix_tx_queue *que;
831 	int i;
832 
833 	/* Setup the Base and Length of the Tx Descriptor Ring */
834 	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
835 	    i++, que++) {
836 		struct tx_ring	   *txr = &que->txr;
837 		u64 tdba = txr->tx_paddr;
838 		u32 txctrl = 0;
839 		int j = txr->me;
840 
841 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
842 		    (tdba & 0x00000000ffffffffULL));
843 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
844 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
845 		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
846 
847 		/* Setup the HW Tx Head and Tail descriptor pointers */
848 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
849 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
850 
851 		/* Cache the tail address */
852 		txr->tail = IXGBE_TDT(txr->me);
853 
854 		txr->tx_rs_cidx = txr->tx_rs_pidx;
855 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
856 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
857 			txr->tx_rsq[k] = QIDX_INVALID;
858 
859 		/* Disable Head Writeback */
860 		/*
861 		 * Note: for X550 series devices, these registers are actually
862 		 * prefixed with TPH_ isntead of DCA_, but the addresses and
863 		 * fields remain the same.
864 		 */
865 		switch (hw->mac.type) {
866 		case ixgbe_mac_82598EB:
867 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
868 			break;
869 		default:
870 			txctrl =
871 			    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
872 			break;
873 		}
874 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
875 		switch (hw->mac.type) {
876 		case ixgbe_mac_82598EB:
877 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
878 			break;
879 		default:
880 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j),
881 			    txctrl);
882 			break;
883 		}
884 
885 	}
886 
887 	if (hw->mac.type != ixgbe_mac_82598EB) {
888 		u32 dmatxctl, rttdcs;
889 
890 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
891 		dmatxctl |= IXGBE_DMATXCTL_TE;
892 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
893 		/* Disable arbiter to set MTQC */
894 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
895 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
896 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
897 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
898 		    ixgbe_get_mtqc(sc->iov_mode));
899 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
900 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
901 	}
902 
903 } /* ixgbe_initialize_transmit_units */
904 
905 /************************************************************************
906  * ixgbe_register
907  ************************************************************************/
908 static void *
ixgbe_register(device_t dev)909 ixgbe_register(device_t dev)
910 {
911 	return (&ixgbe_sctx_init);
912 } /* ixgbe_register */
913 
914 /************************************************************************
915  * ixgbe_if_attach_pre - Device initialization routine, part 1
916  *
917  *   Called when the driver is being loaded.
918  *   Identifies the type of hardware, initializes the hardware,
919  *   and initializes iflib structures.
920  *
921  *   return 0 on success, positive on failure
922  ************************************************************************/
923 static int
ixgbe_if_attach_pre(if_ctx_t ctx)924 ixgbe_if_attach_pre(if_ctx_t ctx)
925 {
926 	struct ixgbe_softc *sc;
927 	device_t dev;
928 	if_softc_ctx_t scctx;
929 	struct ixgbe_hw *hw;
930 	int error = 0;
931 	u32 ctrl_ext;
932 	size_t i;
933 
934 	INIT_DEBUGOUT("ixgbe_attach: begin");
935 
936 	/* Allocate, clear, and link in our adapter structure */
937 	dev = iflib_get_dev(ctx);
938 	sc = iflib_get_softc(ctx);
939 	sc->hw.back = sc;
940 	sc->ctx = ctx;
941 	sc->dev = dev;
942 	scctx = sc->shared = iflib_get_softc_ctx(ctx);
943 	sc->media = iflib_get_media(ctx);
944 	hw = &sc->hw;
945 
946 	/* Determine hardware revision */
947 	hw->vendor_id = pci_get_vendor(dev);
948 	hw->device_id = pci_get_device(dev);
949 	hw->revision_id = pci_get_revid(dev);
950 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
951 	hw->subsystem_device_id = pci_get_subdevice(dev);
952 
953 	/* Do base PCI setup - map BAR0 */
954 	if (ixgbe_allocate_pci_resources(ctx)) {
955 		device_printf(dev, "Allocation of PCI resources failed\n");
956 		return (ENXIO);
957 	}
958 
959 	/* let hardware know driver is loaded */
960 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
961 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
962 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
963 
964 	/*
965 	 * Initialize the shared code
966 	 */
967 	if (ixgbe_init_shared_code(hw) != 0) {
968 		device_printf(dev, "Unable to initialize the shared code\n");
969 		error = ENXIO;
970 		goto err_pci;
971 	}
972 
973 	if (hw->mac.ops.fw_recovery_mode &&
974 	    hw->mac.ops.fw_recovery_mode(hw)) {
975 		device_printf(dev,
976 		    "Firmware recovery mode detected. Limiting "
977 		    "functionality.\nRefer to the Intel(R) Ethernet Adapters "
978 		    "and Devices User Guide for details on firmware recovery "
979 		    "mode.");
980 		error = ENOSYS;
981 		goto err_pci;
982 	}
983 
984 	/* 82598 Does not support SR-IOV, initialize everything else */
985 	if (hw->mac.type >= ixgbe_mac_82599_vf) {
986 		for (i = 0; i < sc->num_vfs; i++)
987 			hw->mbx.ops[i].init_params(hw);
988 	}
989 
990 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
991 
992 	if (hw->mac.type != ixgbe_mac_82598EB)
993 		hw->phy.smart_speed = ixgbe_smart_speed;
994 
995 	ixgbe_init_device_features(sc);
996 
997 	/* Enable WoL (if supported) */
998 	ixgbe_check_wol_support(sc);
999 
1000 	/* Verify adapter fan is still functional (if applicable) */
1001 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
1002 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
1003 		ixgbe_check_fan_failure(sc, esdp, false);
1004 	}
1005 
1006 	/* Ensure SW/FW semaphore is free */
1007 	ixgbe_init_swfw_semaphore(hw);
1008 
1009 	/* Set an initial default flow control value */
1010 	hw->fc.requested_mode = ixgbe_flow_control;
1011 
1012 	hw->phy.reset_if_overtemp = true;
1013 	error = ixgbe_reset_hw(hw);
1014 	hw->phy.reset_if_overtemp = false;
1015 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
1016 		/*
1017 		 * No optics in this port, set up
1018 		 * so the timer routine will probe
1019 		 * for later insertion.
1020 		 */
1021 		sc->sfp_probe = true;
1022 		error = 0;
1023 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1024 		device_printf(dev, "Unsupported SFP+ module detected!\n");
1025 		error = EIO;
1026 		goto err_pci;
1027 	} else if (error) {
1028 		device_printf(dev, "Hardware initialization failed\n");
1029 		error = EIO;
1030 		goto err_pci;
1031 	}
1032 
1033 	/* Make sure we have a good EEPROM before we read from it */
1034 	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
1035 		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
1036 		error = EIO;
1037 		goto err_pci;
1038 	}
1039 
1040 	error = ixgbe_start_hw(hw);
1041 	switch (error) {
1042 	case IXGBE_ERR_EEPROM_VERSION:
1043 		device_printf(dev,
1044 		    "This device is a pre-production adapter/LOM.  Please be"
1045 		    " aware there may be issues associated with your"
1046 		    " hardware.\nIf you are experiencing problems please"
1047 		    " contact your Intel or hardware representative who"
1048 		    " provided you with this hardware.\n");
1049 		break;
1050 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
1051 		device_printf(dev, "Unsupported SFP+ Module\n");
1052 		error = EIO;
1053 		goto err_pci;
1054 	case IXGBE_ERR_SFP_NOT_PRESENT:
1055 		device_printf(dev, "No SFP+ Module found\n");
1056 		/* falls thru */
1057 	default:
1058 		break;
1059 	}
1060 
1061 	/* Most of the iflib initialization... */
1062 
1063 	iflib_set_mac(ctx, hw->mac.addr);
1064 	switch (sc->hw.mac.type) {
1065 	case ixgbe_mac_X550:
1066 	case ixgbe_mac_X550EM_x:
1067 	case ixgbe_mac_X550EM_a:
1068 		scctx->isc_rss_table_size = 512;
1069 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1070 		break;
1071 	default:
1072 		scctx->isc_rss_table_size = 128;
1073 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1074 	}
1075 
1076 	/* Allow legacy interrupts */
1077 	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1078 
1079 	scctx->isc_txqsizes[0] =
1080 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1081 	    sizeof(u32), DBA_ALIGN),
1082 	scctx->isc_rxqsizes[0] =
1083 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1084 	    DBA_ALIGN);
1085 
1086 	/* XXX */
1087 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1088 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1089 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1090 		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1091 	} else {
1092 		scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1093 		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1094 	}
1095 
1096 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
1097 
1098 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1099 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1100 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1101 
1102 	scctx->isc_txrx = &ixgbe_txrx;
1103 
1104 	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1105 
1106 	return (0);
1107 
1108 err_pci:
1109 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1110 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1111 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1112 	ixgbe_free_pci_resources(ctx);
1113 
1114 	return (error);
1115 } /* ixgbe_if_attach_pre */
1116 
1117  /*********************************************************************
1118  * ixgbe_if_attach_post - Device initialization routine, part 2
1119  *
1120  *   Called during driver load, but after interrupts and
1121  *   resources have been allocated and configured.
1122  *   Sets up some data structures not relevant to iflib.
1123  *
1124  *   return 0 on success, positive on failure
1125  *********************************************************************/
1126 static int
ixgbe_if_attach_post(if_ctx_t ctx)1127 ixgbe_if_attach_post(if_ctx_t ctx)
1128 {
1129 	device_t dev;
1130 	struct ixgbe_softc *sc;
1131 	struct ixgbe_hw *hw;
1132 	int error = 0;
1133 
1134 	dev = iflib_get_dev(ctx);
1135 	sc = iflib_get_softc(ctx);
1136 	hw = &sc->hw;
1137 
1138 	if (sc->intr_type == IFLIB_INTR_LEGACY &&
1139 		(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1140 		device_printf(dev, "Device does not support legacy interrupts");
1141 		error = ENXIO;
1142 		goto err;
1143 	}
1144 
1145 	/* Allocate multicast array memory. */
1146 	sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
1147 	    M_IXGBE, M_NOWAIT);
1148 	if (sc->mta == NULL) {
1149 		device_printf(dev,
1150 		    "Can not allocate multicast setup array\n");
1151 		error = ENOMEM;
1152 		goto err;
1153 	}
1154 
1155 	/* hw.ix defaults init */
1156 	ixgbe_set_advertise(sc, ixgbe_advertise_speed);
1157 
1158 	/* Enable the optics for 82599 SFP+ fiber */
1159 	ixgbe_enable_tx_laser(hw);
1160 
1161 	/* Enable power to the phy. */
1162 	ixgbe_set_phy_power(hw, true);
1163 
1164 	ixgbe_initialize_iov(sc);
1165 
1166 	error = ixgbe_setup_interface(ctx);
1167 	if (error) {
1168 		device_printf(dev, "Interface setup failed: %d\n", error);
1169 		goto err;
1170 	}
1171 
1172 	ixgbe_if_update_admin_status(ctx);
1173 
1174 	/* Initialize statistics */
1175 	ixgbe_update_stats_counters(sc);
1176 	ixgbe_add_hw_stats(sc);
1177 
1178 	/* Check PCIE slot type/speed/width */
1179 	ixgbe_get_slot_info(sc);
1180 
1181 	/*
1182 	 * Do time init and sysctl init here, but
1183 	 * only on the first port of a bypass sc.
1184 	 */
1185 	ixgbe_bypass_init(sc);
1186 
1187 	/* Display NVM and Option ROM versions */
1188 	ixgbe_print_fw_version(ctx);
1189 
1190 	/* Set an initial dmac value */
1191 	sc->dmac = 0;
1192 	/* Set initial advertised speeds (if applicable) */
1193 	sc->advertise = ixgbe_get_default_advertise(sc);
1194 
1195 	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1196 		ixgbe_define_iov_schemas(dev, &error);
1197 
1198 	/* Add sysctls */
1199 	ixgbe_add_device_sysctls(ctx);
1200 
1201 	/* Init recovery mode timer and state variable */
1202 	if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1203 		sc->recovery_mode = 0;
1204 
1205 		/* Set up the timer callout */
1206 		callout_init(&sc->fw_mode_timer, true);
1207 
1208 		/* Start the task */
1209 		callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
1210 	}
1211 
1212 	return (0);
1213 err:
1214 	return (error);
1215 } /* ixgbe_if_attach_post */
1216 
1217 /************************************************************************
1218  * ixgbe_check_wol_support
1219  *
1220  *   Checks whether the adapter's ports are capable of
1221  *   Wake On LAN by reading the adapter's NVM.
1222  *
1223  *   Sets each port's hw->wol_enabled value depending
1224  *   on the value read here.
1225  ************************************************************************/
1226 static void
ixgbe_check_wol_support(struct ixgbe_softc * sc)1227 ixgbe_check_wol_support(struct ixgbe_softc *sc)
1228 {
1229 	struct ixgbe_hw *hw = &sc->hw;
1230 	u16 dev_caps = 0;
1231 
1232 	/* Find out WoL support for port */
1233 	sc->wol_support = hw->wol_enabled = 0;
1234 	ixgbe_get_device_caps(hw, &dev_caps);
1235 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1236 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1237 	     hw->bus.func == 0))
1238 		sc->wol_support = hw->wol_enabled = 1;
1239 
1240 	/* Save initial wake up filter configuration */
1241 	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1242 
1243 	return;
1244 } /* ixgbe_check_wol_support */
1245 
1246 /************************************************************************
1247  * ixgbe_setup_interface
1248  *
1249  *   Setup networking device structure and register an interface.
1250  ************************************************************************/
1251 static int
ixgbe_setup_interface(if_ctx_t ctx)1252 ixgbe_setup_interface(if_ctx_t ctx)
1253 {
1254 	if_t ifp = iflib_get_ifp(ctx);
1255 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1256 
1257 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1258 
1259 	if_setbaudrate(ifp, IF_Gbps(10));
1260 
1261 	sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
1262 
1263 	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1264 
1265 	ixgbe_add_media_types(ctx);
1266 
1267 	/* Autoselect media by default */
1268 	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1269 
1270 	return (0);
1271 } /* ixgbe_setup_interface */
1272 
1273 /************************************************************************
1274  * ixgbe_if_get_counter
1275  ************************************************************************/
1276 static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx,ift_counter cnt)1277 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1278 {
1279 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1280 	if_t ifp = iflib_get_ifp(ctx);
1281 
1282 	switch (cnt) {
1283 	case IFCOUNTER_IPACKETS:
1284 		return (sc->ipackets);
1285 	case IFCOUNTER_OPACKETS:
1286 		return (sc->opackets);
1287 	case IFCOUNTER_IBYTES:
1288 		return (sc->ibytes);
1289 	case IFCOUNTER_OBYTES:
1290 		return (sc->obytes);
1291 	case IFCOUNTER_IMCASTS:
1292 		return (sc->imcasts);
1293 	case IFCOUNTER_OMCASTS:
1294 		return (sc->omcasts);
1295 	case IFCOUNTER_COLLISIONS:
1296 		return (0);
1297 	case IFCOUNTER_IQDROPS:
1298 		return (sc->iqdrops);
1299 	case IFCOUNTER_OQDROPS:
1300 		return (0);
1301 	case IFCOUNTER_IERRORS:
1302 		return (sc->ierrors);
1303 	default:
1304 		return (if_get_counter_default(ifp, cnt));
1305 	}
1306 } /* ixgbe_if_get_counter */
1307 
1308 /************************************************************************
1309  * ixgbe_if_i2c_req
1310  ************************************************************************/
1311 static int
ixgbe_if_i2c_req(if_ctx_t ctx,struct ifi2creq * req)1312 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1313 {
1314 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1315 	struct ixgbe_hw *hw = &sc->hw;
1316 	int i;
1317 
1318 	if (hw->phy.ops.read_i2c_byte == NULL)
1319 		return (ENXIO);
1320 	for (i = 0; i < req->len; i++)
1321 		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1322 		    req->dev_addr, &req->data[i]);
1323 	return (0);
1324 } /* ixgbe_if_i2c_req */
1325 
1326 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
1327  * reinitialized
1328  * @ctx: iflib context
1329  * @event: event code to check
1330  *
1331  * Defaults to returning false for unknown events.
1332  *
1333  * @returns true if iflib needs to reinit the interface
1334  */
1335 static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused,enum iflib_restart_event event)1336 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1337 {
1338 	switch (event) {
1339 	case IFLIB_RESTART_VLAN_CONFIG:
1340 	default:
1341 		return (false);
1342 	}
1343 }
1344 
1345 /************************************************************************
1346  * ixgbe_add_media_types
1347  ************************************************************************/
1348 static void
ixgbe_add_media_types(if_ctx_t ctx)1349 ixgbe_add_media_types(if_ctx_t ctx)
1350 {
1351 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1352 	struct ixgbe_hw *hw = &sc->hw;
1353 	device_t dev = iflib_get_dev(ctx);
1354 	u64 layer;
1355 
1356 	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
1357 
1358 	/* Media types with matching FreeBSD media defines */
1359 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1360 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1361 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1362 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1363 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1364 		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1365 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1366 		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1367 
1368 	if (hw->mac.type == ixgbe_mac_X550) {
1369 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1370 		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1371 	}
1372 
1373 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1374 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1375 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1376 		    NULL);
1377 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1378 	}
1379 
1380 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1381 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1382 		if (hw->phy.multispeed_fiber)
1383 			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
1384 			    NULL);
1385 	}
1386 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1387 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1388 		if (hw->phy.multispeed_fiber)
1389 			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
1390 			    NULL);
1391 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1392 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1393 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1394 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1395 
1396 #ifdef IFM_ETH_XTYPE
1397 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1398 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1399 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1400 		ifmedia_add( sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1401 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1402 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1403 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1404 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1405 #else
1406 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1407 		device_printf(dev, "Media supported: 10GbaseKR\n");
1408 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1409 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1410 	}
1411 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1412 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1413 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1414 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1415 	}
1416 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1417 		device_printf(dev, "Media supported: 1000baseKX\n");
1418 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1419 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1420 	}
1421 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1422 		device_printf(dev, "Media supported: 2500baseKX\n");
1423 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1424 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1425 	}
1426 #endif
1427 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1428 		device_printf(dev, "Media supported: 1000baseBX\n");
1429 
1430 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1431 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1432 		    0, NULL);
1433 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1434 	}
1435 
1436 	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1437 } /* ixgbe_add_media_types */
1438 
1439 /************************************************************************
1440  * ixgbe_is_sfp
1441  ************************************************************************/
1442 static inline bool
ixgbe_is_sfp(struct ixgbe_hw * hw)1443 ixgbe_is_sfp(struct ixgbe_hw *hw)
1444 {
1445 	switch (hw->mac.type) {
1446 	case ixgbe_mac_82598EB:
1447 		if (hw->phy.type == ixgbe_phy_nl)
1448 			return (true);
1449 		return (false);
1450 	case ixgbe_mac_82599EB:
1451 		switch (hw->mac.ops.get_media_type(hw)) {
1452 		case ixgbe_media_type_fiber:
1453 		case ixgbe_media_type_fiber_qsfp:
1454 			return (true);
1455 		default:
1456 			return (false);
1457 		}
1458 	case ixgbe_mac_X550EM_x:
1459 	case ixgbe_mac_X550EM_a:
1460 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1461 			return (true);
1462 		return (false);
1463 	default:
1464 		return (false);
1465 	}
1466 } /* ixgbe_is_sfp */
1467 
1468 /************************************************************************
1469  * ixgbe_config_link
1470  ************************************************************************/
1471 static void
ixgbe_config_link(if_ctx_t ctx)1472 ixgbe_config_link(if_ctx_t ctx)
1473 {
1474 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1475 	struct ixgbe_hw *hw = &sc->hw;
1476 	u32 autoneg, err = 0;
1477 	bool sfp, negotiate;
1478 
1479 	sfp = ixgbe_is_sfp(hw);
1480 
1481 	if (sfp) {
1482 		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
1483 		iflib_admin_intr_deferred(ctx);
1484 	} else {
1485 		if (hw->mac.ops.check_link)
1486 			err = ixgbe_check_link(hw, &sc->link_speed,
1487 			    &sc->link_up, false);
1488 		if (err)
1489 			return;
1490 		autoneg = hw->phy.autoneg_advertised;
1491 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1492 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1493 			    &negotiate);
1494 		if (err)
1495 			return;
1496 
1497 		if (hw->mac.type == ixgbe_mac_X550 &&
1498 		    hw->phy.autoneg_advertised == 0) {
1499 			/*
1500 			 * 2.5G and 5G autonegotiation speeds on X550
1501 			 * are disabled by default due to reported
1502 			 * interoperability issues with some switches.
1503 			 *
1504 			 * The second condition checks if any operations
1505 			 * involving setting autonegotiation speeds have
1506 			 * been performed prior to this ixgbe_config_link()
1507 			 * call.
1508 			 *
1509 			 * If hw->phy.autoneg_advertised does not
1510 			 * equal 0, this means that the user might have
1511 			 * set autonegotiation speeds via the sysctl
1512 			 * before bringing the interface up. In this
1513 			 * case, we should not disable 2.5G and 5G
1514 			 * since that speeds might be selected by the
1515 			 * user.
1516 			 *
1517 			 * Otherwise (i.e. if hw->phy.autoneg_advertised
1518 			 * is set to 0), it is the first time we set
1519 			 * autonegotiation preferences and the default
1520 			 * set of speeds should exclude 2.5G and 5G.
1521 			 */
1522 			autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
1523 			    IXGBE_LINK_SPEED_5GB_FULL);
1524 		}
1525 
1526 		if (hw->mac.ops.setup_link)
1527 			err = hw->mac.ops.setup_link(hw, autoneg,
1528 			    sc->link_up);
1529 	}
1530 } /* ixgbe_config_link */
1531 
1532 /************************************************************************
1533  * ixgbe_update_stats_counters - Update board statistics counters.
1534  ************************************************************************/
1535 static void
ixgbe_update_stats_counters(struct ixgbe_softc * sc)1536 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1537 {
1538 	struct ixgbe_hw *hw = &sc->hw;
1539 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
1540 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
1541 	u32 lxoffrxc;
1542 	u64 total_missed_rx = 0;
1543 
1544 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1545 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1546 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1547 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1548 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1549 
1550 	for (int i = 0; i < 16; i++) {
1551 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1552 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1553 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1554 	}
1555 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1556 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1557 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1558 
1559 	/* Hardware workaround, gprc counts missed packets */
1560 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1561 	stats->gprc -= missed_rx;
1562 
1563 	if (hw->mac.type != ixgbe_mac_82598EB) {
1564 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1565 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1566 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1567 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1568 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1569 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1570 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1571 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1572 		stats->lxoffrxc += lxoffrxc;
1573 	} else {
1574 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1575 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1576 		stats->lxoffrxc += lxoffrxc;
1577 		/* 82598 only has a counter in the high register */
1578 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1579 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1580 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1581 	}
1582 
1583 	/*
1584 	 * For watchdog management we need to know if we have been paused
1585 	 * during the last interval, so capture that here.
1586 	*/
1587 	if (lxoffrxc)
1588 		sc->shared->isc_pause_frames = 1;
1589 
1590 	/*
1591 	 * Workaround: mprc hardware is incorrectly counting
1592 	 * broadcasts, so for now we subtract those.
1593 	 */
1594 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1595 	stats->bprc += bprc;
1596 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1597 	if (hw->mac.type == ixgbe_mac_82598EB)
1598 		stats->mprc -= bprc;
1599 
1600 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1601 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1602 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1603 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1604 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1605 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1606 
1607 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1608 	stats->lxontxc += lxon;
1609 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1610 	stats->lxofftxc += lxoff;
1611 	total = lxon + lxoff;
1612 
1613 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1614 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1615 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1616 	stats->gptc -= total;
1617 	stats->mptc -= total;
1618 	stats->ptc64 -= total;
1619 	stats->gotc -= total * ETHER_MIN_LEN;
1620 
1621 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1622 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1623 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1624 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1625 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1626 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1627 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1628 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1629 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1630 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1631 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1632 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1633 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1634 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1635 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1636 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1637 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1638 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1639 	/* Only read FCOE on 82599 */
1640 	if (hw->mac.type != ixgbe_mac_82598EB) {
1641 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1642 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1643 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1644 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1645 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1646 	}
1647 
1648 	/* Fill out the OS statistics structure */
1649 	IXGBE_SET_IPACKETS(sc, stats->gprc);
1650 	IXGBE_SET_OPACKETS(sc, stats->gptc);
1651 	IXGBE_SET_IBYTES(sc, stats->gorc);
1652 	IXGBE_SET_OBYTES(sc, stats->gotc);
1653 	IXGBE_SET_IMCASTS(sc, stats->mprc);
1654 	IXGBE_SET_OMCASTS(sc, stats->mptc);
1655 	IXGBE_SET_COLLISIONS(sc, 0);
1656 	IXGBE_SET_IQDROPS(sc, total_missed_rx);
1657 
1658 	/*
1659 	 * Aggregate following types of errors as RX errors:
1660 	 * - CRC error count,
1661 	 * - illegal byte error count,
1662 	 * - missed packets count,
1663 	 * - length error count,
1664 	 * - undersized packets count,
1665 	 * - fragmented packets count,
1666 	 * - oversized packets count,
1667 	 * - jabber count.
1668 	 */
1669 	IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1670 	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
1671 	    stats->roc + stats->rjc);
1672 } /* ixgbe_update_stats_counters */
1673 
1674 /************************************************************************
1675  * ixgbe_add_hw_stats
1676  *
1677  *   Add sysctl variables, one per statistic, to the system.
1678  ************************************************************************/
1679 static void
ixgbe_add_hw_stats(struct ixgbe_softc * sc)1680 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1681 {
1682 	device_t dev = iflib_get_dev(sc->ctx);
1683 	struct ix_rx_queue *rx_que;
1684 	struct ix_tx_queue *tx_que;
1685 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1686 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1687 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1688 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
1689 	struct sysctl_oid *stat_node, *queue_node;
1690 	struct sysctl_oid_list *stat_list, *queue_list;
1691 	int i;
1692 
1693 #define QUEUE_NAME_LEN 32
1694 	char namebuf[QUEUE_NAME_LEN];
1695 
1696 	/* Driver Statistics */
1697 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1698 	    CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1699 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1700 	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1701 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1702 	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1703 
1704 	for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
1705 	    i++, tx_que++) {
1706 		struct tx_ring *txr = &tx_que->txr;
1707 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1708 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1709 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1710 		queue_list = SYSCTL_CHILDREN(queue_node);
1711 
1712 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1713 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1714 		    ixgbe_sysctl_tdh_handler, "IU",
1715 		    "Transmit Descriptor Head");
1716 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1717 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1718 		    ixgbe_sysctl_tdt_handler, "IU",
1719 		    "Transmit Descriptor Tail");
1720 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1721 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1722 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1723 		    CTLFLAG_RD, &txr->total_packets,
1724 		    "Queue Packets Transmitted");
1725 	}
1726 
1727 	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
1728 	    i++, rx_que++) {
1729 		struct rx_ring *rxr = &rx_que->rxr;
1730 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1731 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1732 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1733 		queue_list = SYSCTL_CHILDREN(queue_node);
1734 
1735 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1736 		    CTLTYPE_UINT | CTLFLAG_RW,
1737 		    &sc->rx_queues[i], 0,
1738 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1739 		    "Interrupt Rate");
1740 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1741 		    CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1742 		    "irqs on this queue");
1743 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1744 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1745 		    ixgbe_sysctl_rdh_handler, "IU",
1746 		    "Receive Descriptor Head");
1747 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1748 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1749 		    ixgbe_sysctl_rdt_handler, "IU",
1750 		    "Receive Descriptor Tail");
1751 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1752 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1753 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1754 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1755 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1756 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1757 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1758 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1759 	}
1760 
1761 	/* MAC stats get their own sub node */
1762 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1763 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1764 	stat_list = SYSCTL_CHILDREN(stat_node);
1765 
1766 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1767 	    CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1768 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1769 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1770 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1771 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1772 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1773 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1774 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1775 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1776 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1777 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1778 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1779 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1780 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1781 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1782 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1783 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1784 
1785 	/* Flow Control stats */
1786 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1787 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1788 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1789 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1790 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1791 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1792 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1793 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1794 
1795 	/* Packet Reception Stats */
1796 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1797 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1798 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1799 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1800 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1801 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1802 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1803 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1804 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1805 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1806 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1807 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1808 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1809 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1810 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1811 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1812 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1813 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1814 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1815 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1816 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1817 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1818 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1819 	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1820 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1821 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1822 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1823 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1824 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1825 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1826 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1827 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1828 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1829 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1830 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1831 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1832 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1833 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1834 
1835 	/* Packet Transmission Stats */
1836 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1837 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1838 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1839 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1840 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1841 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1842 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1843 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1844 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1845 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1846 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1847 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1848 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1849 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1850 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1851 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1852 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1853 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1854 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1855 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1856 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1857 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1858 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1859 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1860 } /* ixgbe_add_hw_stats */
1861 
1862 /************************************************************************
1863  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1864  *
1865  *   Retrieves the TDH value from the hardware
1866  ************************************************************************/
1867 static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)1868 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1869 {
1870 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1871 	int error;
1872 	unsigned int val;
1873 
1874 	if (!txr)
1875 		return (0);
1876 
1877 
1878 	if (atomic_load_acq_int(&txr->sc->recovery_mode))
1879 		return (EPERM);
1880 
1881 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1882 	error = sysctl_handle_int(oidp, &val, 0, req);
1883 	if (error || !req->newptr)
1884 		return error;
1885 
1886 	return (0);
1887 } /* ixgbe_sysctl_tdh_handler */
1888 
1889 /************************************************************************
1890  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1891  *
1892  *   Retrieves the TDT value from the hardware
1893  ************************************************************************/
1894 static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)1895 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1896 {
1897 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1898 	int error;
1899 	unsigned int val;
1900 
1901 	if (!txr)
1902 		return (0);
1903 
1904 	if (atomic_load_acq_int(&txr->sc->recovery_mode))
1905 		return (EPERM);
1906 
1907 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1908 	error = sysctl_handle_int(oidp, &val, 0, req);
1909 	if (error || !req->newptr)
1910 		return error;
1911 
1912 	return (0);
1913 } /* ixgbe_sysctl_tdt_handler */
1914 
1915 /************************************************************************
1916  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1917  *
1918  *   Retrieves the RDH value from the hardware
1919  ************************************************************************/
1920 static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)1921 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1922 {
1923 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1924 	int error;
1925 	unsigned int val;
1926 
1927 	if (!rxr)
1928 		return (0);
1929 
1930 	if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1931 		return (EPERM);
1932 
1933 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
1934 	error = sysctl_handle_int(oidp, &val, 0, req);
1935 	if (error || !req->newptr)
1936 		return error;
1937 
1938 	return (0);
1939 } /* ixgbe_sysctl_rdh_handler */
1940 
1941 /************************************************************************
1942  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1943  *
1944  *   Retrieves the RDT value from the hardware
1945  ************************************************************************/
1946 static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)1947 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1948 {
1949 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1950 	int error;
1951 	unsigned int val;
1952 
1953 	if (!rxr)
1954 		return (0);
1955 
1956 	if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1957 		return (EPERM);
1958 
1959 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
1960 	error = sysctl_handle_int(oidp, &val, 0, req);
1961 	if (error || !req->newptr)
1962 		return error;
1963 
1964 	return (0);
1965 } /* ixgbe_sysctl_rdt_handler */
1966 
1967 /************************************************************************
1968  * ixgbe_if_vlan_register
1969  *
1970  *   Run via vlan config EVENT, it enables us to use the
1971  *   HW Filter table since we can get the vlan id. This
1972  *   just creates the entry in the soft version of the
1973  *   VFTA, init will repopulate the real table.
1974  ************************************************************************/
1975 static void
ixgbe_if_vlan_register(if_ctx_t ctx,u16 vtag)1976 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1977 {
1978 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1979 	u16 index, bit;
1980 
1981 	index = (vtag >> 5) & 0x7F;
1982 	bit = vtag & 0x1F;
1983 	sc->shadow_vfta[index] |= (1 << bit);
1984 	++sc->num_vlans;
1985 	ixgbe_setup_vlan_hw_support(ctx);
1986 } /* ixgbe_if_vlan_register */
1987 
1988 /************************************************************************
1989  * ixgbe_if_vlan_unregister
1990  *
1991  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1992  ************************************************************************/
1993 static void
ixgbe_if_vlan_unregister(if_ctx_t ctx,u16 vtag)1994 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1995 {
1996 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1997 	u16 index, bit;
1998 
1999 	index = (vtag >> 5) & 0x7F;
2000 	bit = vtag & 0x1F;
2001 	sc->shadow_vfta[index] &= ~(1 << bit);
2002 	--sc->num_vlans;
2003 	/* Re-init to load the changes */
2004 	ixgbe_setup_vlan_hw_support(ctx);
2005 } /* ixgbe_if_vlan_unregister */
2006 
2007 /************************************************************************
2008  * ixgbe_setup_vlan_hw_support
2009  ************************************************************************/
2010 static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)2011 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
2012 {
2013 	if_t ifp = iflib_get_ifp(ctx);
2014 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2015 	struct ixgbe_hw *hw = &sc->hw;
2016 	struct rx_ring *rxr;
2017 	int i;
2018 	u32 ctrl;
2019 
2020 
2021 	/*
2022 	 * We get here thru init_locked, meaning
2023 	 * a soft reset, this has already cleared
2024 	 * the VFTA and other state, so if there
2025 	 * have been no vlan's registered do nothing.
2026 	 */
2027 	if (sc->num_vlans == 0 ||
2028 	    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
2029 		/* Clear the vlan hw flag */
2030 		for (i = 0; i < sc->num_rx_queues; i++) {
2031 			rxr = &sc->rx_queues[i].rxr;
2032 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
2033 			if (hw->mac.type != ixgbe_mac_82598EB) {
2034 				ctrl = IXGBE_READ_REG(hw,
2035 				    IXGBE_RXDCTL(rxr->me));
2036 				ctrl &= ~IXGBE_RXDCTL_VME;
2037 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2038 				    ctrl);
2039 			}
2040 			rxr->vtag_strip = false;
2041 		}
2042 		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2043 		/* Enable the Filter Table if enabled */
2044 		ctrl |= IXGBE_VLNCTRL_CFIEN;
2045 		ctrl &= ~IXGBE_VLNCTRL_VFE;
2046 		if (hw->mac.type == ixgbe_mac_82598EB)
2047 			ctrl &= ~IXGBE_VLNCTRL_VME;
2048 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2049 		return;
2050 	}
2051 
2052 	/* Setup the queues for vlans */
2053 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
2054 		for (i = 0; i < sc->num_rx_queues; i++) {
2055 			rxr = &sc->rx_queues[i].rxr;
2056 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
2057 			if (hw->mac.type != ixgbe_mac_82598EB) {
2058 				ctrl = IXGBE_READ_REG(hw,
2059 				    IXGBE_RXDCTL(rxr->me));
2060 				ctrl |= IXGBE_RXDCTL_VME;
2061 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2062 				    ctrl);
2063 			}
2064 			rxr->vtag_strip = true;
2065 		}
2066 	}
2067 
2068 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
2069 		return;
2070 	/*
2071 	 * A soft reset zero's out the VFTA, so
2072 	 * we need to repopulate it now.
2073 	 */
2074 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2075 		if (sc->shadow_vfta[i] != 0)
2076 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2077 			    sc->shadow_vfta[i]);
2078 
2079 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2080 	/* Enable the Filter Table if enabled */
2081 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
2082 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2083 		ctrl |= IXGBE_VLNCTRL_VFE;
2084 	}
2085 	if (hw->mac.type == ixgbe_mac_82598EB)
2086 		ctrl |= IXGBE_VLNCTRL_VME;
2087 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2088 } /* ixgbe_setup_vlan_hw_support */
2089 
2090 /************************************************************************
2091  * ixgbe_get_slot_info
2092  *
2093  *   Get the width and transaction speed of
2094  *   the slot this adapter is plugged into.
2095  ************************************************************************/
2096 static void
ixgbe_get_slot_info(struct ixgbe_softc * sc)2097 ixgbe_get_slot_info(struct ixgbe_softc *sc)
2098 {
2099 	device_t dev = iflib_get_dev(sc->ctx);
2100 	struct ixgbe_hw *hw = &sc->hw;
2101 	int bus_info_valid = true;
2102 	u32 offset;
2103 	u16 link;
2104 
2105 	/* Some devices are behind an internal bridge */
2106 	switch (hw->device_id) {
2107 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
2108 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2109 		goto get_parent_info;
2110 	default:
2111 		break;
2112 	}
2113 
2114 	ixgbe_get_bus_info(hw);
2115 
2116 	/*
2117 	 * Some devices don't use PCI-E, but there is no need
2118 	 * to display "Unknown" for bus speed and width.
2119 	 */
2120 	switch (hw->mac.type) {
2121 	case ixgbe_mac_X550EM_x:
2122 	case ixgbe_mac_X550EM_a:
2123 		return;
2124 	default:
2125 		goto display;
2126 	}
2127 
2128 get_parent_info:
2129 	/*
2130 	 * For the Quad port adapter we need to parse back
2131 	 * up the PCI tree to find the speed of the expansion
2132 	 * slot into which this adapter is plugged. A bit more work.
2133 	 */
2134 	dev = device_get_parent(device_get_parent(dev));
2135 #ifdef IXGBE_DEBUG
2136 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2137 	    pci_get_slot(dev), pci_get_function(dev));
2138 #endif
2139 	dev = device_get_parent(device_get_parent(dev));
2140 #ifdef IXGBE_DEBUG
2141 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2142 	    pci_get_slot(dev), pci_get_function(dev));
2143 #endif
2144 	/* Now get the PCI Express Capabilities offset */
2145 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
2146 		/*
2147 		 * Hmm...can't get PCI-Express capabilities.
2148 		 * Falling back to default method.
2149 		 */
2150 		bus_info_valid = false;
2151 		ixgbe_get_bus_info(hw);
2152 		goto display;
2153 	}
2154 	/* ...and read the Link Status Register */
2155 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2156 	ixgbe_set_pci_config_data_generic(hw, link);
2157 
2158 display:
2159 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
2160 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
2161 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
2162 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
2163 	     "Unknown"),
2164 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2165 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2166 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
2167 	     "Unknown"));
2168 
2169 	if (bus_info_valid) {
2170 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2171 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2172 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
2173 			device_printf(dev,
2174 			    "PCI-Express bandwidth available for this card"
2175 			    " is not sufficient for optimal performance.\n");
2176 			device_printf(dev,
2177 			    "For optimal performance a x8 PCIE, or x4 PCIE"
2178 			    " Gen2 slot is required.\n");
2179 		}
2180 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2181 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2182 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
2183 			device_printf(dev,
2184 			    "PCI-Express bandwidth available for this card"
2185 			    " is not sufficient for optimal performance.\n");
2186 			device_printf(dev,
2187 			    "For optimal performance a x8 PCIE Gen3 slot is"
2188 			    " required.\n");
2189 		}
2190 	} else
2191 		device_printf(dev,
2192 		    "Unable to determine slot speed/width. The speed/width"
2193 		    " reported are that of the internal switch.\n");
2194 
2195 	return;
2196 } /* ixgbe_get_slot_info */
2197 
2198 /************************************************************************
2199  * ixgbe_if_msix_intr_assign
2200  *
2201  *   Setup MSI-X Interrupt resources and handlers
2202  ************************************************************************/
2203 static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx,int msix)2204 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2205 {
2206 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2207 	struct ix_rx_queue *rx_que = sc->rx_queues;
2208 	struct ix_tx_queue *tx_que;
2209 	int error, rid, vector = 0;
2210 	char buf[16];
2211 
2212 	/* Admin Que is vector 0*/
2213 	rid = vector + 1;
2214 	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2215 		rid = vector + 1;
2216 
2217 		snprintf(buf, sizeof(buf), "rxq%d", i);
2218 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2219 		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
2220 		    buf);
2221 
2222 		if (error) {
2223 			device_printf(iflib_get_dev(ctx),
2224 			    "Failed to allocate que int %d err: %d",
2225 			    i,error);
2226 			sc->num_rx_queues = i + 1;
2227 			goto fail;
2228 		}
2229 
2230 		rx_que->msix = vector;
2231 	}
2232 	for (int i = 0; i < sc->num_tx_queues; i++) {
2233 		snprintf(buf, sizeof(buf), "txq%d", i);
2234 		tx_que = &sc->tx_queues[i];
2235 		tx_que->msix = i % sc->num_rx_queues;
2236 		iflib_softirq_alloc_generic(ctx,
2237 		    &sc->rx_queues[tx_que->msix].que_irq,
2238 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2239 	}
2240 	rid = vector + 1;
2241 	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2242 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2243 	if (error) {
2244 		device_printf(iflib_get_dev(ctx),
2245 		    "Failed to register admin handler");
2246 		return (error);
2247 	}
2248 
2249 	sc->vector = vector;
2250 
2251 	return (0);
2252 fail:
2253 	iflib_irq_free(ctx, &sc->irq);
2254 	rx_que = sc->rx_queues;
2255 	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2256 		iflib_irq_free(ctx, &rx_que->que_irq);
2257 
2258 	return (error);
2259 } /* ixgbe_if_msix_intr_assign */
2260 
2261 static inline void
ixgbe_perform_aim(struct ixgbe_softc * sc,struct ix_rx_queue * que)2262 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2263 {
2264 	uint32_t newitr = 0;
2265 	struct rx_ring *rxr = &que->rxr;
2266 	/* FIXME struct tx_ring *txr = ... ->txr; */
2267 
2268 	/*
2269 	 * Do Adaptive Interrupt Moderation:
2270 	 *  - Write out last calculated setting
2271 	 *  - Calculate based on average size over
2272 	 *    the last interval.
2273 	 */
2274 	if (que->eitr_setting) {
2275 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2276 		    que->eitr_setting);
2277 	}
2278 
2279 	que->eitr_setting = 0;
2280 	/* Idle, do nothing */
2281 	if (rxr->bytes == 0) {
2282 		/* FIXME && txr->bytes == 0 */
2283 		return;
2284 	}
2285 
2286 	if ((rxr->bytes) && (rxr->packets))
2287 		newitr = rxr->bytes / rxr->packets;
2288 	/* FIXME for transmit accounting
2289 	 * if ((txr->bytes) && (txr->packets))
2290 	 * 	newitr = txr->bytes/txr->packets;
2291 	 * if ((rxr->bytes) && (rxr->packets))
2292 	 * 	newitr = max(newitr, (rxr->bytes / rxr->packets));
2293 	 */
2294 
2295 	newitr += 24; /* account for hardware frame, crc */
2296 	/* set an upper boundary */
2297 	newitr = min(newitr, 3000);
2298 
2299 	/* Be nice to the mid range */
2300 	if ((newitr > 300) && (newitr < 1200)) {
2301 		newitr = (newitr / 3);
2302 	} else {
2303 		newitr = (newitr / 2);
2304 	}
2305 
2306 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2307 		newitr |= newitr << 16;
2308 	} else {
2309 		newitr |= IXGBE_EITR_CNT_WDIS;
2310 	}
2311 
2312 	/* save for next interrupt */
2313 	que->eitr_setting = newitr;
2314 
2315 	/* Reset state */
2316 	/* FIXME txr->bytes = 0; */
2317 	/* FIXME txr->packets = 0; */
2318 	rxr->bytes = 0;
2319 	rxr->packets = 0;
2320 
2321 	return;
2322 }
2323 
2324 /*********************************************************************
2325  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2326  **********************************************************************/
2327 static int
ixgbe_msix_que(void * arg)2328 ixgbe_msix_que(void *arg)
2329 {
2330 	struct ix_rx_queue *que = arg;
2331 	struct ixgbe_softc *sc = que->sc;
2332 	if_t ifp = iflib_get_ifp(que->sc->ctx);
2333 
2334 	/* Protect against spurious interrupts */
2335 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2336 		return (FILTER_HANDLED);
2337 
2338 	ixgbe_disable_queue(sc, que->msix);
2339 	++que->irqs;
2340 
2341 	/* Check for AIM */
2342 	if (sc->enable_aim) {
2343 		ixgbe_perform_aim(sc, que);
2344 	}
2345 
2346 	return (FILTER_SCHEDULE_THREAD);
2347 } /* ixgbe_msix_que */
2348 
2349 /************************************************************************
2350  * ixgbe_media_status - Media Ioctl callback
2351  *
2352  *   Called whenever the user queries the status of
2353  *   the interface using ifconfig.
2354  ************************************************************************/
2355 static void
ixgbe_if_media_status(if_ctx_t ctx,struct ifmediareq * ifmr)2356 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2357 {
2358 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2359 	struct ixgbe_hw *hw = &sc->hw;
2360 	int layer;
2361 
2362 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2363 
2364 	ifmr->ifm_status = IFM_AVALID;
2365 	ifmr->ifm_active = IFM_ETHER;
2366 
2367 	if (!sc->link_active)
2368 		return;
2369 
2370 	ifmr->ifm_status |= IFM_ACTIVE;
2371 	layer = sc->phy_layer;
2372 
2373 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2374 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2375 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2376 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2377 		switch (sc->link_speed) {
2378 		case IXGBE_LINK_SPEED_10GB_FULL:
2379 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2380 			break;
2381 		case IXGBE_LINK_SPEED_1GB_FULL:
2382 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2383 			break;
2384 		case IXGBE_LINK_SPEED_100_FULL:
2385 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2386 			break;
2387 		case IXGBE_LINK_SPEED_10_FULL:
2388 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2389 			break;
2390 		}
2391 	if (hw->mac.type == ixgbe_mac_X550)
2392 		switch (sc->link_speed) {
2393 		case IXGBE_LINK_SPEED_5GB_FULL:
2394 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2395 			break;
2396 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2397 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2398 			break;
2399 		}
2400 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2401 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2402 		switch (sc->link_speed) {
2403 		case IXGBE_LINK_SPEED_10GB_FULL:
2404 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2405 			break;
2406 		case IXGBE_LINK_SPEED_1GB_FULL:
2407 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2408 			break;
2409 		}
2410 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2411 		switch (sc->link_speed) {
2412 		case IXGBE_LINK_SPEED_10GB_FULL:
2413 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2414 			break;
2415 		case IXGBE_LINK_SPEED_1GB_FULL:
2416 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2417 			break;
2418 		}
2419 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2420 		switch (sc->link_speed) {
2421 		case IXGBE_LINK_SPEED_10GB_FULL:
2422 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2423 			break;
2424 		case IXGBE_LINK_SPEED_1GB_FULL:
2425 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2426 			break;
2427 		}
2428 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2429 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2430 		switch (sc->link_speed) {
2431 		case IXGBE_LINK_SPEED_10GB_FULL:
2432 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2433 			break;
2434 		case IXGBE_LINK_SPEED_1GB_FULL:
2435 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2436 			break;
2437 		}
2438 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2439 		switch (sc->link_speed) {
2440 		case IXGBE_LINK_SPEED_10GB_FULL:
2441 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2442 			break;
2443 		}
2444 	/*
2445 	 * XXX: These need to use the proper media types once
2446 	 * they're added.
2447 	 */
2448 #ifndef IFM_ETH_XTYPE
2449 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2450 		switch (sc->link_speed) {
2451 		case IXGBE_LINK_SPEED_10GB_FULL:
2452 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2453 			break;
2454 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2455 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2456 			break;
2457 		case IXGBE_LINK_SPEED_1GB_FULL:
2458 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2459 			break;
2460 		}
2461 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2462 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2463 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2464 		switch (sc->link_speed) {
2465 		case IXGBE_LINK_SPEED_10GB_FULL:
2466 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2467 			break;
2468 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2469 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2470 			break;
2471 		case IXGBE_LINK_SPEED_1GB_FULL:
2472 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2473 			break;
2474 		}
2475 #else
2476 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2477 		switch (sc->link_speed) {
2478 		case IXGBE_LINK_SPEED_10GB_FULL:
2479 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2480 			break;
2481 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2482 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2483 			break;
2484 		case IXGBE_LINK_SPEED_1GB_FULL:
2485 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2486 			break;
2487 		}
2488 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2489 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2490 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2491 		switch (sc->link_speed) {
2492 		case IXGBE_LINK_SPEED_10GB_FULL:
2493 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2494 			break;
2495 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2496 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2497 			break;
2498 		case IXGBE_LINK_SPEED_1GB_FULL:
2499 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2500 			break;
2501 		}
2502 #endif
2503 
2504 	/* If nothing is recognized... */
2505 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2506 		ifmr->ifm_active |= IFM_UNKNOWN;
2507 
2508 	/* Display current flow control setting used on link */
2509 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2510 	    hw->fc.current_mode == ixgbe_fc_full)
2511 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2512 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2513 	    hw->fc.current_mode == ixgbe_fc_full)
2514 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2515 } /* ixgbe_media_status */
2516 
2517 /************************************************************************
2518  * ixgbe_media_change - Media Ioctl callback
2519  *
2520  *   Called when the user changes speed/duplex using
2521  *   media/mediopt option with ifconfig.
2522  ************************************************************************/
2523 static int
ixgbe_if_media_change(if_ctx_t ctx)2524 ixgbe_if_media_change(if_ctx_t ctx)
2525 {
2526 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2527 	struct ifmedia *ifm = iflib_get_media(ctx);
2528 	struct ixgbe_hw *hw = &sc->hw;
2529 	ixgbe_link_speed speed = 0;
2530 
2531 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2532 
2533 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2534 		return (EINVAL);
2535 
2536 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2537 		return (EPERM);
2538 
2539 	/*
2540 	 * We don't actually need to check against the supported
2541 	 * media types of the adapter; ifmedia will take care of
2542 	 * that for us.
2543 	 */
2544 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2545 	case IFM_AUTO:
2546 	case IFM_10G_T:
2547 		speed |= IXGBE_LINK_SPEED_100_FULL;
2548 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2549 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2550 		break;
2551 	case IFM_10G_LRM:
2552 	case IFM_10G_LR:
2553 #ifndef IFM_ETH_XTYPE
2554 	case IFM_10G_SR: /* KR, too */
2555 	case IFM_10G_CX4: /* KX4 */
2556 #else
2557 	case IFM_10G_KR:
2558 	case IFM_10G_KX4:
2559 #endif
2560 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2561 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2562 		break;
2563 #ifndef IFM_ETH_XTYPE
2564 	case IFM_1000_CX: /* KX */
2565 #else
2566 	case IFM_1000_KX:
2567 #endif
2568 	case IFM_1000_LX:
2569 	case IFM_1000_SX:
2570 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2571 		break;
2572 	case IFM_1000_T:
2573 		speed |= IXGBE_LINK_SPEED_100_FULL;
2574 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2575 		break;
2576 	case IFM_10G_TWINAX:
2577 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2578 		break;
2579 	case IFM_5000_T:
2580 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
2581 		break;
2582 	case IFM_2500_T:
2583 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2584 		break;
2585 	case IFM_100_TX:
2586 		speed |= IXGBE_LINK_SPEED_100_FULL;
2587 		break;
2588 	case IFM_10_T:
2589 		speed |= IXGBE_LINK_SPEED_10_FULL;
2590 		break;
2591 	default:
2592 		goto invalid;
2593 	}
2594 
2595 	hw->mac.autotry_restart = true;
2596 	hw->mac.ops.setup_link(hw, speed, true);
2597 	sc->advertise =
2598 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
2599 	    ((speed & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
2600 	    ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
2601 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
2602 	    ((speed & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
2603 	    ((speed & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);
2604 
2605 	return (0);
2606 
2607 invalid:
2608 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2609 
2610 	return (EINVAL);
2611 } /* ixgbe_if_media_change */
2612 
2613 /************************************************************************
2614  * ixgbe_set_promisc
2615  ************************************************************************/
2616 static int
ixgbe_if_promisc_set(if_ctx_t ctx,int flags)2617 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2618 {
2619 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2620 	if_t ifp = iflib_get_ifp(ctx);
2621 	u32 rctl;
2622 	int mcnt = 0;
2623 
2624 	rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2625 	rctl &= (~IXGBE_FCTRL_UPE);
2626 	if (if_getflags(ifp) & IFF_ALLMULTI)
2627 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2628 	else {
2629 		mcnt = min(if_llmaddr_count(ifp),
2630 		    MAX_NUM_MULTICAST_ADDRESSES);
2631 	}
2632 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2633 		rctl &= (~IXGBE_FCTRL_MPE);
2634 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2635 
2636 	if (if_getflags(ifp) & IFF_PROMISC) {
2637 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2638 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2639 	} else if (if_getflags(ifp) & IFF_ALLMULTI) {
2640 		rctl |= IXGBE_FCTRL_MPE;
2641 		rctl &= ~IXGBE_FCTRL_UPE;
2642 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2643 	}
2644 	return (0);
2645 } /* ixgbe_if_promisc_set */
2646 
2647 /************************************************************************
2648  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2649  ************************************************************************/
2650 static int
ixgbe_msix_link(void * arg)2651 ixgbe_msix_link(void *arg)
2652 {
2653 	struct ixgbe_softc *sc = arg;
2654 	struct ixgbe_hw *hw = &sc->hw;
2655 	u32 eicr, eicr_mask;
2656 	s32 retval;
2657 
2658 	++sc->link_irq;
2659 
2660 	/* Pause other interrupts */
2661 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2662 
2663 	/* First get the cause */
2664 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2665 	/* Be sure the queue bits are not cleared */
2666 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2667 	/* Clear interrupt with write */
2668 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2669 
2670 	/* Link status change */
2671 	if (eicr & IXGBE_EICR_LSC) {
2672 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2673 		sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
2674 	}
2675 
2676 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2677 		if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2678 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
2679 			/* This is probably overkill :) */
2680 			if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2681 				return (FILTER_HANDLED);
2682 			/* Disable the interrupt */
2683 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2684 			sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2685 		} else
2686 			if (eicr & IXGBE_EICR_ECC) {
2687 				device_printf(iflib_get_dev(sc->ctx),
2688 				    "Received ECC Err, initiating reset\n");
2689 				hw->mac.flags |=
2690 				    ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2691 				ixgbe_reset_hw(hw);
2692 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2693 				    IXGBE_EICR_ECC);
2694 			}
2695 
2696 		/* Check for over temp condition */
2697 		if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2698 			switch (sc->hw.mac.type) {
2699 			case ixgbe_mac_X550EM_a:
2700 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2701 					break;
2702 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2703 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2704 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2705 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
2706 				retval = hw->phy.ops.check_overtemp(hw);
2707 				if (retval != IXGBE_ERR_OVERTEMP)
2708 					break;
2709 				device_printf(iflib_get_dev(sc->ctx),
2710 				    "\nCRITICAL: OVER TEMP!!"
2711 				    " PHY IS SHUT DOWN!!\n");
2712 				device_printf(iflib_get_dev(sc->ctx),
2713 				    "System shutdown required!\n");
2714 				break;
2715 			default:
2716 				if (!(eicr & IXGBE_EICR_TS))
2717 					break;
2718 				retval = hw->phy.ops.check_overtemp(hw);
2719 				if (retval != IXGBE_ERR_OVERTEMP)
2720 					break;
2721 				device_printf(iflib_get_dev(sc->ctx),
2722 				    "\nCRITICAL: OVER TEMP!!"
2723 				    " PHY IS SHUT DOWN!!\n");
2724 				device_printf(iflib_get_dev(sc->ctx),
2725 				    "System shutdown required!\n");
2726 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
2727 				    IXGBE_EICR_TS);
2728 				break;
2729 			}
2730 		}
2731 
2732 		/* Check for VF message */
2733 		if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2734 		    (eicr & IXGBE_EICR_MAILBOX))
2735 			sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
2736 	}
2737 
2738 	if (ixgbe_is_sfp(hw)) {
2739 		/* Pluggable optics-related interrupt */
2740 		if (hw->mac.type >= ixgbe_mac_X540)
2741 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2742 		else
2743 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2744 
2745 		if (eicr & eicr_mask) {
2746 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2747 			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
2748 		}
2749 
2750 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2751 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2752 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
2753 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2754 			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2755 		}
2756 	}
2757 
2758 	/* Check for fan failure */
2759 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2760 		ixgbe_check_fan_failure(sc, eicr, true);
2761 		IXGBE_WRITE_REG(hw, IXGBE_EICR,
2762 		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2763 	}
2764 
2765 	/* External PHY interrupt */
2766 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2767 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2768 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2769 		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
2770 	}
2771 
2772 	return (sc->task_requests != 0) ?
2773 	    FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2774 } /* ixgbe_msix_link */
2775 
2776 /************************************************************************
2777  * ixgbe_sysctl_interrupt_rate_handler
2778  ************************************************************************/
2779 static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)2780 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2781 {
2782 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2783 	int error;
2784 	unsigned int reg, usec, rate;
2785 
2786 	if (atomic_load_acq_int(&que->sc->recovery_mode))
2787 		return (EPERM);
2788 
2789 	reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2790 	usec = ((reg & 0x0FF8) >> 3);
2791 	if (usec > 0)
2792 		rate = 500000 / usec;
2793 	else
2794 		rate = 0;
2795 	error = sysctl_handle_int(oidp, &rate, 0, req);
2796 	if (error || !req->newptr)
2797 		return error;
2798 	reg &= ~0xfff; /* default, no limitation */
2799 	ixgbe_max_interrupt_rate = 0;
2800 	if (rate > 0 && rate < 500000) {
2801 		if (rate < 1000)
2802 			rate = 1000;
2803 		ixgbe_max_interrupt_rate = rate;
2804 		reg |= ((4000000/rate) & 0xff8);
2805 	}
2806 	IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2807 
2808 	return (0);
2809 } /* ixgbe_sysctl_interrupt_rate_handler */
2810 
2811 /************************************************************************
2812  * ixgbe_add_device_sysctls
2813  ************************************************************************/
2814 static void
ixgbe_add_device_sysctls(if_ctx_t ctx)2815 ixgbe_add_device_sysctls(if_ctx_t ctx)
2816 {
2817 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2818 	device_t dev = iflib_get_dev(ctx);
2819 	struct ixgbe_hw *hw = &sc->hw;
2820 	struct sysctl_oid_list *child;
2821 	struct sysctl_ctx_list *ctx_list;
2822 
2823 	ctx_list = device_get_sysctl_ctx(dev);
2824 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2825 
2826 	/* Sysctls for all devices */
2827 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2828 	    CTLTYPE_INT | CTLFLAG_RW,
2829 	    sc, 0, ixgbe_sysctl_flowcntl, "I",
2830 	    IXGBE_SYSCTL_DESC_SET_FC);
2831 
2832 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2833 	    CTLTYPE_INT | CTLFLAG_RW,
2834 	    sc, 0, ixgbe_sysctl_advertise, "I",
2835 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
2836 
2837 	sc->enable_aim = ixgbe_enable_aim;
2838 	SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2839 	    &sc->enable_aim, 0, "Interrupt Moderation");
2840 
2841 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2842 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2843 	    ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2844 
2845 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2846 	    "tso_tcp_flags_mask_first_segment",
2847 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2848 	    sc, 0, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2849 	    "TSO TCP flags mask for first segment");
2850 
2851 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2852 	    "tso_tcp_flags_mask_middle_segment",
2853 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2854 	    sc, 1, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2855 	    "TSO TCP flags mask for middle segment");
2856 
2857 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2858 	    "tso_tcp_flags_mask_last_segment",
2859 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2860 	    sc, 2, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2861 	    "TSO TCP flags mask for last segment");
2862 
2863 #ifdef IXGBE_DEBUG
2864 	/* testing sysctls (for all devices) */
2865 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2866 	    CTLTYPE_INT | CTLFLAG_RW,
2867 	    sc, 0, ixgbe_sysctl_power_state,
2868 	    "I", "PCI Power State");
2869 
2870 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2871 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2872 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2873 #endif
2874 	/* for X550 series devices */
2875 	if (hw->mac.type >= ixgbe_mac_X550)
2876 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2877 		    CTLTYPE_U16 | CTLFLAG_RW,
2878 		    sc, 0, ixgbe_sysctl_dmac,
2879 		    "I", "DMA Coalesce");
2880 
2881 	/* for WoL-capable devices */
2882 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2883 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2884 		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2885 		    ixgbe_sysctl_wol_enable, "I",
2886 		    "Enable/Disable Wake on LAN");
2887 
2888 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2889 		    CTLTYPE_U32 | CTLFLAG_RW,
2890 		    sc, 0, ixgbe_sysctl_wufc,
2891 		    "I", "Enable/Disable Wake Up Filters");
2892 	}
2893 
2894 	/* for X552/X557-AT devices */
2895 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2896 		struct sysctl_oid *phy_node;
2897 		struct sysctl_oid_list *phy_list;
2898 
2899 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2900 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2901 		    "External PHY sysctls");
2902 		phy_list = SYSCTL_CHILDREN(phy_node);
2903 
2904 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2905 		    CTLTYPE_U16 | CTLFLAG_RD,
2906 		    sc, 0, ixgbe_sysctl_phy_temp,
2907 		    "I", "Current External PHY Temperature (Celsius)");
2908 
2909 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2910 		    "overtemp_occurred",
2911 		    CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
2912 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
2913 		    "External PHY High Temperature Event Occurred");
2914 	}
2915 
2916 	if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2917 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2918 		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2919 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2920 	}
2921 } /* ixgbe_add_device_sysctls */
2922 
2923 /************************************************************************
2924  * ixgbe_allocate_pci_resources
2925  ************************************************************************/
2926 static int
ixgbe_allocate_pci_resources(if_ctx_t ctx)2927 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2928 {
2929 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2930 	device_t dev = iflib_get_dev(ctx);
2931 	int rid;
2932 
2933 	rid = PCIR_BAR(0);
2934 	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2935 	    RF_ACTIVE);
2936 
2937 	if (!(sc->pci_mem)) {
2938 		device_printf(dev,
2939 		    "Unable to allocate bus resource: memory\n");
2940 		return (ENXIO);
2941 	}
2942 
2943 	/* Save bus_space values for READ/WRITE_REG macros */
2944 	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
2945 	sc->osdep.mem_bus_space_handle =
2946 	    rman_get_bushandle(sc->pci_mem);
2947 	/* Set hw values for shared code */
2948 	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
2949 
2950 	return (0);
2951 } /* ixgbe_allocate_pci_resources */
2952 
2953 /************************************************************************
2954  * ixgbe_detach - Device removal routine
2955  *
2956  *   Called when the driver is being removed.
2957  *   Stops the adapter and deallocates all the resources
2958  *   that were allocated for driver operation.
2959  *
2960  *   return 0 on success, positive on failure
2961  ************************************************************************/
2962 static int
ixgbe_if_detach(if_ctx_t ctx)2963 ixgbe_if_detach(if_ctx_t ctx)
2964 {
2965 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2966 	device_t dev = iflib_get_dev(ctx);
2967 	u32 ctrl_ext;
2968 
2969 	INIT_DEBUGOUT("ixgbe_detach: begin");
2970 
2971 	if (ixgbe_pci_iov_detach(dev) != 0) {
2972 		device_printf(dev, "SR-IOV in use; detach first.\n");
2973 		return (EBUSY);
2974 	}
2975 
2976 	ixgbe_setup_low_power_mode(ctx);
2977 
2978 	/* let hardware know driver is unloading */
2979 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
2980 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2981 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
2982 
2983 	callout_drain(&sc->fw_mode_timer);
2984 
2985 	ixgbe_free_pci_resources(ctx);
2986 	free(sc->mta, M_IXGBE);
2987 
2988 	return (0);
2989 } /* ixgbe_if_detach */
2990 
2991 /************************************************************************
2992  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2993  *
2994  *   Prepare the adapter/port for LPLU and/or WoL
2995  ************************************************************************/
2996 static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)2997 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2998 {
2999 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3000 	struct ixgbe_hw *hw = &sc->hw;
3001 	device_t dev = iflib_get_dev(ctx);
3002 	s32 error = 0;
3003 
3004 	if (!hw->wol_enabled)
3005 		ixgbe_set_phy_power(hw, false);
3006 
3007 	/* Limit power management flow to X550EM baseT */
3008 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3009 	    hw->phy.ops.enter_lplu) {
3010 		/* Turn off support for APM wakeup. (Using ACPI instead) */
3011 		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3012 		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3013 
3014 		/*
3015 		 * Clear Wake Up Status register to prevent any previous
3016 		 * wakeup events from waking us up immediately after we
3017 		 * suspend.
3018 		 */
3019 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3020 
3021 		/*
3022 		 * Program the Wakeup Filter Control register with user filter
3023 		 * settings
3024 		 */
3025 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
3026 
3027 		/* Enable wakeups and power management in Wakeup Control */
3028 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
3029 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3030 
3031 		/* X550EM baseT adapters need a special LPLU flow */
3032 		hw->phy.reset_disable = true;
3033 		ixgbe_if_stop(ctx);
3034 		error = hw->phy.ops.enter_lplu(hw);
3035 		if (error)
3036 			device_printf(dev, "Error entering LPLU: %d\n",
3037 			    error);
3038 		hw->phy.reset_disable = false;
3039 	} else {
3040 		/* Just stop for other adapters */
3041 		ixgbe_if_stop(ctx);
3042 	}
3043 
3044 	return error;
3045 } /* ixgbe_setup_low_power_mode */
3046 
3047 /************************************************************************
3048  * ixgbe_shutdown - Shutdown entry point
3049  ************************************************************************/
3050 static int
ixgbe_if_shutdown(if_ctx_t ctx)3051 ixgbe_if_shutdown(if_ctx_t ctx)
3052 {
3053 	int error = 0;
3054 
3055 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
3056 
3057 	error = ixgbe_setup_low_power_mode(ctx);
3058 
3059 	return (error);
3060 } /* ixgbe_if_shutdown */
3061 
3062 /************************************************************************
3063  * ixgbe_suspend
3064  *
3065  *   From D0 to D3
3066  ************************************************************************/
3067 static int
ixgbe_if_suspend(if_ctx_t ctx)3068 ixgbe_if_suspend(if_ctx_t ctx)
3069 {
3070 	int error = 0;
3071 
3072 	INIT_DEBUGOUT("ixgbe_suspend: begin");
3073 
3074 	error = ixgbe_setup_low_power_mode(ctx);
3075 
3076 	return (error);
3077 } /* ixgbe_if_suspend */
3078 
3079 /************************************************************************
3080  * ixgbe_resume
3081  *
3082  *   From D3 to D0
3083  ************************************************************************/
3084 static int
ixgbe_if_resume(if_ctx_t ctx)3085 ixgbe_if_resume(if_ctx_t ctx)
3086 {
3087 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3088 	device_t dev = iflib_get_dev(ctx);
3089 	if_t ifp = iflib_get_ifp(ctx);
3090 	struct ixgbe_hw *hw = &sc->hw;
3091 	u32 wus;
3092 
3093 	INIT_DEBUGOUT("ixgbe_resume: begin");
3094 
3095 	/* Read & clear WUS register */
3096 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3097 	if (wus)
3098 		device_printf(dev, "Woken up by (WUS): %#010x\n",
3099 		    IXGBE_READ_REG(hw, IXGBE_WUS));
3100 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3101 	/* And clear WUFC until next low-power transition */
3102 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3103 
3104 	/*
3105 	 * Required after D3->D0 transition;
3106 	 * will re-advertise all previous advertised speeds
3107 	 */
3108 	if (if_getflags(ifp) & IFF_UP)
3109 		ixgbe_if_init(ctx);
3110 
3111 	return (0);
3112 } /* ixgbe_if_resume */
3113 
3114 /************************************************************************
3115  * ixgbe_if_mtu_set - Ioctl mtu entry point
3116  *
3117  *   Return 0 on success, EINVAL on failure
3118  ************************************************************************/
3119 static int
ixgbe_if_mtu_set(if_ctx_t ctx,uint32_t mtu)3120 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
3121 {
3122 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3123 	int error = 0;
3124 
3125 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
3126 
3127 	if (mtu > IXGBE_MAX_MTU) {
3128 		error = EINVAL;
3129 	} else {
3130 		sc->max_frame_size = mtu + IXGBE_MTU_HDR;
3131 	}
3132 
3133 	return error;
3134 } /* ixgbe_if_mtu_set */
3135 
3136 /************************************************************************
3137  * ixgbe_if_crcstrip_set
3138  ************************************************************************/
3139 static void
ixgbe_if_crcstrip_set(if_ctx_t ctx,int onoff,int crcstrip)3140 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
3141 {
3142 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3143 	struct ixgbe_hw *hw = &sc->hw;
3144 	/* crc stripping is set in two places:
3145 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
3146 	 * IXGBE_RDRXCTL (set by the original driver in
3147 	 *	ixgbe_setup_hw_rsc() called in init_locked.
3148 	 *	We disable the setting when netmap is compiled in).
3149 	 * We update the values here, but also in ixgbe.c because
3150 	 * init_locked sometimes is called outside our control.
3151 	 */
3152 	uint32_t hl, rxc;
3153 
3154 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3155 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3156 #ifdef NETMAP
3157 	if (netmap_verbose)
3158 		D("%s read  HLREG 0x%x rxc 0x%x",
3159 			onoff ? "enter" : "exit", hl, rxc);
3160 #endif
3161 	/* hw requirements ... */
3162 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3163 	rxc |= IXGBE_RDRXCTL_RSCACKC;
3164 	if (onoff && !crcstrip) {
3165 		/* keep the crc. Fast rx */
3166 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
3167 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
3168 	} else {
3169 		/* reset default mode */
3170 		hl |= IXGBE_HLREG0_RXCRCSTRP;
3171 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
3172 	}
3173 #ifdef NETMAP
3174 	if (netmap_verbose)
3175 		D("%s write HLREG 0x%x rxc 0x%x",
3176 			onoff ? "enter" : "exit", hl, rxc);
3177 #endif
3178 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
3179 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
3180 } /* ixgbe_if_crcstrip_set */
3181 
3182 /*********************************************************************
3183  * ixgbe_if_init - Init entry point
3184  *
3185  *   Used in two ways: It is used by the stack as an init
3186  *   entry point in network interface structure. It is also
3187  *   used by the driver as a hw/sw initialization routine to
3188  *   get to a consistent state.
3189  *
3190  *   Return 0 on success, positive on failure
3191  **********************************************************************/
3192 void
ixgbe_if_init(if_ctx_t ctx)3193 ixgbe_if_init(if_ctx_t ctx)
3194 {
3195 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3196 	if_t ifp = iflib_get_ifp(ctx);
3197 	device_t dev = iflib_get_dev(ctx);
3198 	struct ixgbe_hw *hw = &sc->hw;
3199 	struct ix_rx_queue *rx_que;
3200 	struct ix_tx_queue *tx_que;
3201 	u32 txdctl, mhadd;
3202 	u32 rxdctl, rxctrl;
3203 	u32 ctrl_ext;
3204 
3205 	int i, j, err;
3206 
3207 	INIT_DEBUGOUT("ixgbe_if_init: begin");
3208 
3209 	/* Queue indices may change with IOV mode */
3210 	ixgbe_align_all_queue_indices(sc);
3211 
3212 	/* reprogram the RAR[0] in case user changed it. */
3213 	ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3214 
3215 	/* Get the latest mac address, User can use a LAA */
3216 	bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3217 	ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3218 	hw->addr_ctrl.rar_used_count = 1;
3219 
3220 	ixgbe_init_hw(hw);
3221 
3222 	ixgbe_initialize_iov(sc);
3223 
3224 	ixgbe_initialize_transmit_units(ctx);
3225 
3226 	/* Setup Multicast table */
3227 	ixgbe_if_multi_set(ctx);
3228 
3229 	/* Determine the correct mbuf pool, based on frame size */
3230 	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3231 
3232 	/* Configure RX settings */
3233 	ixgbe_initialize_receive_units(ctx);
3234 
3235 	/*
3236 	 * Initialize variable holding task enqueue requests
3237 	 * from MSI-X interrupts
3238 	 */
3239 	sc->task_requests = 0;
3240 
3241 	/* Enable SDP & MSI-X interrupts based on adapter */
3242 	ixgbe_config_gpie(sc);
3243 
3244 	/* Set MTU size */
3245 	if (if_getmtu(ifp) > ETHERMTU) {
3246 		/* aka IXGBE_MAXFRS on 82599 and newer */
3247 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3248 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
3249 		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3250 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3251 	}
3252 
3253 	/* Now enable all the queues */
3254 	for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
3255 	    i++, tx_que++) {
3256 		struct tx_ring *txr = &tx_que->txr;
3257 
3258 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3259 		txdctl |= IXGBE_TXDCTL_ENABLE;
3260 		/* Set WTHRESH to 8, burst writeback */
3261 		txdctl |= (8 << 16);
3262 		/*
3263 		 * When the internal queue falls below PTHRESH (32),
3264 		 * start prefetching as long as there are at least
3265 		 * HTHRESH (1) buffers ready. The values are taken
3266 		 * from the Intel linux driver 3.8.21.
3267 		 * Prefetching enables tx line rate even with 1 queue.
3268 		 */
3269 		txdctl |= (32 << 0) | (1 << 8);
3270 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3271 	}
3272 
3273 	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
3274 	    i++, rx_que++) {
3275 		struct rx_ring *rxr = &rx_que->rxr;
3276 
3277 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3278 		if (hw->mac.type == ixgbe_mac_82598EB) {
3279 			/*
3280 			 * PTHRESH = 21
3281 			 * HTHRESH = 4
3282 			 * WTHRESH = 8
3283 			 */
3284 			rxdctl &= ~0x3FFFFF;
3285 			rxdctl |= 0x080420;
3286 		}
3287 		rxdctl |= IXGBE_RXDCTL_ENABLE;
3288 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3289 		for (j = 0; j < 10; j++) {
3290 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3291 			    IXGBE_RXDCTL_ENABLE)
3292 				break;
3293 			else
3294 				msec_delay(1);
3295 		}
3296 		wmb();
3297 	}
3298 
3299 	/* Enable Receive engine */
3300 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3301 	if (hw->mac.type == ixgbe_mac_82598EB)
3302 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
3303 	rxctrl |= IXGBE_RXCTRL_RXEN;
3304 	ixgbe_enable_rx_dma(hw, rxctrl);
3305 
3306 	/* Set up MSI/MSI-X routing */
3307 	if (ixgbe_enable_msix)  {
3308 		ixgbe_configure_ivars(sc);
3309 		/* Set up auto-mask */
3310 		if (hw->mac.type == ixgbe_mac_82598EB)
3311 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3312 		else {
3313 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3314 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3315 		}
3316 	} else {  /* Simple settings for Legacy/MSI */
3317 		ixgbe_set_ivar(sc, 0, 0, 0);
3318 		ixgbe_set_ivar(sc, 0, 0, 1);
3319 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3320 	}
3321 
3322 	ixgbe_init_fdir(sc);
3323 
3324 	/*
3325 	 * Check on any SFP devices that
3326 	 * need to be kick-started
3327 	 */
3328 	if (hw->phy.type == ixgbe_phy_none) {
3329 		err = hw->phy.ops.identify(hw);
3330 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3331 			device_printf(dev,
3332 			    "Unsupported SFP+ module type was detected.\n");
3333 			return;
3334 		}
3335 	}
3336 
3337 	/* Set moderation on the Link interrupt */
3338 	IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3339 
3340 	/* Enable power to the phy. */
3341 	ixgbe_set_phy_power(hw, true);
3342 
3343 	/* Config/Enable Link */
3344 	ixgbe_config_link(ctx);
3345 
3346 	/* Hardware Packet Buffer & Flow Control setup */
3347 	ixgbe_config_delay_values(sc);
3348 
3349 	/* Initialize the FC settings */
3350 	ixgbe_start_hw(hw);
3351 
3352 	/* Set up VLAN support and filter */
3353 	ixgbe_setup_vlan_hw_support(ctx);
3354 
3355 	/* Setup DMA Coalescing */
3356 	ixgbe_config_dmac(sc);
3357 
3358 	/* And now turn on interrupts */
3359 	ixgbe_if_enable_intr(ctx);
3360 
3361 	/* Enable the use of the MBX by the VF's */
3362 	if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3363 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3364 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3365 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3366 	}
3367 
3368 } /* ixgbe_init_locked */
3369 
3370 /************************************************************************
3371  * ixgbe_set_ivar
3372  *
3373  *   Setup the correct IVAR register for a particular MSI-X interrupt
3374  *     (yes this is all very magic and confusing :)
3375  *    - entry is the register array entry
3376  *    - vector is the MSI-X vector for this queue
3377  *    - type is RX/TX/MISC
3378  ************************************************************************/
3379 static void
ixgbe_set_ivar(struct ixgbe_softc * sc,u8 entry,u8 vector,s8 type)3380 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3381 {
3382 	struct ixgbe_hw *hw = &sc->hw;
3383 	u32 ivar, index;
3384 
3385 	vector |= IXGBE_IVAR_ALLOC_VAL;
3386 
3387 	switch (hw->mac.type) {
3388 	case ixgbe_mac_82598EB:
3389 		if (type == -1)
3390 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3391 		else
3392 			entry += (type * 64);
3393 		index = (entry >> 2) & 0x1F;
3394 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3395 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3396 		ivar |= (vector << (8 * (entry & 0x3)));
3397 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3398 		break;
3399 	case ixgbe_mac_82599EB:
3400 	case ixgbe_mac_X540:
3401 	case ixgbe_mac_X550:
3402 	case ixgbe_mac_X550EM_x:
3403 	case ixgbe_mac_X550EM_a:
3404 		if (type == -1) { /* MISC IVAR */
3405 			index = (entry & 1) * 8;
3406 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3407 			ivar &= ~(0xFF << index);
3408 			ivar |= (vector << index);
3409 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3410 		} else {          /* RX/TX IVARS */
3411 			index = (16 * (entry & 1)) + (8 * type);
3412 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3413 			ivar &= ~(0xFF << index);
3414 			ivar |= (vector << index);
3415 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3416 		}
3417 	default:
3418 		break;
3419 	}
3420 } /* ixgbe_set_ivar */
3421 
3422 /************************************************************************
3423  * ixgbe_configure_ivars
3424  ************************************************************************/
3425 static void
ixgbe_configure_ivars(struct ixgbe_softc * sc)3426 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3427 {
3428 	struct ix_rx_queue *rx_que = sc->rx_queues;
3429 	struct ix_tx_queue *tx_que = sc->tx_queues;
3430 	u32 newitr;
3431 
3432 	if (ixgbe_max_interrupt_rate > 0)
3433 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3434 	else {
3435 		/*
3436 		 * Disable DMA coalescing if interrupt moderation is
3437 		 * disabled.
3438 		 */
3439 		sc->dmac = 0;
3440 		newitr = 0;
3441 	}
3442 
3443 	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3444 		struct rx_ring *rxr = &rx_que->rxr;
3445 
3446 		/* First the RX queue entry */
3447 		ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3448 
3449 		/* Set an Initial EITR value */
3450 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3451 	}
3452 	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3453 		struct tx_ring *txr = &tx_que->txr;
3454 
3455 		/* ... and the TX */
3456 		ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3457 	}
3458 	/* For the Link interrupt */
3459 	ixgbe_set_ivar(sc, 1, sc->vector, -1);
3460 } /* ixgbe_configure_ivars */
3461 
3462 /************************************************************************
3463  * ixgbe_config_gpie
3464  ************************************************************************/
3465 static void
ixgbe_config_gpie(struct ixgbe_softc * sc)3466 ixgbe_config_gpie(struct ixgbe_softc *sc)
3467 {
3468 	struct ixgbe_hw *hw = &sc->hw;
3469 	u32 gpie;
3470 
3471 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3472 
3473 	if (sc->intr_type == IFLIB_INTR_MSIX) {
3474 		/* Enable Enhanced MSI-X mode */
3475 		gpie |= IXGBE_GPIE_MSIX_MODE |
3476 		    IXGBE_GPIE_EIAME |
3477 		    IXGBE_GPIE_PBA_SUPPORT |
3478 		    IXGBE_GPIE_OCD;
3479 	}
3480 
3481 	/* Fan Failure Interrupt */
3482 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3483 		gpie |= IXGBE_SDP1_GPIEN;
3484 
3485 	/* Thermal Sensor Interrupt */
3486 	if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3487 		gpie |= IXGBE_SDP0_GPIEN_X540;
3488 
3489 	/* Link detection */
3490 	switch (hw->mac.type) {
3491 	case ixgbe_mac_82599EB:
3492 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3493 		break;
3494 	case ixgbe_mac_X550EM_x:
3495 	case ixgbe_mac_X550EM_a:
3496 		gpie |= IXGBE_SDP0_GPIEN_X540;
3497 		break;
3498 	default:
3499 		break;
3500 	}
3501 
3502 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3503 
3504 } /* ixgbe_config_gpie */
3505 
3506 /************************************************************************
3507  * ixgbe_config_delay_values
3508  *
3509  *   Requires sc->max_frame_size to be set.
3510  ************************************************************************/
3511 static void
ixgbe_config_delay_values(struct ixgbe_softc * sc)3512 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3513 {
3514 	struct ixgbe_hw *hw = &sc->hw;
3515 	u32 rxpb, frame, size, tmp;
3516 
3517 	frame = sc->max_frame_size;
3518 
3519 	/* Calculate High Water */
3520 	switch (hw->mac.type) {
3521 	case ixgbe_mac_X540:
3522 	case ixgbe_mac_X550:
3523 	case ixgbe_mac_X550EM_x:
3524 	case ixgbe_mac_X550EM_a:
3525 		tmp = IXGBE_DV_X540(frame, frame);
3526 		break;
3527 	default:
3528 		tmp = IXGBE_DV(frame, frame);
3529 		break;
3530 	}
3531 	size = IXGBE_BT2KB(tmp);
3532 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3533 	hw->fc.high_water[0] = rxpb - size;
3534 
3535 	/* Now calculate Low Water */
3536 	switch (hw->mac.type) {
3537 	case ixgbe_mac_X540:
3538 	case ixgbe_mac_X550:
3539 	case ixgbe_mac_X550EM_x:
3540 	case ixgbe_mac_X550EM_a:
3541 		tmp = IXGBE_LOW_DV_X540(frame);
3542 		break;
3543 	default:
3544 		tmp = IXGBE_LOW_DV(frame);
3545 		break;
3546 	}
3547 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3548 
3549 	hw->fc.pause_time = IXGBE_FC_PAUSE;
3550 	hw->fc.send_xon = true;
3551 } /* ixgbe_config_delay_values */
3552 
3553 /************************************************************************
3554  * ixgbe_set_multi - Multicast Update
3555  *
3556  *   Called whenever multicast address list is updated.
3557  ************************************************************************/
3558 static u_int
ixgbe_mc_filter_apply(void * arg,struct sockaddr_dl * sdl,u_int idx)3559 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3560 {
3561 	struct ixgbe_softc *sc = arg;
3562 	struct ixgbe_mc_addr *mta = sc->mta;
3563 
3564 	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3565 		return (0);
3566 	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3567 	mta[idx].vmdq = sc->pool;
3568 
3569 	return (1);
3570 } /* ixgbe_mc_filter_apply */
3571 
3572 static void
ixgbe_if_multi_set(if_ctx_t ctx)3573 ixgbe_if_multi_set(if_ctx_t ctx)
3574 {
3575 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3576 	struct ixgbe_mc_addr *mta;
3577 	if_t ifp = iflib_get_ifp(ctx);
3578 	u8 *update_ptr;
3579 	u32 fctrl;
3580 	u_int mcnt;
3581 
3582 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3583 
3584 	mta = sc->mta;
3585 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3586 
3587 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3588 	    sc);
3589 
3590 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3591 		update_ptr = (u8 *)mta;
3592 		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3593 		    ixgbe_mc_array_itr, true);
3594 	}
3595 
3596 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3597 
3598 	if (if_getflags(ifp) & IFF_PROMISC)
3599 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3600 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3601 	    if_getflags(ifp) & IFF_ALLMULTI) {
3602 		fctrl |= IXGBE_FCTRL_MPE;
3603 		fctrl &= ~IXGBE_FCTRL_UPE;
3604 	} else
3605 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3606 
3607 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3608 } /* ixgbe_if_multi_set */
3609 
3610 /************************************************************************
3611  * ixgbe_mc_array_itr
3612  *
3613  *   An iterator function needed by the multicast shared code.
3614  *   It feeds the shared code routine the addresses in the
3615  *   array of ixgbe_set_multi() one by one.
3616  ************************************************************************/
3617 static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw * hw,u8 ** update_ptr,u32 * vmdq)3618 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3619 {
3620 	struct ixgbe_mc_addr *mta;
3621 
3622 	mta = (struct ixgbe_mc_addr *)*update_ptr;
3623 	*vmdq = mta->vmdq;
3624 
3625 	*update_ptr = (u8*)(mta + 1);
3626 
3627 	return (mta->addr);
3628 } /* ixgbe_mc_array_itr */
3629 
3630 /************************************************************************
3631  * ixgbe_local_timer - Timer routine
3632  *
3633  *   Checks for link status, updates statistics,
3634  *   and runs the watchdog check.
3635  ************************************************************************/
3636 static void
ixgbe_if_timer(if_ctx_t ctx,uint16_t qid)3637 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3638 {
3639 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3640 
3641 	if (qid != 0)
3642 		return;
3643 
3644 	/* Check for pluggable optics */
3645 	if (sc->sfp_probe)
3646 		if (!ixgbe_sfp_probe(ctx))
3647 			return; /* Nothing to do */
3648 
3649 	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3650 
3651 	/* Fire off the adminq task */
3652 	iflib_admin_intr_deferred(ctx);
3653 
3654 } /* ixgbe_if_timer */
3655 
3656 /************************************************************************
3657  * ixgbe_fw_mode_timer - FW mode timer routine
3658  ************************************************************************/
3659 static void
ixgbe_fw_mode_timer(void * arg)3660 ixgbe_fw_mode_timer(void *arg)
3661 {
3662 	struct ixgbe_softc *sc = arg;
3663 	struct ixgbe_hw *hw = &sc->hw;
3664 
3665 	if (ixgbe_fw_recovery_mode(hw)) {
3666 		if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
3667 			/* Firmware error detected, entering recovery mode */
3668 			device_printf(sc->dev,
3669 			    "Firmware recovery mode detected. Limiting"
3670 			    " functionality. Refer to the Intel(R) Ethernet"
3671 			    " Adapters and Devices User Guide for details on"
3672 			    " firmware recovery mode.\n");
3673 
3674 			if (hw->adapter_stopped == FALSE)
3675 				ixgbe_if_stop(sc->ctx);
3676 		}
3677 	} else
3678 		atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
3679 
3680 
3681 	callout_reset(&sc->fw_mode_timer, hz,
3682 	    ixgbe_fw_mode_timer, sc);
3683 } /* ixgbe_fw_mode_timer */
3684 
3685 /************************************************************************
3686  * ixgbe_sfp_probe
3687  *
3688  *   Determine if a port had optics inserted.
3689  ************************************************************************/
3690 static bool
ixgbe_sfp_probe(if_ctx_t ctx)3691 ixgbe_sfp_probe(if_ctx_t ctx)
3692 {
3693 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3694 	struct ixgbe_hw *hw = &sc->hw;
3695 	device_t dev = iflib_get_dev(ctx);
3696 	bool result = false;
3697 
3698 	if ((hw->phy.type == ixgbe_phy_nl) &&
3699 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3700 		s32 ret = hw->phy.ops.identify_sfp(hw);
3701 		if (ret)
3702 			goto out;
3703 		ret = hw->phy.ops.reset(hw);
3704 		sc->sfp_probe = false;
3705 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3706 			device_printf(dev,
3707 			    "Unsupported SFP+ module detected!");
3708 			device_printf(dev,
3709 			    "Reload driver with supported module.\n");
3710 			goto out;
3711 		} else
3712 			device_printf(dev, "SFP+ module detected!\n");
3713 		/* We now have supported optics */
3714 		result = true;
3715 	}
3716 out:
3717 
3718 	return (result);
3719 } /* ixgbe_sfp_probe */
3720 
3721 /************************************************************************
3722  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3723  ************************************************************************/
3724 static void
ixgbe_handle_mod(void * context)3725 ixgbe_handle_mod(void *context)
3726 {
3727 	if_ctx_t ctx = context;
3728 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3729 	struct ixgbe_hw *hw = &sc->hw;
3730 	device_t dev = iflib_get_dev(ctx);
3731 	u32 err, cage_full = 0;
3732 
3733 	if (sc->hw.need_crosstalk_fix) {
3734 		switch (hw->mac.type) {
3735 		case ixgbe_mac_82599EB:
3736 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3737 			    IXGBE_ESDP_SDP2;
3738 			break;
3739 		case ixgbe_mac_X550EM_x:
3740 		case ixgbe_mac_X550EM_a:
3741 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3742 			    IXGBE_ESDP_SDP0;
3743 			break;
3744 		default:
3745 			break;
3746 		}
3747 
3748 		if (!cage_full)
3749 			goto handle_mod_out;
3750 	}
3751 
3752 	err = hw->phy.ops.identify_sfp(hw);
3753 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3754 		device_printf(dev,
3755 		    "Unsupported SFP+ module type was detected.\n");
3756 		goto handle_mod_out;
3757 	}
3758 
3759 	if (hw->mac.type == ixgbe_mac_82598EB)
3760 		err = hw->phy.ops.reset(hw);
3761 	else
3762 		err = hw->mac.ops.setup_sfp(hw);
3763 
3764 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3765 		device_printf(dev,
3766 		    "Setup failure - unsupported SFP+ module type.\n");
3767 		goto handle_mod_out;
3768 	}
3769 	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3770 	return;
3771 
3772 handle_mod_out:
3773 	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3774 } /* ixgbe_handle_mod */
3775 
3776 
3777 /************************************************************************
3778  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3779  ************************************************************************/
3780 static void
ixgbe_handle_msf(void * context)3781 ixgbe_handle_msf(void *context)
3782 {
3783 	if_ctx_t ctx = context;
3784 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3785 	struct ixgbe_hw *hw = &sc->hw;
3786 	u32 autoneg;
3787 	bool negotiate;
3788 
3789 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3790 	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3791 
3792 	autoneg = hw->phy.autoneg_advertised;
3793 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3794 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3795 	if (hw->mac.ops.setup_link)
3796 		hw->mac.ops.setup_link(hw, autoneg, true);
3797 
3798 	/* Adjust media types shown in ifconfig */
3799 	ifmedia_removeall(sc->media);
3800 	ixgbe_add_media_types(sc->ctx);
3801 	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3802 } /* ixgbe_handle_msf */
3803 
3804 /************************************************************************
3805  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3806  ************************************************************************/
3807 static void
ixgbe_handle_phy(void * context)3808 ixgbe_handle_phy(void *context)
3809 {
3810 	if_ctx_t ctx = context;
3811 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3812 	struct ixgbe_hw *hw = &sc->hw;
3813 	int error;
3814 
3815 	error = hw->phy.ops.handle_lasi(hw);
3816 	if (error == IXGBE_ERR_OVERTEMP)
3817 		device_printf(sc->dev,
3818 		    "CRITICAL: EXTERNAL PHY OVER TEMP!!"
3819 		    "  PHY will downshift to lower power state!\n");
3820 	else if (error)
3821 		device_printf(sc->dev,
3822 		    "Error handling LASI interrupt: %d\n", error);
3823 } /* ixgbe_handle_phy */
3824 
3825 /************************************************************************
3826  * ixgbe_if_stop - Stop the hardware
3827  *
3828  *   Disables all traffic on the adapter by issuing a
3829  *   global reset on the MAC and deallocates TX/RX buffers.
3830  ************************************************************************/
3831 static void
ixgbe_if_stop(if_ctx_t ctx)3832 ixgbe_if_stop(if_ctx_t ctx)
3833 {
3834 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3835 	struct ixgbe_hw *hw = &sc->hw;
3836 
3837 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3838 
3839 	ixgbe_reset_hw(hw);
3840 	hw->adapter_stopped = false;
3841 	ixgbe_stop_adapter(hw);
3842 	if (hw->mac.type == ixgbe_mac_82599EB)
3843 		ixgbe_stop_mac_link_on_d3_82599(hw);
3844 	/* Turn off the laser - noop with no optics */
3845 	ixgbe_disable_tx_laser(hw);
3846 
3847 	/* Update the stack */
3848 	sc->link_up = false;
3849 	ixgbe_if_update_admin_status(ctx);
3850 
3851 	/* reprogram the RAR[0] in case user changed it. */
3852 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
3853 
3854 	return;
3855 } /* ixgbe_if_stop */
3856 
3857 /************************************************************************
3858  * ixgbe_update_link_status - Update OS on link state
3859  *
3860  * Note: Only updates the OS on the cached link state.
3861  *       The real check of the hardware only happens with
3862  *       a link interrupt.
3863  ************************************************************************/
3864 static void
ixgbe_if_update_admin_status(if_ctx_t ctx)3865 ixgbe_if_update_admin_status(if_ctx_t ctx)
3866 {
3867 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3868 	device_t dev = iflib_get_dev(ctx);
3869 
3870 	if (sc->link_up) {
3871 		if (sc->link_active == false) {
3872 			if (bootverbose)
3873 				device_printf(dev, "Link is up %d Gbps %s \n",
3874 				    ((sc->link_speed == 128) ? 10 : 1),
3875 				    "Full Duplex");
3876 			sc->link_active = true;
3877 			/* Update any Flow Control changes */
3878 			ixgbe_fc_enable(&sc->hw);
3879 			/* Update DMA coalescing config */
3880 			ixgbe_config_dmac(sc);
3881 			iflib_link_state_change(ctx, LINK_STATE_UP,
3882 			    ixgbe_link_speed_to_baudrate(sc->link_speed));
3883 
3884 			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3885 				ixgbe_ping_all_vfs(sc);
3886 		}
3887 	} else { /* Link down */
3888 		if (sc->link_active == true) {
3889 			if (bootverbose)
3890 				device_printf(dev, "Link is Down\n");
3891 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3892 			sc->link_active = false;
3893 			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3894 				ixgbe_ping_all_vfs(sc);
3895 		}
3896 	}
3897 
3898 	/* Handle task requests from msix_link() */
3899 	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
3900 		ixgbe_handle_mod(ctx);
3901 	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
3902 		ixgbe_handle_msf(ctx);
3903 	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
3904 		ixgbe_handle_mbx(ctx);
3905 	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
3906 		ixgbe_reinit_fdir(ctx);
3907 	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
3908 		ixgbe_handle_phy(ctx);
3909 	sc->task_requests = 0;
3910 
3911 	ixgbe_update_stats_counters(sc);
3912 } /* ixgbe_if_update_admin_status */
3913 
3914 /************************************************************************
3915  * ixgbe_config_dmac - Configure DMA Coalescing
3916  ************************************************************************/
3917 static void
ixgbe_config_dmac(struct ixgbe_softc * sc)3918 ixgbe_config_dmac(struct ixgbe_softc *sc)
3919 {
3920 	struct ixgbe_hw *hw = &sc->hw;
3921 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3922 
3923 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3924 		return;
3925 
3926 	if (dcfg->watchdog_timer ^ sc->dmac ||
3927 	    dcfg->link_speed ^ sc->link_speed) {
3928 		dcfg->watchdog_timer = sc->dmac;
3929 		dcfg->fcoe_en = false;
3930 		dcfg->link_speed = sc->link_speed;
3931 		dcfg->num_tcs = 1;
3932 
3933 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3934 		    dcfg->watchdog_timer, dcfg->link_speed);
3935 
3936 		hw->mac.ops.dmac_config(hw);
3937 	}
3938 } /* ixgbe_config_dmac */
3939 
3940 /************************************************************************
3941  * ixgbe_if_enable_intr
3942  ************************************************************************/
3943 void
ixgbe_if_enable_intr(if_ctx_t ctx)3944 ixgbe_if_enable_intr(if_ctx_t ctx)
3945 {
3946 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3947 	struct ixgbe_hw *hw = &sc->hw;
3948 	struct ix_rx_queue *que = sc->rx_queues;
3949 	u32 mask, fwsm;
3950 
3951 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3952 
3953 	switch (sc->hw.mac.type) {
3954 	case ixgbe_mac_82599EB:
3955 		mask |= IXGBE_EIMS_ECC;
3956 		/* Temperature sensor on some scs */
3957 		mask |= IXGBE_EIMS_GPI_SDP0;
3958 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3959 		mask |= IXGBE_EIMS_GPI_SDP1;
3960 		mask |= IXGBE_EIMS_GPI_SDP2;
3961 		break;
3962 	case ixgbe_mac_X540:
3963 		/* Detect if Thermal Sensor is enabled */
3964 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3965 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3966 			mask |= IXGBE_EIMS_TS;
3967 		mask |= IXGBE_EIMS_ECC;
3968 		break;
3969 	case ixgbe_mac_X550:
3970 		/* MAC thermal sensor is automatically enabled */
3971 		mask |= IXGBE_EIMS_TS;
3972 		mask |= IXGBE_EIMS_ECC;
3973 		break;
3974 	case ixgbe_mac_X550EM_x:
3975 	case ixgbe_mac_X550EM_a:
3976 		/* Some devices use SDP0 for important information */
3977 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3978 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3979 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3980 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3981 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3982 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3983 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3984 		mask |= IXGBE_EIMS_ECC;
3985 		break;
3986 	default:
3987 		break;
3988 	}
3989 
3990 	/* Enable Fan Failure detection */
3991 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3992 		mask |= IXGBE_EIMS_GPI_SDP1;
3993 	/* Enable SR-IOV */
3994 	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3995 		mask |= IXGBE_EIMS_MAILBOX;
3996 	/* Enable Flow Director */
3997 	if (sc->feat_en & IXGBE_FEATURE_FDIR)
3998 		mask |= IXGBE_EIMS_FLOW_DIR;
3999 
4000 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4001 
4002 	/* With MSI-X we use auto clear */
4003 	if (sc->intr_type == IFLIB_INTR_MSIX) {
4004 		mask = IXGBE_EIMS_ENABLE_MASK;
4005 		/* Don't autoclear Link */
4006 		mask &= ~IXGBE_EIMS_OTHER;
4007 		mask &= ~IXGBE_EIMS_LSC;
4008 		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
4009 			mask &= ~IXGBE_EIMS_MAILBOX;
4010 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4011 	}
4012 
4013 	/*
4014 	 * Now enable all queues, this is done separately to
4015 	 * allow for handling the extended (beyond 32) MSI-X
4016 	 * vectors that can be used by 82599
4017 	 */
4018 	for (int i = 0; i < sc->num_rx_queues; i++, que++)
4019 		ixgbe_enable_queue(sc, que->msix);
4020 
4021 	IXGBE_WRITE_FLUSH(hw);
4022 
4023 } /* ixgbe_if_enable_intr */
4024 
4025 /************************************************************************
4026  * ixgbe_disable_intr
4027  ************************************************************************/
4028 static void
ixgbe_if_disable_intr(if_ctx_t ctx)4029 ixgbe_if_disable_intr(if_ctx_t ctx)
4030 {
4031 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4032 
4033 	if (sc->intr_type == IFLIB_INTR_MSIX)
4034 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
4035 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
4036 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
4037 	} else {
4038 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
4039 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
4040 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
4041 	}
4042 	IXGBE_WRITE_FLUSH(&sc->hw);
4043 
4044 } /* ixgbe_if_disable_intr */
4045 
4046 /************************************************************************
4047  * ixgbe_link_intr_enable
4048  ************************************************************************/
4049 static void
ixgbe_link_intr_enable(if_ctx_t ctx)4050 ixgbe_link_intr_enable(if_ctx_t ctx)
4051 {
4052 	struct ixgbe_hw *hw =
4053 	    &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
4054 
4055 	/* Re-enable other interrupts */
4056 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
4057 } /* ixgbe_link_intr_enable */
4058 
4059 /************************************************************************
4060  * ixgbe_if_rx_queue_intr_enable
4061  ************************************************************************/
4062 static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx,uint16_t rxqid)4063 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
4064 {
4065 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4066 	struct ix_rx_queue *que = &sc->rx_queues[rxqid];
4067 
4068 	ixgbe_enable_queue(sc, que->msix);
4069 
4070 	return (0);
4071 } /* ixgbe_if_rx_queue_intr_enable */
4072 
4073 /************************************************************************
4074  * ixgbe_enable_queue
4075  ************************************************************************/
4076 static void
ixgbe_enable_queue(struct ixgbe_softc * sc,u32 vector)4077 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
4078 {
4079 	struct ixgbe_hw *hw = &sc->hw;
4080 	u64 queue = 1ULL << vector;
4081 	u32 mask;
4082 
4083 	if (hw->mac.type == ixgbe_mac_82598EB) {
4084 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4085 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4086 	} else {
4087 		mask = (queue & 0xFFFFFFFF);
4088 		if (mask)
4089 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4090 		mask = (queue >> 32);
4091 		if (mask)
4092 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4093 	}
4094 } /* ixgbe_enable_queue */
4095 
4096 /************************************************************************
4097  * ixgbe_disable_queue
4098  ************************************************************************/
4099 static void
ixgbe_disable_queue(struct ixgbe_softc * sc,u32 vector)4100 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
4101 {
4102 	struct ixgbe_hw *hw = &sc->hw;
4103 	u64 queue = 1ULL << vector;
4104 	u32 mask;
4105 
4106 	if (hw->mac.type == ixgbe_mac_82598EB) {
4107 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4108 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
4109 	} else {
4110 		mask = (queue & 0xFFFFFFFF);
4111 		if (mask)
4112 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
4113 		mask = (queue >> 32);
4114 		if (mask)
4115 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
4116 	}
4117 } /* ixgbe_disable_queue */
4118 
4119 /************************************************************************
4120  * ixgbe_intr - Legacy Interrupt Service Routine
4121  ************************************************************************/
4122 int
ixgbe_intr(void * arg)4123 ixgbe_intr(void *arg)
4124 {
4125 	struct ixgbe_softc *sc = arg;
4126 	struct ix_rx_queue *que = sc->rx_queues;
4127 	struct ixgbe_hw *hw = &sc->hw;
4128 	if_ctx_t ctx = sc->ctx;
4129 	u32 eicr, eicr_mask;
4130 
4131 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4132 
4133 	++que->irqs;
4134 	if (eicr == 0) {
4135 		ixgbe_if_enable_intr(ctx);
4136 		return (FILTER_HANDLED);
4137 	}
4138 
4139 	/* Check for fan failure */
4140 	if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
4141 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
4142 		device_printf(sc->dev,
4143 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4144 		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
4145 		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4146 	}
4147 
4148 	/* Link status change */
4149 	if (eicr & IXGBE_EICR_LSC) {
4150 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
4151 		iflib_admin_intr_deferred(ctx);
4152 	}
4153 
4154 	if (ixgbe_is_sfp(hw)) {
4155 		/* Pluggable optics-related interrupt */
4156 		if (hw->mac.type >= ixgbe_mac_X540)
4157 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
4158 		else
4159 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4160 
4161 		if (eicr & eicr_mask) {
4162 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
4163 			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
4164 		}
4165 
4166 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
4167 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4168 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
4169 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4170 			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4171 		}
4172 	}
4173 
4174 	/* External PHY interrupt */
4175 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
4176 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
4177 		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
4178 
4179 	return (FILTER_SCHEDULE_THREAD);
4180 } /* ixgbe_intr */
4181 
4182 /************************************************************************
4183  * ixgbe_free_pci_resources
4184  ************************************************************************/
4185 static void
ixgbe_free_pci_resources(if_ctx_t ctx)4186 ixgbe_free_pci_resources(if_ctx_t ctx)
4187 {
4188 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4189 	struct ix_rx_queue *que = sc->rx_queues;
4190 	device_t dev = iflib_get_dev(ctx);
4191 
4192 	/* Release all MSI-X queue resources */
4193 	if (sc->intr_type == IFLIB_INTR_MSIX)
4194 		iflib_irq_free(ctx, &sc->irq);
4195 
4196 	if (que != NULL) {
4197 		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
4198 			iflib_irq_free(ctx, &que->que_irq);
4199 		}
4200 	}
4201 
4202 	if (sc->pci_mem != NULL)
4203 		bus_release_resource(dev, SYS_RES_MEMORY,
4204 		    rman_get_rid(sc->pci_mem), sc->pci_mem);
4205 } /* ixgbe_free_pci_resources */
4206 
4207 /************************************************************************
4208  * ixgbe_sysctl_flowcntl
4209  *
4210  *   SYSCTL wrapper around setting Flow Control
4211  ************************************************************************/
4212 static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)4213 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4214 {
4215 	struct ixgbe_softc *sc;
4216 	int error, fc;
4217 
4218 	sc = (struct ixgbe_softc *)arg1;
4219 	fc = sc->hw.fc.current_mode;
4220 
4221 	error = sysctl_handle_int(oidp, &fc, 0, req);
4222 	if ((error) || (req->newptr == NULL))
4223 		return (error);
4224 
4225 	/* Don't bother if it's not changed */
4226 	if (fc == sc->hw.fc.current_mode)
4227 		return (0);
4228 
4229 	return ixgbe_set_flowcntl(sc, fc);
4230 } /* ixgbe_sysctl_flowcntl */
4231 
4232 /************************************************************************
4233  * ixgbe_set_flowcntl - Set flow control
4234  *
4235  *   Flow control values:
4236  *     0 - off
4237  *     1 - rx pause
4238  *     2 - tx pause
4239  *     3 - full
4240  ************************************************************************/
4241 static int
ixgbe_set_flowcntl(struct ixgbe_softc * sc,int fc)4242 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4243 {
4244 	switch (fc) {
4245 	case ixgbe_fc_rx_pause:
4246 	case ixgbe_fc_tx_pause:
4247 	case ixgbe_fc_full:
4248 		sc->hw.fc.requested_mode = fc;
4249 		if (sc->num_rx_queues > 1)
4250 			ixgbe_disable_rx_drop(sc);
4251 		break;
4252 	case ixgbe_fc_none:
4253 		sc->hw.fc.requested_mode = ixgbe_fc_none;
4254 		if (sc->num_rx_queues > 1)
4255 			ixgbe_enable_rx_drop(sc);
4256 		break;
4257 	default:
4258 		return (EINVAL);
4259 	}
4260 
4261 	/* Don't autoneg if forcing a value */
4262 	sc->hw.fc.disable_fc_autoneg = true;
4263 	ixgbe_fc_enable(&sc->hw);
4264 
4265 	return (0);
4266 } /* ixgbe_set_flowcntl */
4267 
4268 /************************************************************************
4269  * ixgbe_enable_rx_drop
4270  *
4271  *   Enable the hardware to drop packets when the buffer is
4272  *   full. This is useful with multiqueue, so that no single
4273  *   queue being full stalls the entire RX engine. We only
4274  *   enable this when Multiqueue is enabled AND Flow Control
4275  *   is disabled.
4276  ************************************************************************/
4277 static void
ixgbe_enable_rx_drop(struct ixgbe_softc * sc)4278 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
4279 {
4280 	struct ixgbe_hw *hw = &sc->hw;
4281 	struct rx_ring *rxr;
4282 	u32 srrctl;
4283 
4284 	for (int i = 0; i < sc->num_rx_queues; i++) {
4285 		rxr = &sc->rx_queues[i].rxr;
4286 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4287 		srrctl |= IXGBE_SRRCTL_DROP_EN;
4288 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4289 	}
4290 
4291 	/* enable drop for each vf */
4292 	for (int i = 0; i < sc->num_vfs; i++) {
4293 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4294 		    (IXGBE_QDE_WRITE |
4295 		    (i << IXGBE_QDE_IDX_SHIFT) |
4296 		    IXGBE_QDE_ENABLE));
4297 	}
4298 } /* ixgbe_enable_rx_drop */
4299 
4300 /************************************************************************
4301  * ixgbe_disable_rx_drop
4302  ************************************************************************/
4303 static void
ixgbe_disable_rx_drop(struct ixgbe_softc * sc)4304 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
4305 {
4306 	struct ixgbe_hw *hw = &sc->hw;
4307 	struct rx_ring *rxr;
4308 	u32 srrctl;
4309 
4310 	for (int i = 0; i < sc->num_rx_queues; i++) {
4311 		rxr = &sc->rx_queues[i].rxr;
4312 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4313 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4314 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4315 	}
4316 
4317 	/* disable drop for each vf */
4318 	for (int i = 0; i < sc->num_vfs; i++) {
4319 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4320 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4321 	}
4322 } /* ixgbe_disable_rx_drop */
4323 
4324 /************************************************************************
4325  * ixgbe_sysctl_advertise
4326  *
4327  *   SYSCTL wrapper around setting advertised speed
4328  ************************************************************************/
4329 static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)4330 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4331 {
4332 	struct ixgbe_softc *sc;
4333 	int error, advertise;
4334 
4335 	sc = (struct ixgbe_softc *)arg1;
4336 	if (atomic_load_acq_int(&sc->recovery_mode))
4337 		return (EPERM);
4338 
4339 	advertise = sc->advertise;
4340 
4341 	error = sysctl_handle_int(oidp, &advertise, 0, req);
4342 	if ((error) || (req->newptr == NULL))
4343 		return (error);
4344 
4345 	return ixgbe_set_advertise(sc, advertise);
4346 } /* ixgbe_sysctl_advertise */
4347 
4348 /************************************************************************
4349  * ixgbe_set_advertise - Control advertised link speed
4350  *
4351  *   Flags:
4352  *     0x1  - advertise 100 Mb
4353  *     0x2  - advertise 1G
4354  *     0x4  - advertise 10G
4355  *     0x8  - advertise 10 Mb (yes, Mb)
4356  *     0x10 - advertise 2.5G (disabled by default)
4357  *     0x20 - advertise 5G (disabled by default)
4358  *
4359  ************************************************************************/
4360 static int
ixgbe_set_advertise(struct ixgbe_softc * sc,int advertise)4361 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4362 {
4363 	device_t dev = iflib_get_dev(sc->ctx);
4364 	struct ixgbe_hw *hw;
4365 	ixgbe_link_speed speed = 0;
4366 	ixgbe_link_speed link_caps = 0;
4367 	s32 err = IXGBE_NOT_IMPLEMENTED;
4368 	bool negotiate = false;
4369 
4370 	/* Checks to validate new value */
4371 	if (sc->advertise == advertise) /* no change */
4372 		return (0);
4373 
4374 	hw = &sc->hw;
4375 
4376 	/* No speed changes for backplane media */
4377 	if (hw->phy.media_type == ixgbe_media_type_backplane)
4378 		return (ENODEV);
4379 
4380 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4381 	      (hw->phy.multispeed_fiber))) {
4382 		device_printf(dev,
4383 		    "Advertised speed can only be set on copper or multispeed"
4384 		    " fiber media types.\n");
4385 		return (EINVAL);
4386 	}
4387 
4388 	if (advertise < 0x1 || advertise > 0x3F) {
4389 		device_printf(dev,
4390 		    "Invalid advertised speed; valid modes are 0x1 through"
4391 		    " 0x3F\n");
4392 		return (EINVAL);
4393 	}
4394 
4395 	if (hw->mac.ops.get_link_capabilities) {
4396 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4397 		    &negotiate);
4398 		if (err != IXGBE_SUCCESS) {
4399 			device_printf(dev,
4400 			    "Unable to determine supported advertise speeds"
4401 			    "\n");
4402 			return (ENODEV);
4403 		}
4404 	}
4405 
4406 	/* Set new value and report new advertised mode */
4407 	if (advertise & 0x1) {
4408 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4409 			device_printf(dev,
4410 			    "Interface does not support 100Mb advertised"
4411 			    " speed\n");
4412 			return (EINVAL);
4413 		}
4414 		speed |= IXGBE_LINK_SPEED_100_FULL;
4415 	}
4416 	if (advertise & 0x2) {
4417 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4418 			device_printf(dev,
4419 			    "Interface does not support 1Gb advertised speed"
4420 			    "\n");
4421 			return (EINVAL);
4422 		}
4423 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4424 	}
4425 	if (advertise & 0x4) {
4426 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4427 			device_printf(dev,
4428 			    "Interface does not support 10Gb advertised speed"
4429 			    "\n");
4430 			return (EINVAL);
4431 		}
4432 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4433 	}
4434 	if (advertise & 0x8) {
4435 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4436 			device_printf(dev,
4437 			    "Interface does not support 10Mb advertised speed"
4438 			    "\n");
4439 			return (EINVAL);
4440 		}
4441 		speed |= IXGBE_LINK_SPEED_10_FULL;
4442 	}
4443 	if (advertise & 0x10) {
4444 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
4445 			device_printf(dev,
4446 			    "Interface does not support 2.5G advertised speed"
4447 			    "\n");
4448 			return (EINVAL);
4449 		}
4450 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
4451 	}
4452 	if (advertise & 0x20) {
4453 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
4454 			device_printf(dev,
4455 			    "Interface does not support 5G advertised speed"
4456 			    "\n");
4457 			return (EINVAL);
4458 		}
4459 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
4460 	}
4461 
4462 	hw->mac.autotry_restart = true;
4463 	hw->mac.ops.setup_link(hw, speed, true);
4464 	sc->advertise = advertise;
4465 
4466 	return (0);
4467 } /* ixgbe_set_advertise */
4468 
4469 /************************************************************************
4470  * ixgbe_get_default_advertise - Get default advertised speed settings
4471  *
4472  *   Formatted for sysctl usage.
4473  *   Flags:
4474  *     0x1 - advertise 100 Mb
4475  *     0x2 - advertise 1G
4476  *     0x4 - advertise 10G
4477  *     0x8 - advertise 10 Mb (yes, Mb)
4478  *     0x10 - advertise 2.5G (disabled by default)
4479  *     0x20 - advertise 5G (disabled by default)
4480  ************************************************************************/
4481 static int
ixgbe_get_default_advertise(struct ixgbe_softc * sc)4482 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
4483 {
4484 	struct ixgbe_hw *hw = &sc->hw;
4485 	int speed;
4486 	ixgbe_link_speed link_caps = 0;
4487 	s32 err;
4488 	bool negotiate = false;
4489 
4490 	/*
4491 	 * Advertised speed means nothing unless it's copper or
4492 	 * multi-speed fiber
4493 	 */
4494 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4495 	    !(hw->phy.multispeed_fiber))
4496 		return (0);
4497 
4498 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4499 	if (err != IXGBE_SUCCESS)
4500 		return (0);
4501 
4502 	if (hw->mac.type == ixgbe_mac_X550) {
4503 		/*
4504 		 * 2.5G and 5G autonegotiation speeds on X550
4505 		 * are disabled by default due to reported
4506 		 * interoperability issues with some switches.
4507 		 */
4508 		link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
4509 		    IXGBE_LINK_SPEED_5GB_FULL);
4510 	}
4511 
4512 	speed =
4513 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
4514 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
4515 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
4516 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
4517 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
4518 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);
4519 
4520 	return speed;
4521 } /* ixgbe_get_default_advertise */
4522 
4523 /************************************************************************
4524  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4525  *
4526  *   Control values:
4527  *     0/1 - off / on (use default value of 1000)
4528  *
4529  *     Legal timer values are:
4530  *     50,100,250,500,1000,2000,5000,10000
4531  *
4532  *     Turning off interrupt moderation will also turn this off.
4533  ************************************************************************/
4534 static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)4535 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4536 {
4537 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4538 	if_t ifp = iflib_get_ifp(sc->ctx);
4539 	int error;
4540 	u16 newval;
4541 
4542 	newval = sc->dmac;
4543 	error = sysctl_handle_16(oidp, &newval, 0, req);
4544 	if ((error) || (req->newptr == NULL))
4545 		return (error);
4546 
4547 	switch (newval) {
4548 	case 0:
4549 		/* Disabled */
4550 		sc->dmac = 0;
4551 		break;
4552 	case 1:
4553 		/* Enable and use default */
4554 		sc->dmac = 1000;
4555 		break;
4556 	case 50:
4557 	case 100:
4558 	case 250:
4559 	case 500:
4560 	case 1000:
4561 	case 2000:
4562 	case 5000:
4563 	case 10000:
4564 		/* Legal values - allow */
4565 		sc->dmac = newval;
4566 		break;
4567 	default:
4568 		/* Do nothing, illegal value */
4569 		return (EINVAL);
4570 	}
4571 
4572 	/* Re-initialize hardware if it's already running */
4573 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
4574 		if_init(ifp, ifp);
4575 
4576 	return (0);
4577 } /* ixgbe_sysctl_dmac */
4578 
4579 #ifdef IXGBE_DEBUG
4580 /************************************************************************
4581  * ixgbe_sysctl_power_state
4582  *
4583  *   Sysctl to test power states
4584  *   Values:
4585  *     0      - set device to D0
4586  *     3      - set device to D3
4587  *     (none) - get current device power state
4588  ************************************************************************/
4589 static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)4590 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4591 {
4592 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4593 	device_t dev = sc->dev;
4594 	int curr_ps, new_ps, error = 0;
4595 
4596 	curr_ps = new_ps = pci_get_powerstate(dev);
4597 
4598 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4599 	if ((error) || (req->newptr == NULL))
4600 		return (error);
4601 
4602 	if (new_ps == curr_ps)
4603 		return (0);
4604 
4605 	if (new_ps == 3 && curr_ps == 0)
4606 		error = DEVICE_SUSPEND(dev);
4607 	else if (new_ps == 0 && curr_ps == 3)
4608 		error = DEVICE_RESUME(dev);
4609 	else
4610 		return (EINVAL);
4611 
4612 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4613 
4614 	return (error);
4615 } /* ixgbe_sysctl_power_state */
4616 #endif
4617 
4618 /************************************************************************
4619  * ixgbe_sysctl_wol_enable
4620  *
4621  *   Sysctl to enable/disable the WoL capability,
4622  *   if supported by the adapter.
4623  *
4624  *   Values:
4625  *     0 - disabled
4626  *     1 - enabled
4627  ************************************************************************/
4628 static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)4629 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4630 {
4631 	struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
4632 	struct ixgbe_hw *hw = &sc->hw;
4633 	int new_wol_enabled;
4634 	int error = 0;
4635 
4636 	new_wol_enabled = hw->wol_enabled;
4637 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4638 	if ((error) || (req->newptr == NULL))
4639 		return (error);
4640 	new_wol_enabled = !!(new_wol_enabled);
4641 	if (new_wol_enabled == hw->wol_enabled)
4642 		return (0);
4643 
4644 	if (new_wol_enabled > 0 && !sc->wol_support)
4645 		return (ENODEV);
4646 	else
4647 		hw->wol_enabled = new_wol_enabled;
4648 
4649 	return (0);
4650 } /* ixgbe_sysctl_wol_enable */
4651 
4652 /************************************************************************
4653  * ixgbe_sysctl_wufc - Wake Up Filter Control
4654  *
4655  *   Sysctl to enable/disable the types of packets that the
4656  *   adapter will wake up on upon receipt.
4657  *   Flags:
4658  *     0x1  - Link Status Change
4659  *     0x2  - Magic Packet
4660  *     0x4  - Direct Exact
4661  *     0x8  - Directed Multicast
4662  *     0x10 - Broadcast
4663  *     0x20 - ARP/IPv4 Request Packet
4664  *     0x40 - Direct IPv4 Packet
4665  *     0x80 - Direct IPv6 Packet
4666  *
4667  *   Settings not listed above will cause the sysctl to return an error.
4668  ************************************************************************/
4669 static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)4670 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4671 {
4672 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4673 	int error = 0;
4674 	u32 new_wufc;
4675 
4676 	new_wufc = sc->wufc;
4677 
4678 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4679 	if ((error) || (req->newptr == NULL))
4680 		return (error);
4681 	if (new_wufc == sc->wufc)
4682 		return (0);
4683 
4684 	if (new_wufc & 0xffffff00)
4685 		return (EINVAL);
4686 
4687 	new_wufc &= 0xff;
4688 	new_wufc |= (0xffffff & sc->wufc);
4689 	sc->wufc = new_wufc;
4690 
4691 	return (0);
4692 } /* ixgbe_sysctl_wufc */
4693 
4694 #ifdef IXGBE_DEBUG
4695 /************************************************************************
4696  * ixgbe_sysctl_print_rss_config
4697  ************************************************************************/
4698 static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)4699 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4700 {
4701 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4702 	struct ixgbe_hw *hw = &sc->hw;
4703 	device_t dev = sc->dev;
4704 	struct sbuf *buf;
4705 	int error = 0, reta_size;
4706 	u32 reg;
4707 
4708 	if (atomic_load_acq_int(&sc->recovery_mode))
4709 		return (EPERM);
4710 
4711 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4712 	if (!buf) {
4713 		device_printf(dev, "Could not allocate sbuf for output.\n");
4714 		return (ENOMEM);
4715 	}
4716 
4717 	// TODO: use sbufs to make a string to print out
4718 	/* Set multiplier for RETA setup and table size based on MAC */
4719 	switch (sc->hw.mac.type) {
4720 	case ixgbe_mac_X550:
4721 	case ixgbe_mac_X550EM_x:
4722 	case ixgbe_mac_X550EM_a:
4723 		reta_size = 128;
4724 		break;
4725 	default:
4726 		reta_size = 32;
4727 		break;
4728 	}
4729 
4730 	/* Print out the redirection table */
4731 	sbuf_cat(buf, "\n");
4732 	for (int i = 0; i < reta_size; i++) {
4733 		if (i < 32) {
4734 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4735 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4736 		} else {
4737 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4738 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4739 		}
4740 	}
4741 
4742 	// TODO: print more config
4743 
4744 	error = sbuf_finish(buf);
4745 	if (error)
4746 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4747 
4748 	sbuf_delete(buf);
4749 
4750 	return (0);
4751 } /* ixgbe_sysctl_print_rss_config */
4752 #endif /* IXGBE_DEBUG */
4753 
4754 /************************************************************************
4755  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4756  *
4757  *   For X552/X557-AT devices using an external PHY
4758  ************************************************************************/
4759 static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)4760 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4761 {
4762 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4763 	struct ixgbe_hw *hw = &sc->hw;
4764 	u16 reg;
4765 
4766 	if (atomic_load_acq_int(&sc->recovery_mode))
4767 		return (EPERM);
4768 
4769 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4770 		device_printf(iflib_get_dev(sc->ctx),
4771 		    "Device has no supported external thermal sensor.\n");
4772 		return (ENODEV);
4773 	}
4774 
4775 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4776 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4777 		device_printf(iflib_get_dev(sc->ctx),
4778 		    "Error reading from PHY's current temperature register"
4779 		    "\n");
4780 		return (EAGAIN);
4781 	}
4782 
4783 	/* Shift temp for output */
4784 	reg = reg >> 8;
4785 
4786 	return (sysctl_handle_16(oidp, NULL, reg, req));
4787 } /* ixgbe_sysctl_phy_temp */
4788 
4789 /************************************************************************
4790  * ixgbe_sysctl_phy_overtemp_occurred
4791  *
4792  *   Reports (directly from the PHY) whether the current PHY
4793  *   temperature is over the overtemp threshold.
4794  ************************************************************************/
4795 static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)4796 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4797 {
4798 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4799 	struct ixgbe_hw *hw = &sc->hw;
4800 	u16 reg;
4801 
4802 	if (atomic_load_acq_int(&sc->recovery_mode))
4803 		return (EPERM);
4804 
4805 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4806 		device_printf(iflib_get_dev(sc->ctx),
4807 		    "Device has no supported external thermal sensor.\n");
4808 		return (ENODEV);
4809 	}
4810 
4811 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4812 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4813 		device_printf(iflib_get_dev(sc->ctx),
4814 		    "Error reading from PHY's temperature status register\n");
4815 		return (EAGAIN);
4816 	}
4817 
4818 	/* Get occurrence bit */
4819 	reg = !!(reg & 0x4000);
4820 
4821 	return (sysctl_handle_16(oidp, 0, reg, req));
4822 } /* ixgbe_sysctl_phy_overtemp_occurred */
4823 
4824 /************************************************************************
4825  * ixgbe_sysctl_eee_state
4826  *
4827  *   Sysctl to set EEE power saving feature
4828  *   Values:
4829  *     0      - disable EEE
4830  *     1      - enable EEE
4831  *     (none) - get current device EEE state
4832  ************************************************************************/
4833 static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)4834 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4835 {
4836 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4837 	device_t dev = sc->dev;
4838 	if_t ifp = iflib_get_ifp(sc->ctx);
4839 	int curr_eee, new_eee, error = 0;
4840 	s32 retval;
4841 
4842 	if (atomic_load_acq_int(&sc->recovery_mode))
4843 		return (EPERM);
4844 
4845 	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4846 
4847 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4848 	if ((error) || (req->newptr == NULL))
4849 		return (error);
4850 
4851 	/* Nothing to do */
4852 	if (new_eee == curr_eee)
4853 		return (0);
4854 
4855 	/* Not supported */
4856 	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4857 		return (EINVAL);
4858 
4859 	/* Bounds checking */
4860 	if ((new_eee < 0) || (new_eee > 1))
4861 		return (EINVAL);
4862 
4863 	retval = ixgbe_setup_eee(&sc->hw, new_eee);
4864 	if (retval) {
4865 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4866 		return (EINVAL);
4867 	}
4868 
4869 	/* Restart auto-neg */
4870 	if_init(ifp, ifp);
4871 
4872 	device_printf(dev, "New EEE state: %d\n", new_eee);
4873 
4874 	/* Cache new value */
4875 	if (new_eee)
4876 		sc->feat_en |= IXGBE_FEATURE_EEE;
4877 	else
4878 		sc->feat_en &= ~IXGBE_FEATURE_EEE;
4879 
4880 	return (error);
4881 } /* ixgbe_sysctl_eee_state */
4882 
4883 static int
ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)4884 ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
4885 {
4886 	struct ixgbe_softc *sc;
4887 	u32 reg, val, shift;
4888 	int error, mask;
4889 
4890 	sc = oidp->oid_arg1;
4891 	switch (oidp->oid_arg2) {
4892 	case 0:
4893 		reg = IXGBE_DTXTCPFLGL;
4894 		shift = 0;
4895 		break;
4896 	case 1:
4897 		reg = IXGBE_DTXTCPFLGL;
4898 		shift = 16;
4899 		break;
4900 	case 2:
4901 		reg = IXGBE_DTXTCPFLGH;
4902 		shift = 0;
4903 		break;
4904 	default:
4905 		return (EINVAL);
4906 		break;
4907 	}
4908 	val = IXGBE_READ_REG(&sc->hw, reg);
4909 	mask = (val >> shift) & 0xfff;
4910 	error = sysctl_handle_int(oidp, &mask, 0, req);
4911 	if (error != 0 || req->newptr == NULL)
4912 		return (error);
4913 	if (mask < 0 || mask > 0xfff)
4914 		return (EINVAL);
4915 	val = (val & ~(0xfff << shift)) | (mask << shift);
4916 	IXGBE_WRITE_REG(&sc->hw, reg, val);
4917 	return (0);
4918 }
4919 
4920 /************************************************************************
4921  * ixgbe_init_device_features
4922  ************************************************************************/
4923 static void
ixgbe_init_device_features(struct ixgbe_softc * sc)4924 ixgbe_init_device_features(struct ixgbe_softc *sc)
4925 {
4926 	sc->feat_cap = IXGBE_FEATURE_NETMAP |
4927 	    IXGBE_FEATURE_RSS |
4928 	    IXGBE_FEATURE_MSI |
4929 	    IXGBE_FEATURE_MSIX |
4930 	    IXGBE_FEATURE_LEGACY_IRQ;
4931 
4932 	/* Set capabilities first... */
4933 	switch (sc->hw.mac.type) {
4934 	case ixgbe_mac_82598EB:
4935 		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
4936 			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4937 		break;
4938 	case ixgbe_mac_X540:
4939 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4940 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4941 		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4942 		    (sc->hw.bus.func == 0))
4943 			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4944 		break;
4945 	case ixgbe_mac_X550:
4946 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4947 		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4948 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4949 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4950 		break;
4951 	case ixgbe_mac_X550EM_x:
4952 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4953 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4954 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4955 		if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4956 			sc->feat_cap |= IXGBE_FEATURE_EEE;
4957 		break;
4958 	case ixgbe_mac_X550EM_a:
4959 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4960 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4961 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4962 		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4963 		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4964 		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4965 			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4966 			sc->feat_cap |= IXGBE_FEATURE_EEE;
4967 		}
4968 		break;
4969 	case ixgbe_mac_82599EB:
4970 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4971 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
4972 		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4973 		    (sc->hw.bus.func == 0))
4974 			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4975 		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4976 			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4977 		break;
4978 	default:
4979 		break;
4980 	}
4981 
4982 	/* Enabled by default... */
4983 	/* Fan failure detection */
4984 	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4985 		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4986 	/* Netmap */
4987 	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
4988 		sc->feat_en |= IXGBE_FEATURE_NETMAP;
4989 	/* EEE */
4990 	if (sc->feat_cap & IXGBE_FEATURE_EEE)
4991 		sc->feat_en |= IXGBE_FEATURE_EEE;
4992 	/* Thermal Sensor */
4993 	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4994 		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4995 	/* Recovery mode */
4996 	if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
4997 		sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
4998 
4999 	/* Enabled via global sysctl... */
5000 	/* Flow Director */
5001 	if (ixgbe_enable_fdir) {
5002 		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
5003 			sc->feat_en |= IXGBE_FEATURE_FDIR;
5004 		else
5005 			device_printf(sc->dev,
5006 			    "Device does not support Flow Director."
5007 			    " Leaving disabled.");
5008 	}
5009 	/*
5010 	 * Message Signal Interrupts - Extended (MSI-X)
5011 	 * Normal MSI is only enabled if MSI-X calls fail.
5012 	 */
5013 	if (!ixgbe_enable_msix)
5014 		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
5015 	/* Receive-Side Scaling (RSS) */
5016 	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
5017 		sc->feat_en |= IXGBE_FEATURE_RSS;
5018 
5019 	/* Disable features with unmet dependencies... */
5020 	/* No MSI-X */
5021 	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
5022 		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
5023 		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5024 		sc->feat_en &= ~IXGBE_FEATURE_RSS;
5025 		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
5026 	}
5027 } /* ixgbe_init_device_features */
5028 
5029 /************************************************************************
5030  * ixgbe_check_fan_failure
5031  ************************************************************************/
5032 static void
ixgbe_check_fan_failure(struct ixgbe_softc * sc,u32 reg,bool in_interrupt)5033 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
5034 {
5035 	u32 mask;
5036 
5037 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
5038 	    IXGBE_ESDP_SDP1;
5039 
5040 	if (reg & mask)
5041 		device_printf(sc->dev,
5042 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
5043 } /* ixgbe_check_fan_failure */
5044 
5045 /************************************************************************
5046  * ixgbe_sbuf_fw_version
5047  ************************************************************************/
5048 static void
ixgbe_sbuf_fw_version(struct ixgbe_hw * hw,struct sbuf * buf)5049 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
5050 {
5051 	struct ixgbe_nvm_version nvm_ver = {0};
5052 	const char *space = "";
5053 
5054 	ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
5055 	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
5056 	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack a build ID in Intel's SCM */
5057 	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
5058 
5059 	/* FW version */
5060 	if ((nvm_ver.phy_fw_maj == 0x0 &&
5061 	    nvm_ver.phy_fw_min == 0x0 &&
5062 	    nvm_ver.phy_fw_id == 0x0) ||
5063 		(nvm_ver.phy_fw_maj == 0xF &&
5064 	    nvm_ver.phy_fw_min == 0xFF &&
5065 	    nvm_ver.phy_fw_id == 0xF)) {
5066 		/* If major, minor and id numbers are set to 0,
5067 		 * reading FW version is unsupported. If major number
5068 		 * is set to 0xF, minor is set to 0xFF and id is set
5069 		 * to 0xF, this means that number read is invalid. */
5070 	} else
5071 		sbuf_printf(buf, "fw %d.%d.%d ",
5072 		    nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
5073 		    nvm_ver.phy_fw_id);
5074 
5075 	/* NVM version */
5076 	if ((nvm_ver.nvm_major == 0x0 &&
5077 	    nvm_ver.nvm_minor == 0x0 &&
5078 	    nvm_ver.nvm_id == 0x0) ||
5079 		(nvm_ver.nvm_major == 0xF &&
5080 	    nvm_ver.nvm_minor == 0xFF &&
5081 	    nvm_ver.nvm_id == 0xF)) {
5082 		/* If major, minor and id numbers are set to 0,
5083 		 * reading NVM version is unsupported. If major number
5084 		 * is set to 0xF, minor is set to 0xFF and id is set
5085 		 * to 0xF, this means that number read is invalid. */
5086 	} else
5087 		sbuf_printf(buf, "nvm %x.%02x.%x ",
5088 		    nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);
5089 
5090 	if (nvm_ver.oem_valid) {
5091 		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
5092 		    nvm_ver.oem_minor, nvm_ver.oem_release);
5093 		space = " ";
5094 	}
5095 
5096 	if (nvm_ver.or_valid) {
5097 		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
5098 		    space, nvm_ver.or_major, nvm_ver.or_build,
5099 		    nvm_ver.or_patch);
5100 		space = " ";
5101 	}
5102 
5103 	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
5104 	    NVM_VER_INVALID | 0xFFFFFFFF)) {
5105 		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
5106 	}
5107 } /* ixgbe_sbuf_fw_version */
5108 
5109 /************************************************************************
5110  * ixgbe_print_fw_version
5111  ************************************************************************/
5112 static void
ixgbe_print_fw_version(if_ctx_t ctx)5113 ixgbe_print_fw_version(if_ctx_t ctx)
5114 {
5115 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
5116 	struct ixgbe_hw *hw = &sc->hw;
5117 	device_t dev = sc->dev;
5118 	struct sbuf *buf;
5119 	int error = 0;
5120 
5121 	buf = sbuf_new_auto();
5122 	if (!buf) {
5123 		device_printf(dev, "Could not allocate sbuf for output.\n");
5124 		return;
5125 	}
5126 
5127 	ixgbe_sbuf_fw_version(hw, buf);
5128 
5129 	error = sbuf_finish(buf);
5130 	if (error)
5131 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5132 	else if (sbuf_len(buf))
5133 		device_printf(dev, "%s\n", sbuf_data(buf));
5134 
5135 	sbuf_delete(buf);
5136 } /* ixgbe_print_fw_version */
5137 
5138 /************************************************************************
5139  * ixgbe_sysctl_print_fw_version
5140  ************************************************************************/
5141 static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)5142 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
5143 {
5144 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5145 	struct ixgbe_hw *hw = &sc->hw;
5146 	device_t dev = sc->dev;
5147 	struct sbuf *buf;
5148 	int error = 0;
5149 
5150 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5151 	if (!buf) {
5152 		device_printf(dev, "Could not allocate sbuf for output.\n");
5153 		return (ENOMEM);
5154 	}
5155 
5156 	ixgbe_sbuf_fw_version(hw, buf);
5157 
5158 	error = sbuf_finish(buf);
5159 	if (error)
5160 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5161 
5162 	sbuf_delete(buf);
5163 
5164 	return (0);
5165 } /* ixgbe_sysctl_print_fw_version */
5166