/*****************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*****************************************************************************/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "5.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const pci_vendor_info_t ixgbe_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,
    "Intel(R) 82598EB AF (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,
    "Intel(R) 82598EB AF (Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,
    "Intel(R) 82598EB AT (CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,
    "Intel(R) 82598EB AT"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,
    "Intel(R) 82598EB AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,
    "Intel(R) 82598EB AF DA (Dual Fiber)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,
    "Intel(R) 82598EB AT (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,
    "Intel(R) 82598EB AF (Dual Fiber LR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,
    "Intel(R) 82598EB AF (Dual Fiber SR)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,
    "Intel(R) 82598EB LOM"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,
    "Intel(R) X520 82599 (KX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,
    "Intel(R) X520 82599 (KX4 Mezzanine)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,
    "Intel(R) X520 82599ES (SFI/SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,
    "Intel(R) X520 82599 (XAUI/BX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,
    "Intel(R) X520 82599 (Dual CX4)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,
    "Intel(R) X520-T 82599 LOM"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS,
    "Intel(R) X520 82599 LS"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,
    "Intel(R) X520 82599 (Combined Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,
    "Intel(R) X520 82599 (Backplane w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,
    "Intel(R) X520 82599 (Dual SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,
    "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,
    "Intel(R) X520-1 82599EN (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,
    "Intel(R) X520-4 82599 (Quad SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,
    "Intel(R) X520-Q1 82599 (QSFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,
    "Intel(R) X540-AT2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,
    "Intel(R) X552 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,
    "Intel(R) X552 (KX4 Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,
    "Intel(R) X552/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,
    "Intel(R) X552 (1000BASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP,
    "Intel(R) X552 (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR,
    "Intel(R) X553 (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L,
    "Intel(R) X553 L (KR Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP,
    "Intel(R) X553 (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N,
    "Intel(R) X553 N (SFP+)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII,
    "Intel(R) X553 (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L,
    "Intel(R) X553 L (1GbE SGMII)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T,
    "Intel(R) X553/X557-AT (10GBASE-T)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T,
    "Intel(R) X553 (1GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L,
    "Intel(R) X553 L (1GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS,
    "Intel(R) X540-T2 (Bypass)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
    "Intel(R) X520 82599 (Bypass)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE,
    "Intel(R) E610 (Backplane)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP,
    "Intel(R) E610 (SFP)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T,
    "Intel(R) E610 (2.5 GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T,
    "Intel(R) E610 (10 GbE)"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII,
    "Intel(R) E610 (SGMII)"),
	/* required last entry */
	PVID_END
};

static void *ixgbe_register(device_t);
static int  ixgbe_if_attach_pre(if_ctx_t);
static int  ixgbe_if_attach_post(if_ctx_t);
static int  ixgbe_if_detach(if_ctx_t);
static int  ixgbe_if_shutdown(if_ctx_t);
static int  ixgbe_if_suspend(if_ctx_t);
static int  ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int  ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int  ixgbe_if_media_change(if_ctx_t);
static int  ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int  ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int  ixgbe_if_promisc_set(if_ctx_t, int);
static int  ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static int  ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static const char *ixgbe_link_speed_to_str(u32 link_speed);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int  ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

static int ixgbe_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int  ixgbe_allocate_pci_resources(if_ctx_t);
static int  ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int  ixgbe_msix_link(void *);
static int  ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int  ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_fw_mode_timer(void *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int  ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int  ixgbe_set_advertise(struct ixgbe_softc *, int);
static int  ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

static void ixgbe_add_debug_sysctls(struct ixgbe_softc *sc);
static void ixgbe_add_debug_dump_sysctls(struct ixgbe_softc *sc);
static int  ixgbe_debug_dump_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd);
static u8   ixgbe_debug_dump_print_cluster(struct ixgbe_softc *sc,
    struct sbuf *sbuf, u8 cluster_id);
static int ixgbe_nvm_access_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd);

/* Sysctl handlers */
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);

static int  ixgbe_sysctl_debug_dump_set_clusters(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dump_debug_dump(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
static void ixgbe_handle_fw_event(void *);

static int ixgbe_enable_lse(struct ixgbe_softc *sc);
static int ixgbe_disable_lse(struct ixgbe_softc *sc);

/************************************************************************
 *  FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
	DEVMETHOD(ifdi_priv_ioctl, ixgbe_if_priv_ioctl),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.
 * This currently only works as a compile-time option because it is
 * applied during attach; set this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix,
    0,
    "Enable MSI-X interrupts");

/*
 * Turning this on allows the use of unsupported SFP+ modules;
 * note that in doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir,
    0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss,
    0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation,
 * which means that the interrupt rate is varied over time based on the
 * traffic for that interrupt vector.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim,
    0,
    "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; at the default of 20, every 20th packet
 * will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE * 4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE * 4,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(sc->num_tx_queues > 0);
	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
	    ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		txr->sc = que->sc = sc;

		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
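		/*
		 * Each tx_rsq entry records the index of a descriptor that
		 * was queued with the RS (Report Status) bit set; the
		 * credits-update path later polls those descriptors' DD
		 * bits to learn how far the hardware has advanced.
		 */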
		/* get virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];

		txr->bytes = 0;
		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (sc->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixgbe_if_queues_free(ctx);

	return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int i;

	MPASS(sc->num_rx_queues > 0);
	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) *
	    nrxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		/* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_RDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = sc->tx_queues;
	struct ix_rx_queue *rx_que = sc->rx_queues;
	int i;

	if (tx_que != NULL) {
		for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
				break;

			free(txr->tx_rsq, M_IXGBE);
			txr->tx_rsq = NULL;
		}

		free(sc->tx_queues, M_IXGBE);
		sc->tx_queues = NULL;
	}
	if (rx_que != NULL) {
		free(sc->rx_queues, M_IXGBE);
		sc->rx_queues = NULL;
	}
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}
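	/*
	 * Either way, the 40-byte key is programmed into the ten 32-bit
	 * RSSRK registers below.
	 */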

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
	case ixgbe_mac_E610:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
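		/*
		 * Every fourth entry completes a 32-bit register; the
		 * first 128 entries land in RETA[0..31], and the rest
		 * (on MACs with a 512-entry table) in the extended ERETA
		 * registers.
		 */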
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw,
				    IXGBE_ERETA((i >> 2) - 32), reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4 |
		    RSS_HASHTYPE_RSS_TCP_IPV4 |
		    RSS_HASHTYPE_RSS_IPV6 |
		    RSS_HASHTYPE_RSS_TCP_IPV6 |
		    RSS_HASHTYPE_RSS_IPV6_EX |
		    RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
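/*
 * Round an Rx buffer size up to the next multiple of
 * (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) bytes so that it can be encoded
 * in the SRRCTL BSIZEPKT field, which counts in those units.
 */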
#define BSIZEPKT_ROUNDUP ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1)

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ixgbe_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (if_getmtu(ifp) > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		    IXGBE_PSRTYPE_UDPHDR |
		    IXGBE_PSRTYPE_IPV4HDR |
		    IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (if_getcapenable(ifp) & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
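		/* Start "processed" one slot behind index 0: an empty ring. */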
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl =
			    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j),
			    txctrl);
			break;
		}
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}
} /* ixgbe_initialize_transmit_units */

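/************************************************************************
 * ixgbe_check_fw_api_version - Sanity-check the E610 firmware API version
 *
 *   Only a newer FW API major version is fatal; a minor version more
 *   than two releases away from the expected one, in either direction,
 *   just earns a warning.
 ************************************************************************/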
static int
ixgbe_check_fw_api_version(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;

	if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
		device_printf(sc->dev,
		    "The driver for the device stopped because the NVM "
		    "image is newer than expected. You must install the "
		    "most recent version of the network driver.\n");
		return (EOPNOTSUPP);
	} else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
	    hw->api_min_ver > (IXGBE_FW_API_VER_MINOR + 2)) {
		device_printf(sc->dev,
		    "The driver for the device detected a newer version of "
		    "the NVM image than expected. Please install the most "
		    "recent version of the network driver.\n");
	} else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
	    hw->api_min_ver < IXGBE_FW_API_VER_MINOR - 2) {
		device_printf(sc->dev,
		    "The driver for the device detected an older version "
		    "of the NVM image than expected. "
		    "Please update the NVM image.\n");
	}
	return (0);
}

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;
	u32 ctrl_ext;
	size_t i;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->hw.back = sc;
	sc->ctx = ctx;
	sc->dev = dev;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Determine hardware revision */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		return (ENXIO);
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_pci;
	}

	if (hw->mac.type == ixgbe_mac_E610)
		ixgbe_init_aci(hw);

	sc->do_debug_dump = false;

	if (hw->mac.ops.fw_recovery_mode &&
	    hw->mac.ops.fw_recovery_mode(hw)) {
		device_printf(dev,
		    "Firmware recovery mode detected. Limiting "
		    "functionality.\nRefer to the Intel(R) Ethernet Adapters "
		    "and Devices User Guide for details on firmware recovery "
		    "mode.");
		error = ENOSYS;
		goto err_pci;
	}

	/* 82598 does not support SR-IOV; initialize everything else */
	if (hw->mac.type >= ixgbe_mac_82599_vf) {
		for (i = 0; i < sc->num_vfs; i++)
			hw->mbx.ops[i].init_params(hw);
	}

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	ixgbe_init_device_features(sc);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(sc);

	/* Verify adapter fan is still functional (if applicable) */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(sc, esdp, false);
	}

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	hw->phy.reset_if_overtemp = true;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		sc->sfp_probe = true;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_pci;
	} else if (error) {
		device_printf(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_pci;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_pci;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev,
		    "This device is a pre-production adapter/LOM.  Please be"
		    " aware there may be issues associated with your"
		    " hardware.\nIf you are experiencing problems please"
		    " contact your Intel or hardware representative who"
		    " provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_pci;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Check the FW API version */
	if (hw->mac.type == ixgbe_mac_E610 && ixgbe_check_fw_api_version(sc)) {
		error = EIO;
		goto err_pci;
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		scctx->isc_rss_table_size = 512;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
		break;
	default:
		scctx->isc_rss_table_size = 128;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
	}

	/* Allow legacy interrupts */
	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);

	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
	} else {
		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	}

	scctx->isc_msix_bar = pci_msix_table_bar(dev);

	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

	return (0);

err_pci:
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_pci_resources(ctx);

	if (hw->mac.type == ixgbe_mac_E610)
		ixgbe_shutdown_aci(hw);

	return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	if (sc->intr_type == IFLIB_INTR_LEGACY &&
	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev,
		    "Device does not support legacy interrupts\n");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
	    M_IXGBE, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev,
		    "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(sc, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	ixgbe_initialize_iov(sc);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(sc);
	ixgbe_add_hw_stats(sc);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(sc);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass sc.
	 */
	ixgbe_bypass_init(sc);

	/* Display NVM and Option ROM versions */
	ixgbe_print_fw_version(ctx);

	/* Set an initial dmac value */
	sc->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	sc->advertise = ixgbe_get_default_advertise(sc);

	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	/* Init recovery mode timer and state variable */
	if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		sc->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&sc->fw_mode_timer, true);

		/* Start the task */
		callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
	}

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u16 dev_caps = 0;

	/* Find out WoL support for port */
	sc->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	    hw->bus.func == 0))
		sc->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));

	sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

	ixgbe_add_media_types(ctx);

	/* Autoselect media by default */
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	case IFCOUNTER_OMCASTS:
		return (sc->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (sc->iqdrops);
	case IFCOUNTER_IERRORS:
		return (sc->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int i;

	if (hw->phy.ops.read_i2c_byte == NULL)
		return (ENXIO);
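	/* Read the requested bytes out one at a time over I2C. */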
	for (i = 0; i < req->len; i++)
		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
		    req->dev_addr, &req->data[i]);
	return (0);
} /* ixgbe_if_i2c_req */

/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
 * reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/************************************************************************
 * ixgbe_if_priv_ioctl - Ioctl handler for driver
 *
 *   Handler for custom driver specific ioctls
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifdrv *ifd;
	device_t dev = sc->dev;

	/* Make sure the command type is valid */
	switch (command) {
	case SIOCSDRVSPEC:
	case SIOCGDRVSPEC:
		/* Accepted commands */
		break;
	case SIOCGPRIVATE_0:
		/*
		 * Although we do not support this ioctl command, it's
		 * expected that iflib will forward it to the IFDI_PRIV_IOCTL
		 * handler. Do not print a message in this case.
		 */
		return (ENOTSUP);
	default:
		/*
		 * If we get a different command for this function, it's
		 * definitely unexpected, so log a message indicating what
		 * command we got for debugging purposes.
		 */
		device_printf(dev,
		    "%s: unexpected ioctl command %08lx\n",
		    __func__, command);
		return (EINVAL);
	}

	ifd = (struct ifdrv *)data;

	switch (ifd->ifd_cmd) {
	case IXGBE_NVM_ACCESS:
		IOCTL_DEBUGOUT("ioctl: NVM ACCESS");
		return (ixgbe_nvm_access_ioctl(sc, ifd));
	case IXGBE_DEBUG_DUMP:
		IOCTL_DEBUGOUT("ioctl: DEBUG DUMP");
		return (ixgbe_debug_dump_ioctl(sc, ifd));
	default:
		IOCTL_DEBUGOUT1(
		    "ioctl: UNKNOWN SIOC(S|G)DRVSPEC (0x%X) command\n",
		    (int)ifd->ifd_cmd);
		return (EINVAL);
	}

	return (0);
}

/************************************************************************
 * ixgbe_nvm_access_ioctl
 *
 *   Handles an NVM access ioctl request
 ************************************************************************/
static int
ixgbe_nvm_access_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd)
{
	struct ixgbe_nvm_access_data *data;
	struct ixgbe_nvm_access_cmd *cmd;
	struct ixgbe_hw *hw = &sc->hw;
	size_t ifd_len = ifd->ifd_len;
	size_t malloc_len;
	device_t dev = sc->dev;
	u8 *nvm_buffer;
	s32 error = 0;

	/*
	 * ifioctl forwards SIOCxDRVSPEC to iflib without conducting
	 * a privilege check. Subsequently, iflib passes the ioctl to the
	 * driver without verifying privileges. To prevent non-privileged
	 * threads from accessing this interface, perform a privilege check
	 * at this point.
	 */
	error = priv_check(curthread, PRIV_DRIVER);
	if (error)
		return (error);

	if (ifd_len < sizeof(*cmd)) {
		device_printf(dev,
		    "%s: ifdrv length is too small. Got %zu, "
		    "but expected %zu\n",
		    __func__, ifd_len, sizeof(*cmd));
		return (EINVAL);
	}

	if (ifd->ifd_data == NULL) {
		device_printf(dev, "%s: No ifd data buffer.\n",
		    __func__);
		return (EINVAL);
	}

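	/*
	 * Size the allocation from the caller's length, but never smaller
	 * than one command header plus one data block, so the cmd/data
	 * pointer arithmetic below stays inside the allocation.
	 */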
	malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd));

	nvm_buffer = (u8 *)malloc(malloc_len, M_IXGBE, M_ZERO | M_NOWAIT);
	if (!nvm_buffer)
		return (ENOMEM);

	/* Copy the NVM access command and data in from user space */
	error = copyin(ifd->ifd_data, nvm_buffer, ifd_len);
	if (error) {
		device_printf(dev, "%s: Failed to copy data in, error: %d\n",
		    __func__, error);
		goto cleanup_free_nvm_buffer;
	}

	/*
	 * The NVM command structure is immediately followed by data which
	 * varies in size based on the command.
	 */
	cmd = (struct ixgbe_nvm_access_cmd *)nvm_buffer;
	data = (struct ixgbe_nvm_access_data *)
	    (nvm_buffer + sizeof(struct ixgbe_nvm_access_cmd));

	/* Handle the NVM access request */
	error = ixgbe_handle_nvm_access(hw, cmd, data);
	if (error) {
		device_printf(dev, "%s: NVM access request failed, error %d\n",
		    __func__, error);
	}

	/* Copy the possibly modified contents of the handled request out */
	error = copyout(nvm_buffer, ifd->ifd_data, ifd_len);
	if (error) {
		device_printf(dev, "%s: Copying response back to "
		    "user space failed, error %d\n",
		    __func__, error);
		goto cleanup_free_nvm_buffer;
	}

cleanup_free_nvm_buffer:
	free(nvm_buffer, M_IXGBE);
	return (error);
}
1553 
1554 /************************************************************************
1555  * ixgbe_debug_dump_ioctl
1556  *
1557  *   Makes debug dump of internal FW/HW data.
1558  ************************************************************************/
1559 static int
1560 ixgbe_debug_dump_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd)
1561 {
1562 	struct ixgbe_debug_dump_cmd *dd_cmd;
1563 	struct ixgbe_hw *hw = &sc->hw;
1564 	size_t ifd_len = ifd->ifd_len;
1565 	device_t dev = sc->dev;
1566 	s32 error = 0;
1567 
1568 	if (!(sc->feat_en & IXGBE_FEATURE_DBG_DUMP))
1569 		return (ENODEV);
1570 
1571 	/* Data returned from ACI command */
1572 	u16 ret_buf_size = 0;
1573 	u16 ret_next_cluster = 0;
1574 	u16 ret_next_table = 0;
1575 	u32 ret_next_index = 0;
1576 
1577 	/*
1578 	 * ifioctl forwards SIOCxDRVSPEC to iflib without conducting
1579 	 * a privilege check. Subsequently, iflib passes the ioctl to the driver
1580 	 * without verifying privileges. To prevent non-privileged threads from
1581 	 * accessing this interface, perform a privilege check at this point.
1582 	 */
1583 	error = priv_check(curthread, PRIV_DRIVER);
1584 	if (error)
1585 		return (error);
1586 
1587 	if (ifd_len < sizeof(*dd_cmd)) {
1588 		device_printf(dev,
1589 		    "%s: ifdrv length is too small. Got %zu, "
1590 		    "but expected at least %zu\n",
1591 		    __func__, ifd_len, sizeof(*dd_cmd));
1592 		return (EINVAL);
1593 	}
1594 
1595 	if (ifd->ifd_data == NULL) {
1596 		device_printf(dev, "%s: No ifd data buffer.\n",
1597 		     __func__);
1598 		return (EINVAL);
1599 	}
1600 
1601 	dd_cmd = (struct ixgbe_debug_dump_cmd *)malloc(ifd_len, M_IXGBE,
1602 	    M_NOWAIT | M_ZERO);
1603 	if (!dd_cmd) {
1604 		error = ENOMEM;
1605 		goto out;
1606 	}
1607 	/* copy data from userspace */
1608 	error = copyin(ifd->ifd_data, dd_cmd, ifd_len);
1609 	if (error) {
1610 		device_printf(dev, "%s: Failed to copy data in, error: %d\n",
1611 		    __func__, error);
1612 		goto out;
1613 	}
1614 
1615 	/* The ACI command requires the buf_size argument to be greater than 0 */
1616 	if (dd_cmd->data_size == 0) {
1617 		device_printf(dev, "%s: data_size must be greater than 0\n",
1618 		    __func__);
1619 		error = EINVAL;
1620 		goto out;
1621 	}
1622 
1623 	/* Zero the data buffer memory space */
1624 	memset(dd_cmd->data, 0, ifd_len - sizeof(*dd_cmd));
1625 
1626 	error = ixgbe_aci_get_internal_data(hw, dd_cmd->cluster_id,
1627 	    dd_cmd->table_id, dd_cmd->offset, dd_cmd->data, dd_cmd->data_size,
1628 	    &ret_buf_size, &ret_next_cluster, &ret_next_table, &ret_next_index);
1629 	if (error) {
1630 		device_printf(dev,
1631 		    "%s: Failed to get internal FW/HW data, error: %d\n",
1632 		    __func__, error);
1633 		goto out;
1634 	}
1635 
1636 	dd_cmd->cluster_id = ret_next_cluster;
1637 	dd_cmd->table_id = ret_next_table;
1638 	dd_cmd->offset = ret_next_index;
1639 	dd_cmd->data_size = ret_buf_size;
1640 
1641 	error = copyout(dd_cmd, ifd->ifd_data, ifd->ifd_len);
1642 	if (error) {
1643 		device_printf(dev,
1644 		    "%s: Failed to copy data out, error: %d\n",
1645 		    __func__, error);
1646 	}
1647 
1648 out:
1649 	free(dd_cmd, M_IXGBE);
1650 
1651 	return (error);
1652 }
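/*
 * Note: the next_cluster/next_table/next_index values copied back above
 * act as a resume cookie, so a caller can walk the whole FW/HW dump by
 * re-issuing IXGBE_DEBUG_DUMP with the returned cluster_id/table_id/
 * offset until the firmware reports no further data (a sketch of the
 * expected usage, not a contract guaranteed by this driver).
 */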
1653 
1654 /************************************************************************
1655  * ixgbe_add_media_types
1656  ************************************************************************/
1657 static void
1658 ixgbe_add_media_types(if_ctx_t ctx)
1659 {
1660 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1661 	struct ixgbe_hw *hw = &sc->hw;
1662 	device_t dev = iflib_get_dev(ctx);
1663 	u64 layer;
1664 
1665 	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
1666 
1667 	/* Media types with matching FreeBSD media defines */
1668 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1669 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1670 	if (layer & IXGBE_PHYSICAL_LAYER_5000BASE_T)
1671 		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1672 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
1673 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1674 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1675 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1676 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1677 		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1678 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1679 		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1680 
1681 	if (hw->mac.type == ixgbe_mac_X550) {
1682 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1683 		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1684 	}
1685 
1686 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1687 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1688 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1689 		    NULL);
1690 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1691 	}
1692 
1693 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1694 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1695 		if (hw->phy.multispeed_fiber)
1696 			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
1697 			    NULL);
1698 	}
1699 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1700 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1701 		if (hw->phy.multispeed_fiber)
1702 			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
1703 			    NULL);
1704 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1705 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1706 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1707 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1708 
1709 #ifdef IFM_ETH_XTYPE
1710 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1711 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1712 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1713 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1714 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1715 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1716 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1717 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1718 #else
1719 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1720 		device_printf(dev, "Media supported: 10GbaseKR\n");
1721 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1722 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1723 	}
1724 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1725 		device_printf(dev, "Media supported: 10GbaseKX4\n");
1726 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1727 		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1728 	}
1729 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1730 		device_printf(dev, "Media supported: 1000baseKX\n");
1731 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1732 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1733 	}
1734 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1735 		device_printf(dev, "Media supported: 2500baseKX\n");
1736 		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1737 		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1738 	}
1739 #endif
1740 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
1741 		device_printf(dev, "Media supported: 1000baseBX\n");
1742 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_BX, 0, NULL);
1743 	}
1744 
1745 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1746 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1747 		    0, NULL);
1748 		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1749 	}
1750 
1751 	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1752 } /* ixgbe_add_media_types */
1753 
1754 /************************************************************************
1755  * ixgbe_is_sfp
1756  ************************************************************************/
1757 static inline bool
1758 ixgbe_is_sfp(struct ixgbe_hw *hw)
1759 {
1760 	switch (hw->mac.type) {
1761 	case ixgbe_mac_82598EB:
1762 		if (hw->phy.type == ixgbe_phy_nl)
1763 			return (true);
1764 		return (false);
1765 	case ixgbe_mac_82599EB:
1766 		switch (hw->mac.ops.get_media_type(hw)) {
1767 		case ixgbe_media_type_fiber:
1768 		case ixgbe_media_type_fiber_qsfp:
1769 			return (true);
1770 		default:
1771 			return (false);
1772 		}
1773 	case ixgbe_mac_X550EM_x:
1774 	case ixgbe_mac_X550EM_a:
1775 	case ixgbe_mac_E610:
1776 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1777 			return (true);
1778 		return (false);
1779 	default:
1780 		return (false);
1781 	}
1782 } /* ixgbe_is_sfp */
1783 
1784 /************************************************************************
1785  * ixgbe_config_link
1786  ************************************************************************/
1787 static void
1788 ixgbe_config_link(if_ctx_t ctx)
1789 {
1790 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1791 	struct ixgbe_hw *hw = &sc->hw;
1792 	u32 autoneg, err = 0;
1793 	bool sfp, negotiate;
1794 
1795 	sfp = ixgbe_is_sfp(hw);
1796 
1797 	if (sfp) {
1798 		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
1799 		iflib_admin_intr_deferred(ctx);
1800 	} else {
1801 		if (hw->mac.ops.check_link)
1802 			err = ixgbe_check_link(hw, &sc->link_speed,
1803 			    &sc->link_up, false);
1804 		if (err)
1805 			return;
1806 		autoneg = hw->phy.autoneg_advertised;
1807 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1808 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1809 			    &negotiate);
1810 		if (err)
1811 			return;
1812 
1813 		if (hw->mac.type == ixgbe_mac_X550 &&
1814 		    hw->phy.autoneg_advertised == 0) {
1815 			/*
1816 			 * 2.5G and 5G autonegotiation speeds on X550
1817 			 * are disabled by default due to reported
1818 			 * interoperability issues with some switches.
1819 			 *
1820 			 * The second condition checks if any operations
1821 			 * involving setting autonegotiation speeds have
1822 			 * been performed prior to this ixgbe_config_link()
1823 			 * call.
1824 			 *
1825 			 * If hw->phy.autoneg_advertised does not
1826 			 * equal 0, this means that the user might have
1827 			 * set autonegotiation speeds via the sysctl
1828 			 * before bringing the interface up. In this
1829 			 * case, we should not disable 2.5G and 5G
1830 			 * since those speeds might have been selected
1831 			 * by the user.
1832 			 *
1833 			 * Otherwise (i.e. if hw->phy.autoneg_advertised
1834 			 * is set to 0), it is the first time we set
1835 			 * autonegotiation preferences and the default
1836 			 * set of speeds should exclude 2.5G and 5G.
1837 			 */
1838 			autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
1839 			    IXGBE_LINK_SPEED_5GB_FULL);
1840 		}
1841 
1842 		if (hw->mac.type == ixgbe_mac_E610) {
1843 			hw->phy.ops.init(hw);
1844 			err = ixgbe_enable_lse(sc);
1845 			if (err)
1846 				device_printf(sc->dev,
1847 				    "Failed to enable Link Status Event, "
1848 				    "error: %d\n", err);
1849 		}
1850 
1851 		if (hw->mac.ops.setup_link)
1852 			err = hw->mac.ops.setup_link(hw, autoneg,
1853 			    sc->link_up);
1854 	}
1855 } /* ixgbe_config_link */
1856 
1857 /************************************************************************
1858  * ixgbe_update_stats_counters - Update board statistics counters.
1859  ************************************************************************/
1860 static void
1861 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1862 {
1863 	struct ixgbe_hw *hw = &sc->hw;
1864 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
1865 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
1866 	u32 lxoffrxc;
1867 	u64 total_missed_rx = 0;
1868 
1869 	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1870 	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1871 	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1872 	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1873 	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1874 
1875 	for (int i = 0; i < 16; i++) {
1876 		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1877 		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1878 		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1879 	}
1880 	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1881 	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1882 	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1883 
1884 	/* Hardware workaround, gprc counts missed packets */
1885 	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1886 	stats->gprc -= missed_rx;
1887 
1888 	if (hw->mac.type != ixgbe_mac_82598EB) {
1889 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1890 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1891 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1892 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1893 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1894 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1895 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1896 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1897 		stats->lxoffrxc += lxoffrxc;
1898 	} else {
1899 		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1900 		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1901 		stats->lxoffrxc += lxoffrxc;
1902 		/* 82598 only has a counter in the high register */
1903 		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1904 		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1905 		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1906 	}
1907 
1908 	/*
1909 	 * For watchdog management we need to know if we have been paused
1910 	 * during the last interval, so capture that here.
1911 	 */
1912 	if (lxoffrxc)
1913 		sc->shared->isc_pause_frames = 1;
1914 
1915 	/*
1916 	 * Workaround: mprc hardware is incorrectly counting
1917 	 * broadcasts, so for now we subtract those.
1918 	 */
1919 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1920 	stats->bprc += bprc;
1921 	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1922 	if (hw->mac.type == ixgbe_mac_82598EB)
1923 		stats->mprc -= bprc;
1924 
1925 	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1926 	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1927 	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1928 	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1929 	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1930 	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1931 
1932 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1933 	stats->lxontxc += lxon;
1934 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1935 	stats->lxofftxc += lxoff;
1936 	total = lxon + lxoff;
1937 
1938 	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1939 	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1940 	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1941 	stats->gptc -= total;
1942 	stats->mptc -= total;
1943 	stats->ptc64 -= total;
1944 	stats->gotc -= total * ETHER_MIN_LEN;
1945 
1946 	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1947 	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1948 	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1949 	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1950 	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1951 	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1952 	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1953 	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1954 	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1955 	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1956 	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1957 	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1958 	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1959 	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1960 	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1961 	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1962 	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1963 	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1964 	/* Only read FCOE on 82599 */
1965 	if (hw->mac.type != ixgbe_mac_82598EB) {
1966 		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1967 		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1968 		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1969 		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1970 		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1971 	}
1972 
1973 	/* Fill out the OS statistics structure */
1974 	IXGBE_SET_IPACKETS(sc, stats->gprc);
1975 	IXGBE_SET_OPACKETS(sc, stats->gptc);
1976 	IXGBE_SET_IBYTES(sc, stats->gorc);
1977 	IXGBE_SET_OBYTES(sc, stats->gotc);
1978 	IXGBE_SET_IMCASTS(sc, stats->mprc);
1979 	IXGBE_SET_OMCASTS(sc, stats->mptc);
1980 	IXGBE_SET_COLLISIONS(sc, 0);
1981 	IXGBE_SET_IQDROPS(sc, total_missed_rx);
1982 
1983 	/*
1984 	 * Aggregate following types of errors as RX errors:
1985 	 * - CRC error count,
1986 	 * - illegal byte error count,
1987 	 * - missed packets count,
1988 	 * - length error count,
1989 	 * - undersized packets count,
1990 	 * - fragmented packets count,
1991 	 * - oversized packets count,
1992 	 * - jabber count.
1993 	 */
1994 	IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1995 	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
1996 	    stats->roc + stats->rjc);
1997 } /* ixgbe_update_stats_counters */
1998 
1999 /************************************************************************
2000  * ixgbe_add_hw_stats
2001  *
2002  *   Add sysctl variables, one per statistic, to the system.
2003  ************************************************************************/
2004 static void
2005 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
2006 {
2007 	device_t dev = iflib_get_dev(sc->ctx);
2008 	struct ix_rx_queue *rx_que;
2009 	struct ix_tx_queue *tx_que;
2010 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2011 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2012 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2013 	struct ixgbe_hw_stats *stats = &sc->stats.pf;
2014 	struct sysctl_oid *stat_node, *queue_node;
2015 	struct sysctl_oid_list *stat_list, *queue_list;
2016 	int i;
2017 
2018 #define QUEUE_NAME_LEN 32
2019 	char namebuf[QUEUE_NAME_LEN];
2020 
2021 	/* Driver Statistics */
2022 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2023 	    CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
2024 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2025 	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
2026 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
2027 	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
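	/*
	 * All nodes added below hang off the device's sysctl tree, so
	 * they surface as e.g. dev.ix.0.queue0.tx_packets or
	 * dev.ix.0.mac_stats.crc_errs (unit number 0 assumed here).
	 */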
2028 
2029 	for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
2030 	    i++, tx_que++) {
2031 		struct tx_ring *txr = &tx_que->txr;
2032 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
2033 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2034 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
2035 		queue_list = SYSCTL_CHILDREN(queue_node);
2036 
2037 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
2038 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
2039 		    ixgbe_sysctl_tdh_handler, "IU",
2040 		    "Transmit Descriptor Head");
2041 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
2042 		    CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
2043 		    ixgbe_sysctl_tdt_handler, "IU",
2044 		    "Transmit Descriptor Tail");
2045 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2046 		    CTLFLAG_RD, &txr->tso_tx, "TSO");
2047 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2048 		    CTLFLAG_RD, &txr->total_packets,
2049 		    "Queue Packets Transmitted");
2050 	}
2051 
2052 	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
2053 	    i++, rx_que++) {
2054 		struct rx_ring *rxr = &rx_que->rxr;
2055 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
2056 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2057 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
2058 		queue_list = SYSCTL_CHILDREN(queue_node);
2059 
2060 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
2061 		    CTLTYPE_UINT | CTLFLAG_RW,
2062 		    &sc->rx_queues[i], 0,
2063 		    ixgbe_sysctl_interrupt_rate_handler, "IU",
2064 		    "Interrupt Rate");
2065 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2066 		    CTLFLAG_RD, &(sc->rx_queues[i].irqs),
2067 		    "irqs on this queue");
2068 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
2069 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
2070 		    ixgbe_sysctl_rdh_handler, "IU",
2071 		    "Receive Descriptor Head");
2072 		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
2073 		    CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
2074 		    ixgbe_sysctl_rdt_handler, "IU",
2075 		    "Receive Descriptor Tail");
2076 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2077 		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
2078 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2079 		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
2080 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
2081 		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
2082 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2083 		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
2084 	}
2085 
2086 	/* MAC stats get their own sub node */
2087 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
2088 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
2089 	stat_list = SYSCTL_CHILDREN(stat_node);
2090 
2091 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
2092 	    CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
2093 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
2094 	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
2095 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
2096 	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
2097 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
2098 	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
2099 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
2100 	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
2101 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
2102 	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
2103 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
2104 	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
2105 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
2106 	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
2107 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
2108 	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
2109 
2110 	/* Flow Control stats */
2111 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
2112 	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
2113 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
2114 	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
2115 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
2116 	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
2117 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
2118 	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
2119 
2120 	/* Packet Reception Stats */
2121 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
2122 	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
2123 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2124 	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
2125 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
2126 	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
2127 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
2128 	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
2129 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2130 	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
2131 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
2132 	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
2133 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
2134 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
2135 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
2136 	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
2137 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
2138 	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
2139 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
2140 	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
2141 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
2142 	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
2143 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
2144 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
2145 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
2146 	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
2147 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
2148 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
2149 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
2150 	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
2151 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
2152 	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
2153 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
2154 	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
2155 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
2156 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
2157 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
2158 	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
2159 
2160 	/* Packet Transmission Stats */
2161 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2162 	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
2163 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
2164 	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
2165 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2166 	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
2167 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
2168 	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
2169 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
2170 	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
2171 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
2172 	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
2173 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
2174 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
2175 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
2176 	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
2177 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
2178 	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
2179 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
2180 	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
2181 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
2182 	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
2183 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
2184 	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
2185 } /* ixgbe_add_hw_stats */
2186 
2187 /************************************************************************
2188  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2189  *
2190  *   Retrieves the TDH value from the hardware
2191  ************************************************************************/
2192 static int
2193 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
2194 {
2195 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
2196 	int error;
2197 	unsigned int val;
2198 
2199 	if (!txr)
2200 		return (0);
2201 
2203 	if (atomic_load_acq_int(&txr->sc->recovery_mode))
2204 		return (EPERM);
2205 
2206 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
2207 	error = sysctl_handle_int(oidp, &val, 0, req);
2208 	if (error || !req->newptr)
2209 		return (error);
2210 
2211 	return (0);
2212 } /* ixgbe_sysctl_tdh_handler */
2213 
2214 /************************************************************************
2215  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2216  *
2217  *   Retrieves the TDT value from the hardware
2218  ************************************************************************/
2219 static int
2220 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
2221 {
2222 	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
2223 	int error;
2224 	unsigned int val;
2225 
2226 	if (!txr)
2227 		return (0);
2228 
2229 	if (atomic_load_acq_int(&txr->sc->recovery_mode))
2230 		return (EPERM);
2231 
2232 	val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
2233 	error = sysctl_handle_int(oidp, &val, 0, req);
2234 	if (error || !req->newptr)
2235 		return (error);
2236 
2237 	return (0);
2238 } /* ixgbe_sysctl_tdt_handler */
2239 
2240 /************************************************************************
2241  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2242  *
2243  *   Retrieves the RDH value from the hardware
2244  ************************************************************************/
2245 static int
2246 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
2247 {
2248 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
2249 	int error;
2250 	unsigned int val;
2251 
2252 	if (!rxr)
2253 		return (0);
2254 
2255 	if (atomic_load_acq_int(&rxr->sc->recovery_mode))
2256 		return (EPERM);
2257 
2258 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
2259 	error = sysctl_handle_int(oidp, &val, 0, req);
2260 	if (error || !req->newptr)
2261 		return (error);
2262 
2263 	return (0);
2264 } /* ixgbe_sysctl_rdh_handler */
2265 
2266 /************************************************************************
2267  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2268  *
2269  *   Retrieves the RDT value from the hardware
2270  ************************************************************************/
2271 static int
2272 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
2273 {
2274 	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
2275 	int error;
2276 	unsigned int val;
2277 
2278 	if (!rxr)
2279 		return (0);
2280 
2281 	if (atomic_load_acq_int(&rxr->sc->recovery_mode))
2282 		return (EPERM);
2283 
2284 	val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
2285 	error = sysctl_handle_int(oidp, &val, 0, req);
2286 	if (error || !req->newptr)
2287 		return (error);
2288 
2289 	return (0);
2290 } /* ixgbe_sysctl_rdt_handler */
2291 
2292 /************************************************************************
2293  * ixgbe_if_vlan_register
2294  *
2295  *   Run via the vlan config EVENT; it enables us to use the
2296  *   HW Filter table since we can get the vlan id. This
2297  *   just creates the entry in the soft version of the
2298  *   VFTA; init will repopulate the real table.
2299  ************************************************************************/
2300 static void
2301 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
2302 {
2303 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2304 	u16 index, bit;
2305 
2306 	index = (vtag >> 5) & 0x7F;
2307 	bit = vtag & 0x1F;
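	/* e.g. vtag 100 -> index 3, bit 4: 128 words x 32 bits = 4096 vlans */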
2308 	sc->shadow_vfta[index] |= (1 << bit);
2309 	++sc->num_vlans;
2310 	ixgbe_setup_vlan_hw_support(ctx);
2311 } /* ixgbe_if_vlan_register */
2312 
2313 /************************************************************************
2314  * ixgbe_if_vlan_unregister
2315  *
2316  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2317  ************************************************************************/
2318 static void
2319 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
2320 {
2321 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2322 	u16 index, bit;
2323 
2324 	index = (vtag >> 5) & 0x7F;
2325 	bit = vtag & 0x1F;
2326 	sc->shadow_vfta[index] &= ~(1 << bit);
2327 	--sc->num_vlans;
2328 	/* Re-init to load the changes */
2329 	ixgbe_setup_vlan_hw_support(ctx);
2330 } /* ixgbe_if_vlan_unregister */
2331 
2332 /************************************************************************
2333  * ixgbe_setup_vlan_hw_support
2334  ************************************************************************/
2335 static void
2336 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
2337 {
2338 	if_t ifp = iflib_get_ifp(ctx);
2339 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2340 	struct ixgbe_hw *hw = &sc->hw;
2341 	struct rx_ring *rxr;
2342 	int i;
2343 	u32 ctrl;
2344 
2346 	/*
2347 	 * We get here through init_locked, meaning
2348 	 * a soft reset; this has already cleared
2349 	 * the VFTA and other state, so if no vlans
2350 	 * have been registered, do nothing.
2351 	 */
2352 	if (sc->num_vlans == 0 ||
2353 	    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
2354 		/* Clear the vlan hw flag */
2355 		for (i = 0; i < sc->num_rx_queues; i++) {
2356 			rxr = &sc->rx_queues[i].rxr;
2357 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
2358 			if (hw->mac.type != ixgbe_mac_82598EB) {
2359 				ctrl = IXGBE_READ_REG(hw,
2360 				    IXGBE_RXDCTL(rxr->me));
2361 				ctrl &= ~IXGBE_RXDCTL_VME;
2362 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2363 				    ctrl);
2364 			}
2365 			rxr->vtag_strip = false;
2366 		}
2367 		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2368 		/* Disable the Filter Table */
2369 		ctrl |= IXGBE_VLNCTRL_CFIEN;
2370 		ctrl &= ~IXGBE_VLNCTRL_VFE;
2371 		if (hw->mac.type == ixgbe_mac_82598EB)
2372 			ctrl &= ~IXGBE_VLNCTRL_VME;
2373 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2374 		return;
2375 	}
2376 
2377 	/* Setup the queues for vlans */
2378 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
2379 		for (i = 0; i < sc->num_rx_queues; i++) {
2380 			rxr = &sc->rx_queues[i].rxr;
2381 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
2382 			if (hw->mac.type != ixgbe_mac_82598EB) {
2383 				ctrl = IXGBE_READ_REG(hw,
2384 				    IXGBE_RXDCTL(rxr->me));
2385 				ctrl |= IXGBE_RXDCTL_VME;
2386 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2387 				    ctrl);
2388 			}
2389 			rxr->vtag_strip = true;
2390 		}
2391 	}
2392 
2393 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
2394 		return;
2395 	/*
2396 	 * A soft reset zeroes out the VFTA, so
2397 	 * we need to repopulate it now.
2398 	 */
2399 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2400 		if (sc->shadow_vfta[i] != 0)
2401 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2402 			    sc->shadow_vfta[i]);
2403 
2404 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2405 	/* Enable the Filter Table if enabled */
2406 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
2407 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2408 		ctrl |= IXGBE_VLNCTRL_VFE;
2409 	}
2410 	if (hw->mac.type == ixgbe_mac_82598EB)
2411 		ctrl |= IXGBE_VLNCTRL_VME;
2412 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2413 } /* ixgbe_setup_vlan_hw_support */
2414 
2415 /************************************************************************
2416  * ixgbe_get_slot_info
2417  *
2418  *   Get the width and transaction speed of
2419  *   the slot this adapter is plugged into.
2420  ************************************************************************/
2421 static void
2422 ixgbe_get_slot_info(struct ixgbe_softc *sc)
2423 {
2424 	device_t dev = iflib_get_dev(sc->ctx);
2425 	struct ixgbe_hw *hw = &sc->hw;
2426 	int bus_info_valid = true;
2427 	u32 offset;
2428 	u16 link;
2429 
2430 	/* Some devices are behind an internal bridge */
2431 	switch (hw->device_id) {
2432 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
2433 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2434 		goto get_parent_info;
2435 	default:
2436 		break;
2437 	}
2438 
2439 	ixgbe_get_bus_info(hw);
2440 
2441 	/*
2442 	 * Some devices don't use PCI-E, so there is no need
2443 	 * to display "Unknown" for bus speed and width.
2444 	 */
2445 	switch (hw->mac.type) {
2446 	case ixgbe_mac_X550EM_x:
2447 	case ixgbe_mac_X550EM_a:
2448 		return;
2449 	default:
2450 		goto display;
2451 	}
2452 
2453 get_parent_info:
2454 	/*
2455 	 * For the Quad port adapter we need to parse back
2456 	 * up the PCI tree to find the speed of the expansion
2457 	 * slot into which this adapter is plugged. A bit more work.
2458 	 */
2459 	dev = device_get_parent(device_get_parent(dev));
2460 #ifdef IXGBE_DEBUG
2461 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2462 	    pci_get_slot(dev), pci_get_function(dev));
2463 #endif
2464 	dev = device_get_parent(device_get_parent(dev));
2465 #ifdef IXGBE_DEBUG
2466 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2467 	    pci_get_slot(dev), pci_get_function(dev));
2468 #endif
2469 	/* Now get the PCI Express Capabilities offset */
2470 	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
2471 		/*
2472 		 * Hmm...can't get PCI-Express capabilities.
2473 		 * Falling back to default method.
2474 		 */
2475 		bus_info_valid = false;
2476 		ixgbe_get_bus_info(hw);
2477 		goto display;
2478 	}
2479 	/* ...and read the Link Status Register */
2480 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2481 	ixgbe_set_pci_config_data_generic(hw, link);
2482 
2483 display:
2484 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2485 	    ((hw->bus.speed == ixgbe_bus_speed_16000)   ? "16.0GT/s" :
2486 	     (hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
2487 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
2488 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
2489 	     "Unknown"),
2490 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2491 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2492 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2493 	     "Unknown"));
2494 
2495 	if (bus_info_valid) {
2496 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2497 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2498 		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
2499 			device_printf(dev,
2500 			    "PCI-Express bandwidth available for this card"
2501 			    " is not sufficient for optimal performance.\n");
2502 			device_printf(dev,
2503 			    "For optimal performance a x8 PCIE, or x4 PCIE"
2504 			    " Gen2 slot is required.\n");
2505 		}
2506 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2507 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2508 		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
2509 			device_printf(dev,
2510 			    "PCI-Express bandwidth available for this card"
2511 			    " is not sufficient for optimal performance.\n");
2512 			device_printf(dev,
2513 			    "For optimal performance a x8 PCIE Gen3 slot is"
2514 			    " required.\n");
2515 		}
2516 	} else
2517 		device_printf(dev,
2518 		    "Unable to determine slot speed/width. The speed/width"
2519 		    " reported are that of the internal switch.\n");
2520 
2521 	return;
2522 } /* ixgbe_get_slot_info */
2523 
2524 /************************************************************************
2525  * ixgbe_if_msix_intr_assign
2526  *
2527  *   Setup MSI-X Interrupt resources and handlers
2528  ************************************************************************/
2529 static int
2530 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2531 {
2532 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2533 	struct ix_rx_queue *rx_que = sc->rx_queues;
2534 	struct ix_tx_queue *tx_que;
2535 	int error, rid, vector = 0;
2536 	char buf[16];
2537 
2538 	/* Admin queue is vector 0 */
2539 	rid = vector + 1;
2540 	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2541 		rid = vector + 1;
2542 
2543 		snprintf(buf, sizeof(buf), "rxq%d", i);
2544 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2545 		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
2546 		    buf);
2547 
2548 		if (error) {
2549 			device_printf(iflib_get_dev(ctx),
2550 			    "Failed to allocate que int %d err: %d\n",
2551 			    i, error);
2552 			sc->num_rx_queues = i + 1;
2553 			goto fail;
2554 		}
2555 
2556 		rx_que->msix = vector;
2557 	}
2558 	for (int i = 0; i < sc->num_tx_queues; i++) {
2559 		snprintf(buf, sizeof(buf), "txq%d", i);
2560 		tx_que = &sc->tx_queues[i];
2561 		tx_que->msix = i % sc->num_rx_queues;
2562 		iflib_softirq_alloc_generic(ctx,
2563 		    &sc->rx_queues[tx_que->msix].que_irq,
2564 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2565 	}
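	/*
	 * TX queues piggyback on the RX vectors round-robin above; e.g.
	 * with 4 RX and 8 TX queues, txq4..txq7 share vectors 0..3.
	 */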
2566 	rid = vector + 1;
2567 	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2568 	    IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2569 	if (error) {
2570 		device_printf(iflib_get_dev(ctx),
2571 		    "Failed to register admin handler\n");
2572 		return (error);
2573 	}
2574 
2575 	sc->vector = vector;
2576 
2577 	return (0);
2578 fail:
2579 	iflib_irq_free(ctx, &sc->irq);
2580 	rx_que = sc->rx_queues;
2581 	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2582 		iflib_irq_free(ctx, &rx_que->que_irq);
2583 
2584 	return (error);
2585 } /* ixgbe_if_msix_intr_assign */
2586 
2587 static inline void
2588 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2589 {
2590 	uint32_t newitr = 0;
2591 	struct rx_ring *rxr = &que->rxr;
2592 	/* FIXME struct tx_ring *txr = ... ->txr; */
2593 
2594 	/*
2595 	 * Do Adaptive Interrupt Moderation:
2596 	 *  - Write out last calculated setting
2597 	 *  - Calculate based on average size over
2598 	 *    the last interval.
2599 	 */
2600 	if (que->eitr_setting) {
2601 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2602 		    que->eitr_setting);
2603 	}
2604 
2605 	que->eitr_setting = 0;
2606 	/* Idle, do nothing */
2607 	if (rxr->bytes == 0) {
2608 		/* FIXME && txr->bytes == 0 */
2609 		return;
2610 	}
2611 
2612 	if ((rxr->bytes) && (rxr->packets))
2613 		newitr = rxr->bytes / rxr->packets;
2614 	/* FIXME for transmit accounting
2615 	 * if ((txr->bytes) && (txr->packets))
2616 	 * 	newitr = txr->bytes/txr->packets;
2617 	 * if ((rxr->bytes) && (rxr->packets))
2618 	 * 	newitr = max(newitr, (rxr->bytes / rxr->packets));
2619 	 */
2620 
2621 	newitr += 24; /* account for inter-frame gap, preamble and CRC */
2622 	/* set an upper boundary */
2623 	newitr = min(newitr, 3000);
2624 
2625 	/* Be nice to the mid range */
2626 	if ((newitr > 300) && (newitr < 1200)) {
2627 		newitr = (newitr / 3);
2628 	} else {
2629 		newitr = (newitr / 2);
2630 	}
2631 
2632 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2633 		newitr |= newitr << 16;
2634 	} else {
2635 		newitr |= IXGBE_EITR_CNT_WDIS;
2636 	}
2637 
2638 	/* save for next interrupt */
2639 	que->eitr_setting = newitr;
2640 
2641 	/* Reset state */
2642 	/* FIXME txr->bytes = 0; */
2643 	/* FIXME txr->packets = 0; */
2644 	rxr->bytes = 0;
2645 	rxr->packets = 0;
2646 
2647 	return;
2648 }
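/*
 * Worked example of the AIM calculation above: a queue averaging
 * 1500-byte frames yields newitr = 1500 + 24 = 1524; that falls above
 * the 300-1200 mid range, so it is halved to 762 before being latched
 * into EITR on the next interrupt.
 */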
2649 
2650 /*********************************************************************
2651  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2652  **********************************************************************/
2653 static int
2654 ixgbe_msix_que(void *arg)
2655 {
2656 	struct ix_rx_queue *que = arg;
2657 	struct ixgbe_softc *sc = que->sc;
2658 	if_t ifp = iflib_get_ifp(que->sc->ctx);
2659 
2660 	/* Protect against spurious interrupts */
2661 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2662 		return (FILTER_HANDLED);
2663 
2664 	ixgbe_disable_queue(sc, que->msix);
2665 	++que->irqs;
2666 
2667 	/* Check for AIM */
2668 	if (sc->enable_aim) {
2669 		ixgbe_perform_aim(sc, que);
2670 	}
2671 
2672 	return (FILTER_SCHEDULE_THREAD);
2673 } /* ixgbe_msix_que */
2674 
2675 /************************************************************************
2676  * ixgbe_if_media_status - Media Ioctl callback
2677  *
2678  *   Called whenever the user queries the status of
2679  *   the interface using ifconfig.
2680  ************************************************************************/
2681 static void
2682 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
2683 {
2684 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2685 	struct ixgbe_hw *hw = &sc->hw;
2686 	int layer;
2687 
2688 	INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2689 
2690 	ifmr->ifm_status = IFM_AVALID;
2691 	ifmr->ifm_active = IFM_ETHER;
2692 
2693 	if (!sc->link_active)
2694 		return;
2695 
2696 	ifmr->ifm_status |= IFM_ACTIVE;
2697 	layer = sc->phy_layer;
2698 
2699 	if (layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)
2700 		switch (sc->link_speed) {
2701 		case IXGBE_LINK_SPEED_10GB_FULL:
2702 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2703 			break;
2704 		case IXGBE_LINK_SPEED_5GB_FULL:
2705 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2706 			break;
2707 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2708 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2709 			break;
2710 		case IXGBE_LINK_SPEED_1GB_FULL:
2711 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2712 			break;
2713 		case IXGBE_LINK_SPEED_100_FULL:
2714 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2715 			break;
2716 		case IXGBE_LINK_SPEED_10_FULL:
2717 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2718 			break;
2719 		}
2720 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2721 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2722 		switch (sc->link_speed) {
2723 		case IXGBE_LINK_SPEED_10GB_FULL:
2724 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2725 			break;
2726 		case IXGBE_LINK_SPEED_1GB_FULL:
2727 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2728 			break;
2729 		}
2730 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2731 		switch (sc->link_speed) {
2732 		case IXGBE_LINK_SPEED_10GB_FULL:
2733 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2734 			break;
2735 		case IXGBE_LINK_SPEED_1GB_FULL:
2736 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2737 			break;
2738 		}
2739 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2740 		switch (sc->link_speed) {
2741 		case IXGBE_LINK_SPEED_10GB_FULL:
2742 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2743 			break;
2744 		case IXGBE_LINK_SPEED_1GB_FULL:
2745 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2746 			break;
2747 		}
2748 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2749 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2750 		switch (sc->link_speed) {
2751 		case IXGBE_LINK_SPEED_10GB_FULL:
2752 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2753 			break;
2754 		case IXGBE_LINK_SPEED_1GB_FULL:
2755 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2756 			break;
2757 		}
2758 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2759 		switch (sc->link_speed) {
2760 		case IXGBE_LINK_SPEED_10GB_FULL:
2761 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2762 			break;
2763 		}
2764 	/*
2765 	 * XXX: These need to use the proper media types once
2766 	 * they're added.
2767 	 */
2768 #ifndef IFM_ETH_XTYPE
2769 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2770 		switch (sc->link_speed) {
2771 		case IXGBE_LINK_SPEED_10GB_FULL:
2772 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2773 			break;
2774 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2775 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2776 			break;
2777 		case IXGBE_LINK_SPEED_1GB_FULL:
2778 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2779 			break;
2780 		}
2781 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2782 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2783 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2784 		switch (sc->link_speed) {
2785 		case IXGBE_LINK_SPEED_10GB_FULL:
2786 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2787 			break;
2788 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2789 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2790 			break;
2791 		case IXGBE_LINK_SPEED_1GB_FULL:
2792 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2793 			break;
2794 		}
2795 #else
2796 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2797 		switch (sc->link_speed) {
2798 		case IXGBE_LINK_SPEED_10GB_FULL:
2799 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2800 			break;
2801 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2802 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2803 			break;
2804 		case IXGBE_LINK_SPEED_1GB_FULL:
2805 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2806 			break;
2807 		}
2808 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2809 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2810 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2811 		switch (sc->link_speed) {
2812 		case IXGBE_LINK_SPEED_10GB_FULL:
2813 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2814 			break;
2815 		case IXGBE_LINK_SPEED_2_5GB_FULL:
2816 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2817 			break;
2818 		case IXGBE_LINK_SPEED_1GB_FULL:
2819 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2820 			break;
2821 		}
2822 #endif
2823 
2824 	/* If nothing is recognized... */
2825 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2826 		ifmr->ifm_active |= IFM_UNKNOWN;
2827 
2828 	/* Display current flow control setting used on link */
2829 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2830 	    hw->fc.current_mode == ixgbe_fc_full)
2831 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2832 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2833 	    hw->fc.current_mode == ixgbe_fc_full)
2834 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2835 } /* ixgbe_if_media_status */
2836 
2837 /************************************************************************
2838  * ixgbe_if_media_change - Media Ioctl callback
2839  *
2840  *   Called when the user changes speed/duplex using
2841  *   media/mediaopt option with ifconfig.
2842  ************************************************************************/
2843 static int
2844 ixgbe_if_media_change(if_ctx_t ctx)
2845 {
2846 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2847 	struct ifmedia *ifm = iflib_get_media(ctx);
2848 	struct ixgbe_hw *hw = &sc->hw;
2849 	ixgbe_link_speed speed = 0;
2850 
2851 	INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2852 
2853 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2854 		return (EINVAL);
2855 
2856 	if (hw->phy.media_type == ixgbe_media_type_backplane)
2857 		return (EPERM);
2858 
2859 	/*
2860 	 * We don't actually need to check against the supported
2861 	 * media types of the adapter; ifmedia will take care of
2862 	 * that for us.
2863 	 */
2864 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2865 	case IFM_AUTO:
2866 	case IFM_10G_T:
2867 		speed |= IXGBE_LINK_SPEED_100_FULL;
2868 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2869 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2870 		break;
2871 	case IFM_10G_LRM:
2872 	case IFM_10G_LR:
2873 #ifndef IFM_ETH_XTYPE
2874 	case IFM_10G_SR: /* KR, too */
2875 	case IFM_10G_CX4: /* KX4 */
2876 #else
2877 	case IFM_10G_KR:
2878 	case IFM_10G_KX4:
2879 #endif
2880 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2881 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2882 		break;
2883 #ifndef IFM_ETH_XTYPE
2884 	case IFM_1000_CX: /* KX */
2885 #else
2886 	case IFM_1000_KX:
2887 #endif
2888 	case IFM_1000_LX:
2889 	case IFM_1000_SX:
2890 	case IFM_1000_BX:
2891 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2892 		break;
2893 	case IFM_1000_T:
2894 		speed |= IXGBE_LINK_SPEED_100_FULL;
2895 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
2896 		break;
2897 	case IFM_10G_TWINAX:
2898 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
2899 		break;
2900 	case IFM_5000_T:
2901 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
2902 		break;
2903 	case IFM_2500_T:
2904 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2905 		break;
2906 	case IFM_100_TX:
2907 		speed |= IXGBE_LINK_SPEED_100_FULL;
2908 		break;
2909 	case IFM_10_T:
2910 		speed |= IXGBE_LINK_SPEED_10_FULL;
2911 		break;
2912 	default:
2913 		goto invalid;
2914 	}
2915 
2916 	hw->mac.autotry_restart = true;
2917 	hw->mac.ops.setup_link(hw, speed, true);
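	/*
	 * Record the result using the same bit encoding as the driver's
	 * advertise_speed sysctl (0x1 = 100M, 0x2 = 1G, 0x4 = 10G,
	 * 0x8 = 10M, 0x10 = 2.5G, 0x20 = 5G).
	 */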
2918 	sc->advertise =
2919 	    ((speed & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
2920 	    ((speed & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
2921 	    ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
2922 	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
2923 	    ((speed & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
2924 	    ((speed & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);
2925 
2926 	return (0);
2927 
2928 invalid:
2929 	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2930 
2931 	return (EINVAL);
2932 } /* ixgbe_if_media_change */
2933 
2934 /************************************************************************
2935  * ixgbe_if_promisc_set
2936  ************************************************************************/
2937 static int
2938 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2939 {
2940 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
2941 	if_t ifp = iflib_get_ifp(ctx);
2942 	u32 rctl;
2943 	int mcnt = 0;
2944 
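	/*
	 * FCTRL_UPE and FCTRL_MPE are the unicast/multicast promiscuous
	 * enables in the Filter Control register; start from the current
	 * value with unicast promiscuous cleared.
	 */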
2945 	rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2946 	rctl &= (~IXGBE_FCTRL_UPE);
2947 	if (if_getflags(ifp) & IFF_ALLMULTI)
2948 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2949 	else {
2950 		mcnt = min(if_llmaddr_count(ifp),
2951 		    MAX_NUM_MULTICAST_ADDRESSES);
2952 	}
2953 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2954 		rctl &= (~IXGBE_FCTRL_MPE);
2955 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2956 
2957 	if (if_getflags(ifp) & IFF_PROMISC) {
2958 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2959 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2960 	} else if (if_getflags(ifp) & IFF_ALLMULTI) {
2961 		rctl |= IXGBE_FCTRL_MPE;
2962 		rctl &= ~IXGBE_FCTRL_UPE;
2963 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2964 	}
2965 	return (0);
2966 } /* ixgbe_if_promisc_set */
2967 
2968 /************************************************************************
2969  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2970  ************************************************************************/
2971 static int
2972 ixgbe_msix_link(void *arg)
2973 {
2974 	struct ixgbe_softc *sc = arg;
2975 	struct ixgbe_hw *hw = &sc->hw;
2976 	u32 eicr, eicr_mask;
2977 	s32 retval;
2978 
2979 	++sc->link_irq;
2980 
2981 	/* Pause other interrupts */
2982 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2983 
2984 	/* First get the cause */
2985 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2986 	/* Be sure the queue bits are not cleared */
2987 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
2988 	/* Clear interrupt with write */
2989 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2990 
2991 	/* Link status change */
2992 	if (eicr & IXGBE_EICR_LSC) {
2993 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2994 		sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
2995 	}
2996 
2997 	if (eicr & IXGBE_EICR_FW_EVENT) {
2998 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FW_EVENT);
2999 		sc->task_requests |= IXGBE_REQUEST_TASK_FWEVENT;
3000 	}
3001 
3002 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3003 		if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
3004 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
3005 			/* This is probably overkill :) */
3006 			if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
3007 				return (FILTER_HANDLED);
3008 			/* Disable the interrupt */
3009 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
3010 			sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
3011 		} else
3012 			if (eicr & IXGBE_EICR_ECC) {
3013 				device_printf(iflib_get_dev(sc->ctx),
3014 				    "Received ECC Err, initiating reset\n");
3015 				hw->mac.flags |=
3016 				    IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3017 				ixgbe_reset_hw(hw);
3018 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
3019 				    IXGBE_EICR_ECC);
3020 			}
3021 
3022 		/* Check for over temp condition */
3023 		if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3024 			switch (sc->hw.mac.type) {
3025 			case ixgbe_mac_X550EM_a:
3026 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3027 					break;
3028 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3029 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
3030 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
3031 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
3032 				retval = hw->phy.ops.check_overtemp(hw);
3033 				if (retval != IXGBE_ERR_OVERTEMP)
3034 					break;
3035 				device_printf(iflib_get_dev(sc->ctx),
3036 				    "\nCRITICAL: OVER TEMP!!"
3037 				    " PHY IS SHUT DOWN!!\n");
3038 				device_printf(iflib_get_dev(sc->ctx),
3039 				    "System shutdown required!\n");
3040 				break;
3041 			default:
3042 				if (!(eicr & IXGBE_EICR_TS))
3043 					break;
3044 				retval = hw->phy.ops.check_overtemp(hw);
3045 				if (retval != IXGBE_ERR_OVERTEMP)
3046 					break;
3047 				device_printf(iflib_get_dev(sc->ctx),
3048 				    "\nCRITICAL: OVER TEMP!!"
3049 				    " PHY IS SHUT DOWN!!\n");
3050 				device_printf(iflib_get_dev(sc->ctx),
3051 				    "System shutdown required!\n");
3052 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
3053 				    IXGBE_EICR_TS);
3054 				break;
3055 			}
3056 		}
3057 
3058 		/* Check for VF message */
3059 		if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
3060 		    (eicr & IXGBE_EICR_MAILBOX)) {
3061 			sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
3062 		}
3063 	}
3064 
3065 	/*
3066 	 * On E610, the firmware handles PHY configuration, so
3067 	 * there is no need to perform any SFP-specific tasks.
3068 	 */
3069 	if (hw->mac.type != ixgbe_mac_E610 && ixgbe_is_sfp(hw)) {
3070 		/* Pluggable optics-related interrupt */
3071 		if (hw->mac.type >= ixgbe_mac_X540)
3072 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3073 		else
3074 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3075 
3076 		if (eicr & eicr_mask) {
3077 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3078 			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
3079 		}
3080 
3081 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3082 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3083 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3084 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3085 			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3086 		}
3087 	}
3088 
3089 	/* Check for fan failure */
3090 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3091 		ixgbe_check_fan_failure(sc, eicr, true);
3092 		IXGBE_WRITE_REG(hw, IXGBE_EICR,
3093 		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3094 	}
3095 
3096 	/* External PHY interrupt */
3097 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3098 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3099 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3100 		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
3101 	}
3102 
3103 	return (sc->task_requests != 0) ?
3104 	    FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
3105 } /* ixgbe_msix_link */
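/*
 * Cause-to-task mapping implemented by the handler above, consumed
 * later from ixgbe_if_update_admin_status() once iflib runs the admin
 * task (FILTER_SCHEDULE_THREAD):
 *
 *   IXGBE_EICR_LSC              -> IXGBE_REQUEST_TASK_LSC
 *   IXGBE_EICR_FW_EVENT         -> IXGBE_REQUEST_TASK_FWEVENT
 *   IXGBE_EICR_FLOW_DIR         -> IXGBE_REQUEST_TASK_FDIR
 *   IXGBE_EICR_MAILBOX          -> IXGBE_REQUEST_TASK_MBX
 *   SDP0/SDP2 (module insert)   -> IXGBE_REQUEST_TASK_MOD
 *   SDP1 on 82599 (multispeed)  -> IXGBE_REQUEST_TASK_MSF
 *   SDP0_X540 (external PHY)    -> IXGBE_REQUEST_TASK_PHY
 */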
3106 
3107 /************************************************************************
3108  * ixgbe_sysctl_interrupt_rate_handler
3109  ************************************************************************/
3110 static int
3111 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3112 {
3113 	struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
3114 	int error;
3115 	unsigned int reg, usec, rate;
3116 
3117 	if (atomic_load_acq_int(&que->sc->recovery_mode))
3118 		return (EPERM);
3119 
3120 	reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
3121 	usec = ((reg & 0x0FF8) >> 3);
3122 	if (usec > 0)
3123 		rate = 500000 / usec;
3124 	else
3125 		rate = 0;
3126 	error = sysctl_handle_int(oidp, &rate, 0, req);
3127 	if (error || !req->newptr)
3128 		return error;
3129 	reg &= ~0xfff; /* default, no limitation */
3130 	ixgbe_max_interrupt_rate = 0;
3131 	if (rate > 0 && rate < 500000) {
3132 		if (rate < 1000)
3133 			rate = 1000;
3134 		ixgbe_max_interrupt_rate = rate;
3135 		reg |= ((4000000/rate) & 0xff8);
3136 	}
3137 	IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
3138 
3139 	return (0);
3140 } /* ixgbe_sysctl_interrupt_rate_handler */
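/*
 * Worked example of the EITR round-trip above (a sketch; the layout
 * follows the masks/shifts in the code, with the interval held in
 * bits [11:3]): writing rate = 8000 stores (4000000 / 8000) & 0xff8 =
 * 0x1f0, which reads back as usec = 0x1f0 >> 3 = 62 and reports
 * 500000 / 62 ~= 8064 interrupts/s, i.e. the requested rate is
 * rounded to the nearest value the register can represent.
 */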
3141 
3142 /************************************************************************
3143  * ixgbe_debug_dump_print_cluster
3144  ************************************************************************/
3145 static u8
3146 ixgbe_debug_dump_print_cluster(struct ixgbe_softc *sc, struct sbuf *sbuf,
3147     u8 cluster_id)
3148 {
3149 	u16 data_buf_size = IXGBE_ACI_MAX_BUFFER_SIZE;
3150 	device_t dev = sc->dev;
3151 	struct ixgbe_hw *hw = &sc->hw;
3152 	const u8 reserved_buf[8] = {};
3153 	int max_aci_calls = 1000;
3154 	int error, counter = 0;
3155 	u8 *data_buf;
3156 
3157 	/* Input parameters / loop variables */
3158 	u16 table_id = 0;
3159 	u32 offset = 0;
3160 
3161 	/* Data returned from ACI command */
3162 	u16 ret_buf_size = 0;
3163 	u16 ret_next_cluster = 0;
3164 	u16 ret_next_table = 0;
3165 	u32 ret_next_index = 0;
3166 
3167 	data_buf = (u8 *)malloc(data_buf_size, M_IXGBE, M_NOWAIT | M_ZERO);
3168 	if (!data_buf)
3169 		return (0);
3170 
3171 	DEBUGOUT2("%s: dumping cluster id (relative) %d\n",
3172 	    __func__, cluster_id);
3173 
3174 	do {
3175 		DEBUGOUT3("table_id 0x%04x offset 0x%08x buf_size %d\n",
3176 		    table_id, offset, data_buf_size);
3177 
3178 		error = ixgbe_aci_get_internal_data(hw, cluster_id, table_id,
3179 		    offset, data_buf, data_buf_size, &ret_buf_size,
3180 		    &ret_next_cluster, &ret_next_table, &ret_next_index);
3181 		if (error) {
3182 			device_printf(dev,
3183 			    "%s: Failed to get internal FW/HW data, error: %d, "
3184 			    "last aci status: %d\n",
3185 			    __func__, error, hw->aci.last_status);
3186 			break;
3187 		}
3188 
3189 		DEBUGOUT3("ret_table_id 0x%04x ret_offset 0x%08x "
3190 		    "ret_buf_size %d\n",
3191 		    ret_next_table, ret_next_index, ret_buf_size);
3192 
3193 		/* Print cluster id */
3194 		u32 print_cluster_id = (u32)cluster_id;
3195 		sbuf_bcat(sbuf, &print_cluster_id, sizeof(print_cluster_id));
3196 		/* Print table id */
3197 		u32 print_table_id = (u32)table_id;
3198 		sbuf_bcat(sbuf, &print_table_id, sizeof(print_table_id));
3199 		/* Print table length */
3200 		u32 print_table_length = (u32)ret_buf_size;
3201 		sbuf_bcat(sbuf, &print_table_length,
3202 		    sizeof(print_table_length));
3203 		/* Print current offset */
3204 		u32 print_curr_offset = offset;
3205 		sbuf_bcat(sbuf, &print_curr_offset, sizeof(print_curr_offset));
3206 		/* Print reserved bytes */
3207 		sbuf_bcat(sbuf, reserved_buf, sizeof(reserved_buf));
3208 		/* Print data */
3209 		sbuf_bcat(sbuf, data_buf, ret_buf_size);
3210 
3211 		/* Prepare for the next loop spin */
3212 		memset(data_buf, 0, data_buf_size);
3213 
3214 		bool last_index = (ret_next_index == 0xffffffff);
3215 		bool last_table = ((ret_next_table == 0xff ||
3216 				    ret_next_table == 0xffff) &&
3217 				   last_index);
3218 
3219 		if (last_table) {
3220 			/* End of the cluster */
3221 			DEBUGOUT1("End of the cluster ID %d\n", cluster_id);
3222 			break;
3223 		} else if (last_index) {
3224 			/* End of the table */
3225 			table_id = ret_next_table;
3226 			offset = 0;
3227 		} else {
3228 			/* More data left in the table */
3229 			offset = ret_next_index;
3230 		}
3231 	} while (++counter < max_aci_calls);
3232 
3233 	if (counter >= max_aci_calls)
3234 		device_printf(dev, "Exceeded nr of ACI calls for cluster %d\n",
3235 		    cluster_id);
3236 
3237 	free(data_buf, M_IXGBE);
3238 
3239 	return (++cluster_id);
3240 } /* ixgbe_debug_dump_print_cluster */
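/*
 * For reference, each loop iteration above appends one record to the
 * sbuf, so the finished dump is a sequence of:
 *
 *   u32 cluster_id | u32 table_id | u32 length | u32 offset |
 *   u8 reserved[8] | u8 data[length]
 *
 * with all fields emitted in host byte order via sbuf_bcat().
 */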
3241 
3242 /************************************************************************
3243  * ixgbe_sysctl_debug_dump_set_clusters
3244  *
3245  *   Sets the cluster to dump from FW when Debug Dump requested.
3246  ************************************************************************/
3247 static int
3248 ixgbe_sysctl_debug_dump_set_clusters(SYSCTL_HANDLER_ARGS)
3249 {
3250 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
3251 	u32 clusters = sc->debug_dump_cluster_mask;
3252 	device_t dev = sc->dev;
3253 	int error;
3254 
3255 	error = sysctl_handle_32(oidp, &clusters, 0, req);
3256 	if ((error) || !req->newptr)
3257 		return (error);
3258 
3259 	if (clusters & ~(IXGBE_DBG_DUMP_VALID_CLUSTERS_MASK)) {
3260 		device_printf(dev,
3261 		    "%s: Unrecognized parameter: %u\n",
3262 		    __func__, clusters);
3263 		sc->debug_dump_cluster_mask =
3264 			IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID;
3265 		return (EINVAL);
3266 	}
3267 
3268 	sc->debug_dump_cluster_mask = clusters;
3269 
3270 	return (0);
3271 } /* ixgbe_sysctl_debug_dump_set_clusters */
3272 
3273 /************************************************************************
3274  * ixgbe_sysctl_dump_debug_dump
3275  ************************************************************************/
3276 static int
3277 ixgbe_sysctl_dump_debug_dump(SYSCTL_HANDLER_ARGS)
3278 {
3279 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
3280 	device_t dev = sc->dev;
3281 	struct sbuf *sbuf;
3282 	int error = 0;
3283 
3284 	UNREFERENCED_PARAMETER(arg2);
3285 
3286 	if (!sc->do_debug_dump) {
3287 		if (req->oldptr == NULL && req->newptr == NULL) {
3288 			error = SYSCTL_OUT(req, 0, 0);
3289 			return (error);
3290 		}
3291 
3292 		char input_buf[2] = "";
3293 		error = sysctl_handle_string(oidp, input_buf,
3294 				sizeof(input_buf), req);
3295 		if ((error) || (req->newptr == NULL))
3296 			return (error);
3297 
3298 		if (input_buf[0] == '1') {
3299 			if (sc->debug_dump_cluster_mask ==
3300 				IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID) {
3301 				device_printf(dev,
3302 				    "Debug Dump failed because an invalid "
3303 				    "cluster was specified.\n");
3304 				return (EINVAL);
3305 			}
3306 
3307 			sc->do_debug_dump = true;
3308 			return (0);
3309 		}
3310 
3311 		return (EINVAL);
3312 	}
3313 
3314 	/* Caller just wants the upper bound for size */
3315 	if (req->oldptr == NULL && req->newptr == NULL) {
3316 		size_t est_output_len = IXGBE_DBG_DUMP_BASE_SIZE;
3317 		if (sc->debug_dump_cluster_mask & 0x2)
3318 			est_output_len += IXGBE_DBG_DUMP_BASE_SIZE;
3319 		error = SYSCTL_OUT(req, 0, est_output_len);
3320 		return (error);
3321 	}
3322 
3323 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3324 	sbuf_clear_flags(sbuf, SBUF_INCLUDENUL);
3325 
3326 	DEBUGOUT("FW Debug Dump running...\n");
3327 
3328 	if (sc->debug_dump_cluster_mask) {
3329 		for (u8 id = 0; id <= IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX; id++) {
3330 			if (sc->debug_dump_cluster_mask & BIT(id)) {
3331 				DEBUGOUT1("Dumping cluster ID %u...\n", id);
3332 				ixgbe_debug_dump_print_cluster(sc, sbuf, id);
3333 			}
3334 		}
3335 	} else {
3336 		u8 next_cluster_id = 0;
3337 		do {
3338 			DEBUGOUT1("Dumping cluster ID %u...\n",
3339 			    next_cluster_id);
3340 			next_cluster_id = ixgbe_debug_dump_print_cluster(sc,
3341 				sbuf, next_cluster_id);
3342 		} while (next_cluster_id != 0 &&
3343 			next_cluster_id <= IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX);
3344 	}
3345 
3346 	sbuf_finish(sbuf);
3347 	sbuf_delete(sbuf);
3348 
3349 	sc->do_debug_dump = false;
3350 
3351 	return (error);
3352 } /* ixgbe_sysctl_dump_debug_dump */
3353 
3354 /************************************************************************
3355  * ixgbe_add_debug_dump_sysctls
3356  ************************************************************************/
3357 static void
3358 ixgbe_add_debug_dump_sysctls(struct ixgbe_softc *sc)
3359 {
3360 	struct sysctl_oid_list *debug_list, *dump_list;
3361 	struct sysctl_oid *dump_node;
3362 	struct sysctl_ctx_list *ctx;
3363 	device_t dev = sc->dev;
3364 
3365 	ctx = device_get_sysctl_ctx(dev);
3366 	debug_list = SYSCTL_CHILDREN(sc->debug_sysctls);
3367 
3368 	dump_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "dump",
3369 	    CTLFLAG_RD, NULL, "Internal FW/HW Dump");
3370 	dump_list = SYSCTL_CHILDREN(dump_node);
3371 
3372 	SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "clusters",
3373 	    CTLTYPE_U32 | CTLFLAG_RW, sc, 0,
3374 	    ixgbe_sysctl_debug_dump_set_clusters, "SU",
3375 	    IXGBE_SYSCTL_DESC_DEBUG_DUMP_SET_CLUSTER);
3376 
3377 	SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "dump",
3378 	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
3379 	    ixgbe_sysctl_dump_debug_dump, "",
3380 	    IXGBE_SYSCTL_DESC_DUMP_DEBUG_DUMP);
3381 } /* ixgbe_add_debug_dump_sysctls */
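/*
 * Example use of the nodes registered above (the device unit and the
 * cluster mask value are illustrative only):
 *
 *   # sysctl dev.ix.0.debug.dump.clusters=0x2      select cluster(s)
 *   # sysctl dev.ix.0.debug.dump.dump=1            arm the dump
 *   # sysctl -b dev.ix.0.debug.dump.dump > fw.bin  retrieve the data
 *
 * The write of "1" only sets do_debug_dump; the binary payload is
 * produced by the subsequent read (see ixgbe_sysctl_dump_debug_dump()
 * above).
 */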
3382 
3383 static void
3384 ixgbe_add_debug_sysctls(struct ixgbe_softc *sc)
3385 {
3386 	struct sysctl_oid_list *ctx_list;
3387 	struct sysctl_ctx_list *ctx;
3388 	device_t dev = sc->dev;
3389 
3390 	ctx = device_get_sysctl_ctx(dev);
3391 	ctx_list  = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3392 
3393 	sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug",
3394 	    CTLFLAG_RD, NULL, "Debug Sysctls");
3395 
3396 	if (sc->feat_en & IXGBE_FEATURE_DBG_DUMP)
3397 		ixgbe_add_debug_dump_sysctls(sc);
3398 } /* ixgbe_add_debug_sysctls */
3399 
3400 /************************************************************************
3401  * ixgbe_add_device_sysctls
3402  ************************************************************************/
3403 static void
3404 ixgbe_add_device_sysctls(if_ctx_t ctx)
3405 {
3406 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3407 	device_t dev = iflib_get_dev(ctx);
3408 	struct ixgbe_hw *hw = &sc->hw;
3409 	struct sysctl_oid_list *child;
3410 	struct sysctl_ctx_list *ctx_list;
3411 
3412 	ctx_list = device_get_sysctl_ctx(dev);
3413 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3414 
3415 	/* Sysctls for all devices */
3416 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
3417 	    CTLTYPE_INT | CTLFLAG_RW,
3418 	    sc, 0, ixgbe_sysctl_flowcntl, "I",
3419 	    IXGBE_SYSCTL_DESC_SET_FC);
3420 
3421 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
3422 	    CTLTYPE_INT | CTLFLAG_RW,
3423 	    sc, 0, ixgbe_sysctl_advertise, "I",
3424 	    IXGBE_SYSCTL_DESC_ADV_SPEED);
3425 
3426 	sc->enable_aim = ixgbe_enable_aim;
3427 	SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
3428 	    &sc->enable_aim, 0, "Interrupt Moderation");
3429 
3430 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
3431 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3432 	    ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
3433 
3434 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
3435 	    "tso_tcp_flags_mask_first_segment",
3436 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3437 	    sc, 0, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
3438 	    "TSO TCP flags mask for first segment");
3439 
3440 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
3441 	    "tso_tcp_flags_mask_middle_segment",
3442 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3443 	    sc, 1, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
3444 	    "TSO TCP flags mask for middle segment");
3445 
3446 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
3447 	    "tso_tcp_flags_mask_last_segment",
3448 	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3449 	    sc, 2, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
3450 	    "TSO TCP flags mask for last segment");
3451 
3452 #ifdef IXGBE_DEBUG
3453 	/* testing sysctls (for all devices) */
3454 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
3455 	    CTLTYPE_INT | CTLFLAG_RW,
3456 	    sc, 0, ixgbe_sysctl_power_state,
3457 	    "I", "PCI Power State");
3458 
3459 	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
3460 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3461 	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
3462 #endif
3463 	/* for X550 series devices */
3464 	if (hw->mac.type >= ixgbe_mac_X550)
3465 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
3466 		    CTLTYPE_U16 | CTLFLAG_RW,
3467 		    sc, 0, ixgbe_sysctl_dmac,
3468 		    "I", "DMA Coalesce");
3469 
3470 	/* for WoL-capable devices */
3471 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3472 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
3473 		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
3474 		    ixgbe_sysctl_wol_enable, "I",
3475 		    "Enable/Disable Wake on LAN");
3476 
3477 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
3478 		    CTLTYPE_U32 | CTLFLAG_RW,
3479 		    sc, 0, ixgbe_sysctl_wufc,
3480 		    "I", "Enable/Disable Wake Up Filters");
3481 	}
3482 
3483 	/* for X552/X557-AT devices */
3484 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3485 		struct sysctl_oid *phy_node;
3486 		struct sysctl_oid_list *phy_list;
3487 
3488 		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
3489 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3490 		    "External PHY sysctls");
3491 		phy_list = SYSCTL_CHILDREN(phy_node);
3492 
3493 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
3494 		    CTLTYPE_U16 | CTLFLAG_RD,
3495 		    sc, 0, ixgbe_sysctl_phy_temp,
3496 		    "I", "Current External PHY Temperature (Celsius)");
3497 
3498 		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
3499 		    "overtemp_occurred",
3500 		    CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
3501 		    ixgbe_sysctl_phy_overtemp_occurred, "I",
3502 		    "External PHY High Temperature Event Occurred");
3503 	}
3504 
3505 	if (sc->feat_cap & IXGBE_FEATURE_EEE) {
3506 		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
3507 		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
3508 		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
3509 	}
3510 
3511 	ixgbe_add_debug_sysctls(sc);
3512 } /* ixgbe_add_device_sysctls */
3513 
3514 /************************************************************************
3515  * ixgbe_allocate_pci_resources
3516  ************************************************************************/
3517 static int
3518 ixgbe_allocate_pci_resources(if_ctx_t ctx)
3519 {
3520 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3521 	device_t dev = iflib_get_dev(ctx);
3522 	int rid;
3523 
3524 	rid = PCIR_BAR(0);
3525 	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3526 	    RF_ACTIVE);
3527 
3528 	if (!(sc->pci_mem)) {
3529 		device_printf(dev,
3530 		    "Unable to allocate bus resource: memory\n");
3531 		return (ENXIO);
3532 	}
3533 
3534 	/* Save bus_space values for READ/WRITE_REG macros */
3535 	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
3536 	sc->osdep.mem_bus_space_handle =
3537 	    rman_get_bushandle(sc->pci_mem);
3538 	/* Set hw values for shared code */
3539 	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
3540 
3541 	return (0);
3542 } /* ixgbe_allocate_pci_resources */
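/*
 * Note that hw->hw_addr is not the mapped BAR address itself; it
 * points at the saved bus_space handle, and the IXGBE_READ_REG()/
 * IXGBE_WRITE_REG() macros (defined in the OS-dependent header) are
 * expected to recover the tag/handle from osdep. A register read is
 * therefore roughly equivalent to (a sketch, not the literal macro):
 *
 *   bus_space_read_4(sc->osdep.mem_bus_space_tag,
 *       sc->osdep.mem_bus_space_handle, reg);
 */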
3543 
3544 /************************************************************************
3545  * ixgbe_detach - Device removal routine
3546  *
3547  *   Called when the driver is being removed.
3548  *   Stops the adapter and deallocates all the resources
3549  *   that were allocated for driver operation.
3550  *
3551  *   return 0 on success, positive on failure
3552  ************************************************************************/
3553 static int
3554 ixgbe_if_detach(if_ctx_t ctx)
3555 {
3556 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3557 	device_t dev = iflib_get_dev(ctx);
3558 	u32 ctrl_ext;
3559 
3560 	INIT_DEBUGOUT("ixgbe_detach: begin");
3561 
3562 	if (ixgbe_pci_iov_detach(dev) != 0) {
3563 		device_printf(dev, "SR-IOV in use; detach first.\n");
3564 		return (EBUSY);
3565 	}
3566 
3567 	ixgbe_setup_low_power_mode(ctx);
3568 
3569 	/* let hardware know driver is unloading */
3570 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
3571 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3572 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
3573 
3574 	callout_drain(&sc->fw_mode_timer);
3575 
3576 	if (sc->hw.mac.type == ixgbe_mac_E610) {
3577 		ixgbe_disable_lse(sc);
3578 		ixgbe_shutdown_aci(&sc->hw);
3579 	}
3580 
3581 	ixgbe_free_pci_resources(ctx);
3582 
3583 	free(sc->mta, M_IXGBE);
3584 
3585 	return (0);
3586 } /* ixgbe_if_detach */
3587 
3588 /************************************************************************
3589  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3590  *
3591  *   Prepare the adapter/port for LPLU and/or WoL
3592  ************************************************************************/
3593 static int
3594 ixgbe_setup_low_power_mode(if_ctx_t ctx)
3595 {
3596 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3597 	struct ixgbe_hw *hw = &sc->hw;
3598 	device_t dev = iflib_get_dev(ctx);
3599 	s32 error = 0;
3600 
3601 	if (!hw->wol_enabled)
3602 		ixgbe_set_phy_power(hw, false);
3603 
3604 	/* Limit power management flow to X550EM baseT */
3605 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3606 	    hw->phy.ops.enter_lplu) {
3607 		/* Turn off support for APM wakeup. (Using ACPI instead) */
3608 		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3609 		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3610 
3611 		/*
3612 		 * Clear Wake Up Status register to prevent any previous
3613 		 * wakeup events from waking us up immediately after we
3614 		 * suspend.
3615 		 */
3616 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3617 
3618 		/*
3619 		 * Program the Wakeup Filter Control register with user filter
3620 		 * settings
3621 		 */
3622 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
3623 
3624 		/* Enable wakeups and power management in Wakeup Control */
3625 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
3626 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3627 
3628 		/* X550EM baseT adapters need a special LPLU flow */
3629 		hw->phy.reset_disable = true;
3630 		ixgbe_if_stop(ctx);
3631 		error = hw->phy.ops.enter_lplu(hw);
3632 		if (error)
3633 			device_printf(dev, "Error entering LPLU: %d\n",
3634 			    error);
3635 		hw->phy.reset_disable = false;
3636 	} else {
3637 		/* Just stop for other adapters */
3638 		ixgbe_if_stop(ctx);
3639 	}
3640 
3641 	return error;
3642 } /* ixgbe_setup_low_power_mode */
3643 
3644 /************************************************************************
3645  * ixgbe_shutdown - Shutdown entry point
3646  ************************************************************************/
3647 static int
3648 ixgbe_if_shutdown(if_ctx_t ctx)
3649 {
3650 	int error = 0;
3651 
3652 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
3653 
3654 	error = ixgbe_setup_low_power_mode(ctx);
3655 
3656 	return (error);
3657 } /* ixgbe_if_shutdown */
3658 
3659 /************************************************************************
3660  * ixgbe_suspend
3661  *
3662  *   From D0 to D3
3663  ************************************************************************/
3664 static int
3665 ixgbe_if_suspend(if_ctx_t ctx)
3666 {
3667 	int error = 0;
3668 
3669 	INIT_DEBUGOUT("ixgbe_suspend: begin");
3670 
3671 	error = ixgbe_setup_low_power_mode(ctx);
3672 
3673 	return (error);
3674 } /* ixgbe_if_suspend */
3675 
3676 /************************************************************************
3677  * ixgbe_resume
3678  *
3679  *   From D3 to D0
3680  ************************************************************************/
3681 static int
3682 ixgbe_if_resume(if_ctx_t ctx)
3683 {
3684 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3685 	device_t dev = iflib_get_dev(ctx);
3686 	if_t ifp = iflib_get_ifp(ctx);
3687 	struct ixgbe_hw *hw = &sc->hw;
3688 	u32 wus;
3689 
3690 	INIT_DEBUGOUT("ixgbe_resume: begin");
3691 
3692 	/* Read & clear WUS register */
3693 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3694 	if (wus)
3695 		device_printf(dev, "Woken up by (WUS): %#010x\n",
3696 		    wus);
3697 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3698 	/* And clear WUFC until next low-power transition */
3699 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3700 
3701 	/*
3702 	 * Required after D3->D0 transition;
3703 	 * will re-advertise all previous advertised speeds
3704 	 */
3705 	if (if_getflags(ifp) & IFF_UP)
3706 		ixgbe_if_init(ctx);
3707 
3708 	return (0);
3709 } /* ixgbe_if_resume */
3710 
3711 /************************************************************************
3712  * ixgbe_if_mtu_set - Ioctl mtu entry point
3713  *
3714  *   Return 0 on success, EINVAL on failure
3715  ************************************************************************/
3716 static int
3717 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
3718 {
3719 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3720 	int error = 0;
3721 
3722 	IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
3723 
3724 	if (mtu > IXGBE_MAX_MTU) {
3725 		error = EINVAL;
3726 	} else {
3727 		sc->max_frame_size = mtu + IXGBE_MTU_HDR;
3728 	}
3729 
3730 	return error;
3731 } /* ixgbe_if_mtu_set */
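/*
 * Example: a request for an MTU of 9000 (assuming IXGBE_MAX_MTU
 * permits it) records max_frame_size = 9000 + IXGBE_MTU_HDR; that
 * value is later programmed into IXGBE_MHADD by ixgbe_if_init() and
 * feeds the flow-control watermarks in ixgbe_config_delay_values().
 * The interface MTU itself is applied by the iflib layer.
 */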
3732 
3733 /************************************************************************
3734  * ixgbe_if_crcstrip_set
3735  ************************************************************************/
3736 static void
3737 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
3738 {
3739 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3740 	struct ixgbe_hw *hw = &sc->hw;
3741 	/* crc stripping is set in two places:
3742 	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
3743 	 * IXGBE_RDRXCTL (set by the original driver in
3744 	 *	ixgbe_setup_hw_rsc() called in init_locked.
3745 	 *	We disable the setting when netmap is compiled in).
3746 	 * We update the values here, but also in ixgbe.c because
3747 	 * init_locked sometimes is called outside our control.
3748 	 */
3749 	uint32_t hl, rxc;
3750 
3751 	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3752 	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3753 #ifdef NETMAP
3754 	if (netmap_verbose)
3755 		D("%s read  HLREG 0x%x rxc 0x%x",
3756 			onoff ? "enter" : "exit", hl, rxc);
3757 #endif
3758 	/* hw requirements ... */
3759 	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3760 	rxc |= IXGBE_RDRXCTL_RSCACKC;
3761 	if (onoff && !crcstrip) {
3762 		/* keep the crc. Fast rx */
3763 		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
3764 		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
3765 	} else {
3766 		/* reset default mode */
3767 		hl |= IXGBE_HLREG0_RXCRCSTRP;
3768 		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
3769 	}
3770 #ifdef NETMAP
3771 	if (netmap_verbose)
3772 		D("%s write HLREG 0x%x rxc 0x%x",
3773 			onoff ? "enter" : "exit", hl, rxc);
3774 #endif
3775 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
3776 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
3777 } /* ixgbe_if_crcstrip_set */
3778 
3779 /*********************************************************************
3780  * ixgbe_if_init - Init entry point
3781  *
3782  *   Used in two ways: It is used by the stack as an init
3783  *   entry point in network interface structure. It is also
3784  *   used by the driver as a hw/sw initialization routine to
3785  *   get to a consistent state.
3786  *
3789 void
3790 ixgbe_if_init(if_ctx_t ctx)
3791 {
3792 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
3793 	if_t ifp = iflib_get_ifp(ctx);
3794 	device_t dev = iflib_get_dev(ctx);
3795 	struct ixgbe_hw *hw = &sc->hw;
3796 	struct ix_rx_queue *rx_que;
3797 	struct ix_tx_queue *tx_que;
3798 	u32 txdctl, mhadd;
3799 	u32 rxdctl, rxctrl;
3800 	u32 ctrl_ext;
3801 
3802 	int i, j, err;
3803 
3804 	INIT_DEBUGOUT("ixgbe_if_init: begin");
3805 
3806 	/* Queue indices may change with IOV mode */
3807 	ixgbe_align_all_queue_indices(sc);
3808 
3809 	/* reprogram the RAR[0] in case user changed it. */
3810 	ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3811 
3812 	/* Get the latest mac address, User can use a LAA */
3813 	bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3814 	ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3815 	hw->addr_ctrl.rar_used_count = 1;
3816 
3817 	ixgbe_init_hw(hw);
3818 
3819 	ixgbe_initialize_iov(sc);
3820 
3821 	ixgbe_initialize_transmit_units(ctx);
3822 
3823 	/* Setup Multicast table */
3824 	ixgbe_if_multi_set(ctx);
3825 
3826 	/* Determine the correct mbuf pool, based on frame size */
3827 	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3828 
3829 	/* Configure RX settings */
3830 	ixgbe_initialize_receive_units(ctx);
3831 
3832 	/*
3833 	 * Initialize variable holding task enqueue requests
3834 	 * from MSI-X interrupts
3835 	 */
3836 	sc->task_requests = 0;
3837 
3838 	/* Enable SDP & MSI-X interrupts based on adapter */
3839 	ixgbe_config_gpie(sc);
3840 
3841 	/* Set MTU size */
3842 	if (if_getmtu(ifp) > ETHERMTU) {
3843 		/* aka IXGBE_MAXFRS on 82599 and newer */
3844 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3845 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
3846 		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3847 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3848 	}
3849 
3850 	/* Now enable all the queues */
3851 	for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
3852 	    i++, tx_que++) {
3853 		struct tx_ring *txr = &tx_que->txr;
3854 
3855 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3856 		txdctl |= IXGBE_TXDCTL_ENABLE;
3857 		/* Set WTHRESH to 8, burst writeback */
3858 		txdctl |= (8 << 16);
3859 		/*
3860 		 * When the internal queue falls below PTHRESH (32),
3861 		 * start prefetching as long as there are at least
3862 		 * HTHRESH (1) buffers ready. The values are taken
3863 		 * from the Intel linux driver 3.8.21.
3864 		 * Prefetching enables tx line rate even with 1 queue.
3865 		 */
3866 		txdctl |= (32 << 0) | (1 << 8);
3867 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3868 	}
3869 
3870 	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
3871 	    i++, rx_que++) {
3872 		struct rx_ring *rxr = &rx_que->rxr;
3873 
3874 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3875 		if (hw->mac.type == ixgbe_mac_82598EB) {
3876 			/*
3877 			 * PTHRESH = 21
3878 			 * HTHRESH = 4
3879 			 * WTHRESH = 8
3880 			 */
3881 			rxdctl &= ~0x3FFFFF;
3882 			rxdctl |= 0x080420;
3883 		}
3884 		rxdctl |= IXGBE_RXDCTL_ENABLE;
3885 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3886 		for (j = 0; j < 10; j++) {
3887 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3888 			    IXGBE_RXDCTL_ENABLE)
3889 				break;
3890 			else
3891 				msec_delay(1);
3892 		}
3893 		wmb();
3894 	}
3895 
3896 	/* Enable Receive engine */
3897 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3898 	if (hw->mac.type == ixgbe_mac_82598EB)
3899 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
3900 	rxctrl |= IXGBE_RXCTRL_RXEN;
3901 	ixgbe_enable_rx_dma(hw, rxctrl);
3902 
3903 	/* Set up MSI/MSI-X routing */
3904 	if (ixgbe_enable_msix)  {
3905 		ixgbe_configure_ivars(sc);
3906 		/* Set up auto-mask */
3907 		if (hw->mac.type == ixgbe_mac_82598EB)
3908 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3909 		else {
3910 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3911 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3912 		}
3913 	} else {  /* Simple settings for Legacy/MSI */
3914 		ixgbe_set_ivar(sc, 0, 0, 0);
3915 		ixgbe_set_ivar(sc, 0, 0, 1);
3916 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3917 	}
3918 
3919 	ixgbe_init_fdir(sc);
3920 
3921 	/*
3922 	 * Check on any SFP devices that
3923 	 * need to be kick-started
3924 	 */
3925 	if (hw->phy.type == ixgbe_phy_none) {
3926 		err = hw->phy.ops.identify(hw);
3927 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3928 			device_printf(dev,
3929 			    "Unsupported SFP+ module type was detected.\n");
3930 			return;
3931 		}
3932 	}
3933 
3934 	/* Set moderation on the Link interrupt */
3935 	IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3936 
3937 	/* Enable power to the phy. */
3938 	ixgbe_set_phy_power(hw, true);
3939 
3940 	/* Config/Enable Link */
3941 	ixgbe_config_link(ctx);
3942 
3943 	/* Hardware Packet Buffer & Flow Control setup */
3944 	ixgbe_config_delay_values(sc);
3945 
3946 	/* Initialize the FC settings */
3947 	ixgbe_start_hw(hw);
3948 
3949 	/* Set up VLAN support and filter */
3950 	ixgbe_setup_vlan_hw_support(ctx);
3951 
3952 	/* Setup DMA Coalescing */
3953 	ixgbe_config_dmac(sc);
3954 
3955 	/* And now turn on interrupts */
3956 	ixgbe_if_enable_intr(ctx);
3957 
3958 	/* Enable the use of the MBX by the VF's */
3959 	if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3960 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3961 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3962 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3963 	}
3964 
3965 } /* ixgbe_if_init */
3966 
3967 /************************************************************************
3968  * ixgbe_set_ivar
3969  *
3970  *   Setup the correct IVAR register for a particular MSI-X interrupt
3971  *     (yes this is all very magic and confusing :)
3972  *    - entry is the register array entry
3973  *    - vector is the MSI-X vector for this queue
3974  *    - type is RX/TX/MISC
3975  ************************************************************************/
3976 static void
3977 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3978 {
3979 	struct ixgbe_hw *hw = &sc->hw;
3980 	u32 ivar, index;
3981 
3982 	vector |= IXGBE_IVAR_ALLOC_VAL;
3983 
3984 	switch (hw->mac.type) {
3985 	case ixgbe_mac_82598EB:
3986 		if (type == -1)
3987 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3988 		else
3989 			entry += (type * 64);
3990 		index = (entry >> 2) & 0x1F;
3991 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3992 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3993 		ivar |= (vector << (8 * (entry & 0x3)));
3994 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3995 		break;
3996 	case ixgbe_mac_82599EB:
3997 	case ixgbe_mac_X540:
3998 	case ixgbe_mac_X550:
3999 	case ixgbe_mac_X550EM_x:
4000 	case ixgbe_mac_X550EM_a:
4001 	case ixgbe_mac_E610:
4002 		if (type == -1) { /* MISC IVAR */
4003 			index = (entry & 1) * 8;
4004 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4005 			ivar &= ~(0xFF << index);
4006 			ivar |= (vector << index);
4007 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4008 		} else {          /* RX/TX IVARS */
4009 			index = (16 * (entry & 1)) + (8 * type);
4010 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4011 			ivar &= ~(0xFF << index);
4012 			ivar |= (vector << index);
4013 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4014 		}
		break;
4015 	default:
4016 		break;
4017 	}
4018 } /* ixgbe_set_ivar */
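/*
 * Worked example for the non-82598 layout above: each 32-bit IVAR
 * register carries four one-byte entries, an RX and a TX entry for
 * each of two queues. Mapping RX queue 5 (entry = 5, type = 0) to
 * MSI-X vector 3:
 *
 *   index = (16 * (5 & 1)) + (8 * 0) = 16       -> byte lane 2
 *   register = IVAR(5 >> 1) = IVAR(2)
 *   byte written = 3 | IXGBE_IVAR_ALLOC_VAL
 *
 * where IXGBE_IVAR_ALLOC_VAL is the "entry valid" bit ORed in at the
 * top of this function.
 */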
4019 
4020 /************************************************************************
4021  * ixgbe_configure_ivars
4022  ************************************************************************/
4023 static void
4024 ixgbe_configure_ivars(struct ixgbe_softc *sc)
4025 {
4026 	struct ix_rx_queue *rx_que = sc->rx_queues;
4027 	struct ix_tx_queue *tx_que = sc->tx_queues;
4028 	u32 newitr;
4029 
4030 	if (ixgbe_max_interrupt_rate > 0)
4031 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4032 	else {
4033 		/*
4034 		 * Disable DMA coalescing if interrupt moderation is
4035 		 * disabled.
4036 		 */
4037 		sc->dmac = 0;
4038 		newitr = 0;
4039 	}
4040 
4041 	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
4042 		struct rx_ring *rxr = &rx_que->rxr;
4043 
4044 		/* First the RX queue entry */
4045 		ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
4046 
4047 		/* Set an Initial EITR value */
4048 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
4049 	}
4050 	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
4051 		struct tx_ring *txr = &tx_que->txr;
4052 
4053 		/* ... and the TX */
4054 		ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
4055 	}
4056 	/* For the Link interrupt */
4057 	ixgbe_set_ivar(sc, 1, sc->vector, -1);
4058 } /* ixgbe_configure_ivars */
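/*
 * Example of the initial EITR value computed above: with
 * ixgbe_max_interrupt_rate = 31250, newitr = (4000000 / 31250) &
 * 0x0FF8 = 0x080, an interval field of 0x080 >> 3 = 16. In the 2 us
 * granularity implied by the 500000/usec conversion in
 * ixgbe_sysctl_interrupt_rate_handler(), that is ~32 us between
 * interrupts, i.e. ~31250 interrupts/s per queue.
 */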
4059 
4060 /************************************************************************
4061  * ixgbe_config_gpie
4062  ************************************************************************/
4063 static void
4064 ixgbe_config_gpie(struct ixgbe_softc *sc)
4065 {
4066 	struct ixgbe_hw *hw = &sc->hw;
4067 	u32 gpie;
4068 
4069 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4070 
4071 	if (sc->intr_type == IFLIB_INTR_MSIX) {
4072 		/* Enable Enhanced MSI-X mode */
4073 		gpie |= IXGBE_GPIE_MSIX_MODE |
4074 		    IXGBE_GPIE_EIAME |
4075 		    IXGBE_GPIE_PBA_SUPPORT |
4076 		    IXGBE_GPIE_OCD;
4077 	}
4078 
4079 	/* Fan Failure Interrupt */
4080 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
4081 		gpie |= IXGBE_SDP1_GPIEN;
4082 
4083 	/* Thermal Sensor Interrupt */
4084 	if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4085 		gpie |= IXGBE_SDP0_GPIEN_X540;
4086 
4087 	/* Link detection */
4088 	switch (hw->mac.type) {
4089 	case ixgbe_mac_82599EB:
4090 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4091 		break;
4092 	case ixgbe_mac_X550EM_x:
4093 	case ixgbe_mac_X550EM_a:
4094 		gpie |= IXGBE_SDP0_GPIEN_X540;
4095 		break;
4096 	default:
4097 		break;
4098 	}
4099 
4100 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4101 
4102 } /* ixgbe_config_gpie */
4103 
4104 /************************************************************************
4105  * ixgbe_config_delay_values
4106  *
4107  *   Requires sc->max_frame_size to be set.
4108  ************************************************************************/
4109 static void
4110 ixgbe_config_delay_values(struct ixgbe_softc *sc)
4111 {
4112 	struct ixgbe_hw *hw = &sc->hw;
4113 	u32 rxpb, frame, size, tmp;
4114 
4115 	frame = sc->max_frame_size;
4116 
4117 	/* Calculate High Water */
4118 	switch (hw->mac.type) {
4119 	case ixgbe_mac_X540:
4120 	case ixgbe_mac_X550:
4121 	case ixgbe_mac_X550EM_x:
4122 	case ixgbe_mac_X550EM_a:
4123 		tmp = IXGBE_DV_X540(frame, frame);
4124 		break;
4125 	default:
4126 		tmp = IXGBE_DV(frame, frame);
4127 		break;
4128 	}
4129 	size = IXGBE_BT2KB(tmp);
4130 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4131 	hw->fc.high_water[0] = rxpb - size;
4132 
4133 	/* Now calculate Low Water */
4134 	switch (hw->mac.type) {
4135 	case ixgbe_mac_X540:
4136 	case ixgbe_mac_X550:
4137 	case ixgbe_mac_X550EM_x:
4138 	case ixgbe_mac_X550EM_a:
4139 		tmp = IXGBE_LOW_DV_X540(frame);
4140 		break;
4141 	default:
4142 		tmp = IXGBE_LOW_DV(frame);
4143 		break;
4144 	}
4145 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4146 
4147 	hw->fc.pause_time = IXGBE_FC_PAUSE;
4148 	hw->fc.send_xon = true;
4149 } /* ixgbe_config_delay_values */
4150 
4151 /************************************************************************
4152  * ixgbe_if_multi_set - Multicast Update
4153  *
4154  *   Called whenever multicast address list is updated.
4155  ************************************************************************/
4156 static u_int
4157 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
4158 {
4159 	struct ixgbe_softc *sc = arg;
4160 	struct ixgbe_mc_addr *mta = sc->mta;
4161 
4162 	if (idx == MAX_NUM_MULTICAST_ADDRESSES)
4163 		return (0);
4164 	bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4165 	mta[idx].vmdq = sc->pool;
4166 
4167 	return (1);
4168 } /* ixgbe_mc_filter_apply */
4169 
4170 static void
4171 ixgbe_if_multi_set(if_ctx_t ctx)
4172 {
4173 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4174 	struct ixgbe_mc_addr *mta;
4175 	if_t ifp = iflib_get_ifp(ctx);
4176 	u8 *update_ptr;
4177 	u32 fctrl;
4178 	u_int mcnt;
4179 
4180 	IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
4181 
4182 	mta = sc->mta;
4183 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4184 
4185 	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
4186 	    sc);
4187 
4188 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
4189 		update_ptr = (u8 *)mta;
4190 		ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
4191 		    ixgbe_mc_array_itr, true);
4192 	}
4193 
4194 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
4195 
4196 	if (if_getflags(ifp) & IFF_PROMISC)
4197 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4198 	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
4199 	    if_getflags(ifp) & IFF_ALLMULTI) {
4200 		fctrl |= IXGBE_FCTRL_MPE;
4201 		fctrl &= ~IXGBE_FCTRL_UPE;
4202 	} else
4203 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4204 
4205 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
4206 } /* ixgbe_if_multi_set */
4207 
4208 /************************************************************************
4209  * ixgbe_mc_array_itr
4210  *
4211  *   An iterator function needed by the multicast shared code.
4212  *   It feeds the shared code routine the addresses in the
4213  *   array of ixgbe_set_multi() one by one.
4214  ************************************************************************/
4215 static u8 *
4216 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4217 {
4218 	struct ixgbe_mc_addr *mta;
4219 
4220 	mta = (struct ixgbe_mc_addr *)*update_ptr;
4221 	*vmdq = mta->vmdq;
4222 
4223 	*update_ptr = (u8*)(mta + 1);
4224 
4225 	return (mta->addr);
4226 } /* ixgbe_mc_array_itr */
4227 
4228 /************************************************************************
4229  * ixgbe_if_timer - Timer routine
4230  *
4231  *   Checks for link status, updates statistics,
4232  *   and runs the watchdog check.
4233  ************************************************************************/
4234 static void
4235 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
4236 {
4237 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4238 
4239 	if (qid != 0)
4240 		return;
4241 
4242 	/* Check for pluggable optics */
4243 	if (sc->sfp_probe)
4244 		if (!ixgbe_sfp_probe(ctx))
4245 			return; /* Nothing to do */
4246 
4247 	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
4248 
4249 	/* Fire off the adminq task */
4250 	iflib_admin_intr_deferred(ctx);
4251 
4252 } /* ixgbe_if_timer */
4253 
4254 /************************************************************************
4255  * ixgbe_fw_mode_timer - FW mode timer routine
4256  ************************************************************************/
4257 static void
4258 ixgbe_fw_mode_timer(void *arg)
4259 {
4260 	struct ixgbe_softc *sc = arg;
4261 	struct ixgbe_hw *hw = &sc->hw;
4262 
4263 	if (ixgbe_fw_recovery_mode(hw)) {
4264 		if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
4265 			/* Firmware error detected, entering recovery mode */
4266 			device_printf(sc->dev,
4267 			    "Firmware recovery mode detected. Limiting"
4268 			    " functionality. Refer to the Intel(R) Ethernet"
4269 			    " Adapters and Devices User Guide for details on"
4270 			    " firmware recovery mode.\n");
4271 
4272 			if (hw->adapter_stopped == FALSE)
4273 				ixgbe_if_stop(sc->ctx);
4274 		}
4275 	} else
4276 		atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
4277 
4279 	callout_reset(&sc->fw_mode_timer, hz,
4280 	    ixgbe_fw_mode_timer, sc);
4281 } /* ixgbe_fw_mode_timer */
4282 
4283 /************************************************************************
4284  * ixgbe_sfp_probe
4285  *
4286  *   Determine if a port had optics inserted.
4287  ************************************************************************/
4288 static bool
4289 ixgbe_sfp_probe(if_ctx_t ctx)
4290 {
4291 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4292 	struct ixgbe_hw *hw = &sc->hw;
4293 	device_t dev = iflib_get_dev(ctx);
4294 	bool result = false;
4295 
4296 	if ((hw->phy.type == ixgbe_phy_nl) &&
4297 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4298 		s32 ret = hw->phy.ops.identify_sfp(hw);
4299 		if (ret)
4300 			goto out;
4301 		ret = hw->phy.ops.reset(hw);
4302 		sc->sfp_probe = false;
4303 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4304 			device_printf(dev,
4305 			    "Unsupported SFP+ module detected!\n");
4306 			device_printf(dev,
4307 			    "Reload driver with supported module.\n");
4308 			goto out;
4309 		} else
4310 			device_printf(dev, "SFP+ module detected!\n");
4311 		/* We now have supported optics */
4312 		result = true;
4313 	}
4314 out:
4315 
4316 	return (result);
4317 } /* ixgbe_sfp_probe */
4318 
4319 /************************************************************************
4320  * ixgbe_handle_mod - Tasklet for SFP module interrupts
4321  ************************************************************************/
4322 static void
4323 ixgbe_handle_mod(void *context)
4324 {
4325 	if_ctx_t ctx = context;
4326 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4327 	struct ixgbe_hw *hw = &sc->hw;
4328 	device_t dev = iflib_get_dev(ctx);
4329 	u32 err, cage_full = 0;
4330 
4331 	if (sc->hw.need_crosstalk_fix) {
4332 		switch (hw->mac.type) {
4333 		case ixgbe_mac_82599EB:
4334 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4335 			    IXGBE_ESDP_SDP2;
4336 			break;
4337 		case ixgbe_mac_X550EM_x:
4338 		case ixgbe_mac_X550EM_a:
4339 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4340 			    IXGBE_ESDP_SDP0;
4341 			break;
4342 		default:
4343 			break;
4344 		}
4345 
4346 		if (!cage_full)
4347 			goto handle_mod_out;
4348 	}
4349 
4350 	err = hw->phy.ops.identify_sfp(hw);
4351 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4352 		device_printf(dev,
4353 		    "Unsupported SFP+ module type was detected.\n");
4354 		goto handle_mod_out;
4355 	}
4356 
4357 	if (hw->mac.type == ixgbe_mac_82598EB)
4358 		err = hw->phy.ops.reset(hw);
4359 	else
4360 		err = hw->mac.ops.setup_sfp(hw);
4361 
4362 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4363 		device_printf(dev,
4364 		    "Setup failure - unsupported SFP+ module type.\n");
4365 		goto handle_mod_out;
4366 	}
4367 	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4368 	return;
4369 
4370 handle_mod_out:
4371 	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
4372 } /* ixgbe_handle_mod */
4373 
4374 
4375 /************************************************************************
4376  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4377  ************************************************************************/
4378 static void
4379 ixgbe_handle_msf(void *context)
4380 {
4381 	if_ctx_t ctx = context;
4382 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4383 	struct ixgbe_hw *hw = &sc->hw;
4384 	u32 autoneg;
4385 	bool negotiate;
4386 
4387 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4388 	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
4389 
4390 	autoneg = hw->phy.autoneg_advertised;
4391 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4392 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4393 	if (hw->mac.ops.setup_link)
4394 		hw->mac.ops.setup_link(hw, autoneg, true);
4395 
4396 	/* Adjust media types shown in ifconfig */
4397 	ifmedia_removeall(sc->media);
4398 	ixgbe_add_media_types(sc->ctx);
4399 	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
4400 } /* ixgbe_handle_msf */
4401 
4402 /************************************************************************
4403  * ixgbe_handle_phy - Tasklet for external PHY interrupts
4404  ************************************************************************/
4405 static void
4406 ixgbe_handle_phy(void *context)
4407 {
4408 	if_ctx_t ctx = context;
4409 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4410 	struct ixgbe_hw *hw = &sc->hw;
4411 	int error;
4412 
4413 	error = hw->phy.ops.handle_lasi(hw);
4414 	if (error == IXGBE_ERR_OVERTEMP)
4415 		device_printf(sc->dev,
4416 		    "CRITICAL: EXTERNAL PHY OVER TEMP!!"
4417 		    "  PHY will downshift to lower power state!\n");
4418 	else if (error)
4419 		device_printf(sc->dev,
4420 		    "Error handling LASI interrupt: %d\n", error);
4421 } /* ixgbe_handle_phy */
4422 
4423 /************************************************************************
4424  * ixgbe_enable_lse - enable link status events
4425  *
4426  *   Sets mask and enables link status events
4427  ************************************************************************/
4428 s32 ixgbe_enable_lse(struct ixgbe_softc *sc)
4429 {
4430 	s32 error;
4431 
4432 	u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
4433 			   IXGBE_ACI_LINK_EVENT_MEDIA_NA |
4434 			   IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
4435 			   IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
4436 
4437 	error = ixgbe_configure_lse(&sc->hw, TRUE, mask);
4438 	if (error)
4439 		return (error);
4440 
4441 	sc->lse_mask = mask;
4442 	return (IXGBE_SUCCESS);
4443 } /* ixgbe_enable_lse */
4444 
4445 /************************************************************************
4446  * ixgbe_disable_lse - disable link status events
4447  ************************************************************************/
4448 s32 ixgbe_disable_lse(struct ixgbe_softc *sc)
4449 {
4450 	s32 error;
4451 
4452 	error = ixgbe_configure_lse(&sc->hw, false, sc->lse_mask);
4453 	if (error)
4454 		return (error);
4455 
4456 	sc->lse_mask = 0;
4457 	return (IXGBE_SUCCESS);
4458 } /* ixgbe_disable_lse */
4459 
4460 /************************************************************************
4461  * ixgbe_handle_fw_event - Tasklet for MSI-X Link Status Event interrupts
4462  ************************************************************************/
4463 static void
4464 ixgbe_handle_fw_event(void *context)
4465 {
4466 	if_ctx_t ctx = context;
4467 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4468 	struct ixgbe_hw *hw = &sc->hw;
4469 	struct ixgbe_aci_event event;
4470 	bool pending = false;
4471 	s32 error;
4472 
4473 	event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
4474 	event.msg_buf = malloc(event.buf_len, M_IXGBE, M_ZERO | M_NOWAIT);
4475 	if (!event.msg_buf) {
4476 		device_printf(sc->dev, "Cannot allocate buffer for "
4477 		    "event message\n");
4478 		return;
4479 	}
4480 
4481 	do {
4482 		error = ixgbe_aci_get_event(hw, &event, &pending);
4483 		if (error) {
4484 			device_printf(sc->dev, "Error getting event from "
4485 			    "FW:%d\n", error);
4486 			break;
4487 		}
4488 
4489 		switch (le16toh(event.desc.opcode)) {
4490 		case ixgbe_aci_opc_get_link_status:
4491 			sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
4492 			break;
4493 
4494 		case ixgbe_aci_opc_temp_tca_event:
4495 			if (hw->adapter_stopped == FALSE)
4496 				ixgbe_if_stop(ctx);
4497 			device_printf(sc->dev,
4498 			    "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
4499 			device_printf(sc->dev, "System shutdown required!\n");
4500 			break;
4501 
4502 		default:
4503 			device_printf(sc->dev,
4504 			    "Unknown FW event captured, opcode=0x%04X\n",
4505 			    le16toh(event.desc.opcode));
4506 			break;
4507 		}
4508 	} while (pending);
4509 
4510 	free(event.msg_buf, M_IXGBE);
4511 } /* ixgbe_handle_fw_event */
4512 
4513 /************************************************************************
4514  * ixgbe_if_stop - Stop the hardware
4515  *
4516  *   Disables all traffic on the adapter by issuing a
4517  *   global reset on the MAC and deallocates TX/RX buffers.
4518  ************************************************************************/
4519 static void
4520 ixgbe_if_stop(if_ctx_t ctx)
4521 {
4522 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4523 	struct ixgbe_hw *hw = &sc->hw;
4524 
4525 	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
4526 
4527 	ixgbe_reset_hw(hw);
4528 	hw->adapter_stopped = false;
4529 	ixgbe_stop_adapter(hw);
4530 	if (hw->mac.type == ixgbe_mac_82599EB)
4531 		ixgbe_stop_mac_link_on_d3_82599(hw);
4532 	/* Turn off the laser - noop with no optics */
4533 	ixgbe_disable_tx_laser(hw);
4534 
4535 	/* Update the stack */
4536 	sc->link_up = false;
4537 	ixgbe_if_update_admin_status(ctx);
4538 
4539 	/* reprogram the RAR[0] in case user changed it. */
4540 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
4541 
4542 	return;
4543 } /* ixgbe_if_stop */
4544 
4545 /************************************************************************
4546  * ixgbe_link_speed_to_str - Convert link speed to string
4547  *
4548  *   Helper function to convert link speed constants to human-readable
4549  *   string representations in conventional Gbps or Mbps.
4550  ************************************************************************/
4551 static const char *
4552 ixgbe_link_speed_to_str(u32 link_speed)
4553 {
4554 	switch (link_speed) {
4555 	case IXGBE_LINK_SPEED_10GB_FULL:
4556 		return "10 Gbps";
4557 	case IXGBE_LINK_SPEED_5GB_FULL:
4558 		return "5 Gbps";
4559 	case IXGBE_LINK_SPEED_2_5GB_FULL:
4560 		return "2.5 Gbps";
4561 	case IXGBE_LINK_SPEED_1GB_FULL:
4562 		return "1 Gbps";
4563 	case IXGBE_LINK_SPEED_100_FULL:
4564 		return "100 Mbps";
4565 	case IXGBE_LINK_SPEED_10_FULL:
4566 		return "10 Mbps";
4567 	default:
4568 		return "Unknown";
4569 	}
4570 } /* ixgbe_link_speed_to_str */
4571 
4572 /************************************************************************
4573  * ixgbe_if_update_admin_status - Update OS on link state
4574  *
4575  * Note: Only updates the OS on the cached link state.
4576  *       The real check of the hardware only happens with
4577  *       a link interrupt.
4578  ************************************************************************/
4579 static void
4580 ixgbe_if_update_admin_status(if_ctx_t ctx)
4581 {
4582 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4583 	device_t dev = iflib_get_dev(ctx);
4584 
4585 	if (sc->link_up) {
4586 		if (sc->link_active == false) {
4587 			if (bootverbose)
4588 				device_printf(dev,
4589 				    "Link is up %s Full Duplex\n",
4590 				    ixgbe_link_speed_to_str(sc->link_speed));
4591 			sc->link_active = true;
4592 			/* Update any Flow Control changes */
4593 			ixgbe_fc_enable(&sc->hw);
4594 			/* Update DMA coalescing config */
4595 			ixgbe_config_dmac(sc);
4596 			iflib_link_state_change(ctx, LINK_STATE_UP,
4597 			    ixgbe_link_speed_to_baudrate(sc->link_speed));
4598 
4599 			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
4600 				ixgbe_ping_all_vfs(sc);
4601 		}
4602 	} else { /* Link down */
4603 		if (sc->link_active == true) {
4604 			if (bootverbose)
4605 				device_printf(dev, "Link is Down\n");
4606 			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
4607 			sc->link_active = false;
4608 			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
4609 				ixgbe_ping_all_vfs(sc);
4610 		}
4611 	}
4612 
4613 	/* Handle task requests from msix_link() */
4614 	if (sc->task_requests & IXGBE_REQUEST_TASK_FWEVENT)
4615 		ixgbe_handle_fw_event(ctx);
4616 	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
4617 		ixgbe_handle_mod(ctx);
4618 	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
4619 		ixgbe_handle_msf(ctx);
4620 	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
4621 		ixgbe_handle_mbx(ctx);
4622 	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
4623 		ixgbe_reinit_fdir(ctx);
4624 	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
4625 		ixgbe_handle_phy(ctx);
4626 	sc->task_requests = 0;
4627 
4628 	ixgbe_update_stats_counters(sc);
4629 } /* ixgbe_if_update_admin_status */
4630 
4631 /************************************************************************
4632  * ixgbe_config_dmac - Configure DMA Coalescing
4633  ************************************************************************/
4634 static void
4635 ixgbe_config_dmac(struct ixgbe_softc *sc)
4636 {
4637 	struct ixgbe_hw *hw = &sc->hw;
4638 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4639 
4640 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4641 		return;
4642 
4643 	if (dcfg->watchdog_timer ^ sc->dmac ||
4644 	    dcfg->link_speed ^ sc->link_speed) {
4645 		dcfg->watchdog_timer = sc->dmac;
4646 		dcfg->fcoe_en = false;
4647 		dcfg->link_speed = sc->link_speed;
4648 		dcfg->num_tcs = 1;
4649 
4650 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4651 		    dcfg->watchdog_timer, dcfg->link_speed);
4652 
4653 		hw->mac.ops.dmac_config(hw);
4654 	}
4655 } /* ixgbe_config_dmac */
4656 
4657 /************************************************************************
4658  * ixgbe_if_enable_intr
4659  ************************************************************************/
4660 void
4661 ixgbe_if_enable_intr(if_ctx_t ctx)
4662 {
4663 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4664 	struct ixgbe_hw *hw = &sc->hw;
4665 	struct ix_rx_queue *que = sc->rx_queues;
4666 	u32 mask, fwsm;
4667 
4668 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4669 
4670 	switch (sc->hw.mac.type) {
4671 	case ixgbe_mac_82599EB:
4672 		mask |= IXGBE_EIMS_ECC;
4673 		/* Temperature sensor on some adapters */
4674 		mask |= IXGBE_EIMS_GPI_SDP0;
4675 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
4676 		mask |= IXGBE_EIMS_GPI_SDP1;
4677 		mask |= IXGBE_EIMS_GPI_SDP2;
4678 		break;
4679 	case ixgbe_mac_X540:
4680 		/* Detect if Thermal Sensor is enabled */
4681 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4682 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
4683 			mask |= IXGBE_EIMS_TS;
4684 		mask |= IXGBE_EIMS_ECC;
4685 		break;
4686 	case ixgbe_mac_X550:
4687 		/* MAC thermal sensor is automatically enabled */
4688 		mask |= IXGBE_EIMS_TS;
4689 		mask |= IXGBE_EIMS_ECC;
4690 		break;
4691 	case ixgbe_mac_X550EM_x:
4692 	case ixgbe_mac_X550EM_a:
4693 		/* Some devices use SDP0 for important information */
4694 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
4695 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
4696 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
4697 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
4698 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
4699 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
4700 			mask |= IXGBE_EICR_GPI_SDP0_X540;
4701 		mask |= IXGBE_EIMS_ECC;
4702 		break;
4703 	case ixgbe_mac_E610:
4704 		mask |= IXGBE_EIMS_FW_EVENT;
4705 		break;
4706 	default:
4707 		break;
4708 	}
4709 
4710 	/* Enable Fan Failure detection */
4711 	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
4712 		mask |= IXGBE_EIMS_GPI_SDP1;
4713 	/* Enable SR-IOV */
4714 	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
4715 		mask |= IXGBE_EIMS_MAILBOX;
4716 	/* Enable Flow Director */
4717 	if (sc->feat_en & IXGBE_FEATURE_FDIR)
4718 		mask |= IXGBE_EIMS_FLOW_DIR;
4719 
4720 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4721 
4722 	/* With MSI-X we use auto clear */
4723 	if (sc->intr_type == IFLIB_INTR_MSIX) {
4724 		mask = IXGBE_EIMS_ENABLE_MASK;
4725 		/* Don't autoclear Link */
4726 		mask &= ~IXGBE_EIMS_OTHER;
4727 		mask &= ~IXGBE_EIMS_LSC;
4728 		mask &= ~IXGBE_EIMS_FW_EVENT;
4729 		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
4730 			mask &= ~IXGBE_EIMS_MAILBOX;
4731 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4732 	}
4733 
4734 	/*
4735 	 * Now enable all queues, this is done separately to
4736 	 * allow for handling the extended (beyond 32) MSI-X
4737 	 * vectors that can be used by 82599
4738 	 */
4739 	for (int i = 0; i < sc->num_rx_queues; i++, que++)
4740 		ixgbe_enable_queue(sc, que->msix);
4741 
4742 	IXGBE_WRITE_FLUSH(hw);
4743 
4744 } /* ixgbe_if_enable_intr */
4745 
4746 /************************************************************************
4747  * ixgbe_if_disable_intr
4748  ************************************************************************/
4749 static void
4750 ixgbe_if_disable_intr(if_ctx_t ctx)
4751 {
4752 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4753 
4754 	if (sc->intr_type == IFLIB_INTR_MSIX)
4755 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
4756 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
4757 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
4758 	} else {
4759 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
4760 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
4761 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
4762 	}
4763 	IXGBE_WRITE_FLUSH(&sc->hw);
4764 
4765 } /* ixgbe_if_disable_intr */
4766 
4767 /************************************************************************
4768  * ixgbe_link_intr_enable
4769  ************************************************************************/
4770 static void
4771 ixgbe_link_intr_enable(if_ctx_t ctx)
4772 {
4773 	struct ixgbe_hw *hw =
4774 	    &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
4775 
4776 	/* Re-enable other interrupts */
4777 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
4778 } /* ixgbe_link_intr_enable */
4779 
4780 /************************************************************************
4781  * ixgbe_if_rx_queue_intr_enable
4782  ************************************************************************/
4783 static int
4784 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
4785 {
4786 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4787 	struct ix_rx_queue *que = &sc->rx_queues[rxqid];
4788 
4789 	ixgbe_enable_queue(sc, que->msix);
4790 
4791 	return (0);
4792 } /* ixgbe_if_rx_queue_intr_enable */
4793 
4794 /************************************************************************
4795  * ixgbe_enable_queue
4796  ************************************************************************/
4797 static void
4798 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
4799 {
4800 	struct ixgbe_hw *hw = &sc->hw;
4801 	u64 queue = 1ULL << vector;
4802 	u32 mask;
4803 
4804 	if (hw->mac.type == ixgbe_mac_82598EB) {
4805 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4806 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4807 	} else {
4808 		mask = (queue & 0xFFFFFFFF);
4809 		if (mask)
4810 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4811 		mask = (queue >> 32);
4812 		if (mask)
4813 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4814 	}
4815 } /* ixgbe_enable_queue */
4816 
4817 /************************************************************************
4818  * ixgbe_disable_queue
4819  ************************************************************************/
4820 static void
4821 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
4822 {
4823 	struct ixgbe_hw *hw = &sc->hw;
4824 	u64 queue = 1ULL << vector;
4825 	u32 mask;
4826 
4827 	if (hw->mac.type == ixgbe_mac_82598EB) {
4828 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4829 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
4830 	} else {
4831 		mask = (queue & 0xFFFFFFFF);
4832 		if (mask)
4833 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
4834 		mask = (queue >> 32);
4835 		if (mask)
4836 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
4837 	}
4838 } /* ixgbe_disable_queue */
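
[Editor's note: both helpers above split a 64-bit per-vector bit across the two 32-bit EIMS_EX/EIMC_EX halves so MACs newer than the 82598 can address more than 32 MSI-X vectors. A standalone sketch of that split follows; printf stands in for the register writes.]

	#include <stdint.h>
	#include <stdio.h>

	static void
	split_queue_mask(uint32_t vector)
	{
		uint64_t queue = 1ULL << vector;
		uint32_t lo = (uint32_t)(queue & 0xFFFFFFFF);
		uint32_t hi = (uint32_t)(queue >> 32);

		if (lo)
			printf("EIMS_EX(0) <- 0x%08x\n", (unsigned)lo); /* vectors 0..31 */
		if (hi)
			printf("EIMS_EX(1) <- 0x%08x\n", (unsigned)hi); /* vectors 32..63 */
	}

	int
	main(void)
	{
		split_queue_mask(5);	/* sets only the low half */
		split_queue_mask(40);	/* sets only the high half */
		return (0);
	}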
4839 
4840 /************************************************************************
4841  * ixgbe_intr - Legacy Interrupt Service Routine
4842  ************************************************************************/
4843 int
4844 ixgbe_intr(void *arg)
4845 {
4846 	struct ixgbe_softc *sc = arg;
4847 	struct ix_rx_queue *que = sc->rx_queues;
4848 	struct ixgbe_hw *hw = &sc->hw;
4849 	if_ctx_t ctx = sc->ctx;
4850 	u32 eicr, eicr_mask;
4851 
4852 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4853 
4854 	++que->irqs;
4855 	if (eicr == 0) {
4856 		ixgbe_if_enable_intr(ctx);
4857 		return (FILTER_HANDLED);
4858 	}
4859 
4860 	/* Check for fan failure */
4861 	if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
4862 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
4863 		device_printf(sc->dev,
4864 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4865 		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
4866 		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4867 	}
4868 
4869 	/* Link status change */
4870 	if (eicr & IXGBE_EICR_LSC) {
4871 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
4872 		iflib_admin_intr_deferred(ctx);
4873 	}
4874 
4875 	if (ixgbe_is_sfp(hw)) {
4876 		/* Pluggable optics-related interrupt */
4877 		if (hw->mac.type >= ixgbe_mac_X540)
4878 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
4879 		else
4880 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4881 
4882 		if (eicr & eicr_mask) {
4883 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
4884 			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
4885 		}
4886 
4887 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
4888 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4889 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
4890 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4891 			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4892 		}
4893 	}
4894 
4895 	/* External PHY interrupt */
4896 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
4897 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
4898 		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
4899 	}
4900 
4901 	return (FILTER_SCHEDULE_THREAD);
4902 } /* ixgbe_intr */
4903 
4904 /************************************************************************
4905  * ixgbe_free_pci_resources
4906  ************************************************************************/
4907 static void
4908 ixgbe_free_pci_resources(if_ctx_t ctx)
4909 {
4910 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
4911 	struct ix_rx_queue *que = sc->rx_queues;
4912 	device_t dev = iflib_get_dev(ctx);
4913 
4914 	/* Release all MSI-X queue resources */
4915 	if (sc->intr_type == IFLIB_INTR_MSIX)
4916 		iflib_irq_free(ctx, &sc->irq);
4917 
4918 	if (que != NULL) {
4919 		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
4920 			iflib_irq_free(ctx, &que->que_irq);
4921 		}
4922 	}
4923 
4924 	if (sc->pci_mem != NULL)
4925 		bus_release_resource(dev, SYS_RES_MEMORY,
4926 		    rman_get_rid(sc->pci_mem), sc->pci_mem);
4927 } /* ixgbe_free_pci_resources */
4928 
4929 /************************************************************************
4930  * ixgbe_sysctl_flowcntl
4931  *
4932  *   SYSCTL wrapper around setting Flow Control
4933  ************************************************************************/
4934 static int
4935 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4936 {
4937 	struct ixgbe_softc *sc;
4938 	int error, fc;
4939 
4940 	sc = (struct ixgbe_softc *)arg1;
4941 	fc = sc->hw.fc.requested_mode;
4942 
4943 	error = sysctl_handle_int(oidp, &fc, 0, req);
4944 	if ((error) || (req->newptr == NULL))
4945 		return (error);
4946 
4947 	/* Don't bother if it's not changed */
4948 	if (fc == sc->hw.fc.current_mode)
4949 		return (0);
4950 
4951 	return ixgbe_set_flowcntl(sc, fc);
4952 } /* ixgbe_sysctl_flowcntl */
4953 
4954 /************************************************************************
4955  * ixgbe_set_flowcntl - Set flow control
4956  *
4957  *   Flow control values:
4958  *     0 - off
4959  *     1 - rx pause
4960  *     2 - tx pause
4961  *     3 - full
4962  ************************************************************************/
4963 static int
4964 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4965 {
4966 	switch (fc) {
4967 	case ixgbe_fc_rx_pause:
4968 	case ixgbe_fc_tx_pause:
4969 	case ixgbe_fc_full:
4970 		if (sc->num_rx_queues > 1)
4971 			ixgbe_disable_rx_drop(sc);
4972 		break;
4973 	case ixgbe_fc_none:
4974 		if (sc->num_rx_queues > 1)
4975 			ixgbe_enable_rx_drop(sc);
4976 		break;
4977 	default:
4978 		return (EINVAL);
4979 	}
4980 
4981 	sc->hw.fc.requested_mode = fc;
4982 
4983 	/* Don't autoneg if forcing a value */
4984 	sc->hw.fc.disable_fc_autoneg = true;
4985 	ixgbe_fc_enable(&sc->hw);
4986 
4987 	return (0);
4988 } /* ixgbe_set_flowcntl */
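
[Editor's note: note the interlock above: RX drop is enabled only when flow control is off, and only with multiple queues, so a full ring drops packets rather than pausing the whole link. A hedged sketch of just that policy decision, using a local enum rather than the driver's types:]

	#include <stdbool.h>
	#include <stdio.h>

	enum fc_mode { FC_NONE = 0, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

	/* Mirrors the decision in ixgbe_set_flowcntl() above: drop is
	 * only useful with multiqueue, and only safe with fc disabled. */
	static bool
	rx_drop_should_enable(int num_rx_queues, enum fc_mode fc)
	{
		return (num_rx_queues > 1 && fc == FC_NONE);
	}

	int
	main(void)
	{
		printf("%d\n", rx_drop_should_enable(8, FC_NONE)); /* 1: drop on */
		printf("%d\n", rx_drop_should_enable(8, FC_FULL)); /* 0: drop off */
		return (0);
	}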
4989 
4990 /************************************************************************
4991  * ixgbe_enable_rx_drop
4992  *
4993  *   Enable the hardware to drop packets when the buffer is
4994  *   full. This is useful with multiqueue, so that no single
4995  *   queue being full stalls the entire RX engine. We only
4996  *   enable this when Multiqueue is enabled AND Flow Control
4997  *   is disabled.
4998  ************************************************************************/
4999 static void
5000 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
5001 {
5002 	struct ixgbe_hw *hw = &sc->hw;
5003 	struct rx_ring *rxr;
5004 	u32 srrctl;
5005 
5006 	for (int i = 0; i < sc->num_rx_queues; i++) {
5007 		rxr = &sc->rx_queues[i].rxr;
5008 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5009 		srrctl |= IXGBE_SRRCTL_DROP_EN;
5010 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5011 	}
5012 
5013 	/* enable drop for each vf */
5014 	for (int i = 0; i < sc->num_vfs; i++) {
5015 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5016 		    (IXGBE_QDE_WRITE |
5017 		    (i << IXGBE_QDE_IDX_SHIFT) |
5018 		    IXGBE_QDE_ENABLE));
5019 	}
5020 } /* ixgbe_enable_rx_drop */
5021 
5022 /************************************************************************
5023  * ixgbe_disable_rx_drop
5024  ************************************************************************/
5025 static void
5026 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
5027 {
5028 	struct ixgbe_hw *hw = &sc->hw;
5029 	struct rx_ring *rxr;
5030 	u32 srrctl;
5031 
5032 	for (int i = 0; i < sc->num_rx_queues; i++) {
5033 		rxr = &sc->rx_queues[i].rxr;
5034 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5035 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5036 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5037 	}
5038 
5039 	/* disable drop for each vf */
5040 	for (int i = 0; i < sc->num_vfs; i++) {
5041 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5042 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5043 	}
5044 } /* ixgbe_disable_rx_drop */
5045 
5046 /************************************************************************
5047  * ixgbe_sysctl_advertise
5048  *
5049  *   SYSCTL wrapper around setting advertised speed
5050  ************************************************************************/
5051 static int
5052 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
5053 {
5054 	struct ixgbe_softc *sc;
5055 	int error, advertise;
5056 
5057 	sc = (struct ixgbe_softc *)arg1;
5058 	if (atomic_load_acq_int(&sc->recovery_mode))
5059 		return (EPERM);
5060 
5061 	advertise = sc->advertise;
5062 
5063 	error = sysctl_handle_int(oidp, &advertise, 0, req);
5064 	if ((error) || (req->newptr == NULL))
5065 		return (error);
5066 
5067 	return ixgbe_set_advertise(sc, advertise);
5068 } /* ixgbe_sysctl_advertise */
5069 
5070 /************************************************************************
5071  * ixgbe_set_advertise - Control advertised link speed
5072  *
5073  *   Flags:
5074  *     0x1  - advertise 100 Mb
5075  *     0x2  - advertise 1G
5076  *     0x4  - advertise 10G
5077  *     0x8  - advertise 10 Mb (yes, Mb)
5078  *     0x10 - advertise 2.5G (disabled by default)
5079  *     0x20 - advertise 5G (disabled by default)
5080  *
5081  ************************************************************************/
5082 static int
5083 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
5084 {
5085 	device_t dev = iflib_get_dev(sc->ctx);
5086 	struct ixgbe_hw *hw;
5087 	ixgbe_link_speed speed = 0;
5088 	ixgbe_link_speed link_caps = 0;
5089 	s32 err = IXGBE_NOT_IMPLEMENTED;
5090 	bool negotiate = false;
5091 
5092 	/* Checks to validate new value */
5093 	if (sc->advertise == advertise) /* no change */
5094 		return (0);
5095 
5096 	hw = &sc->hw;
5097 
5098 	/* No speed changes for backplane media */
5099 	if (hw->phy.media_type == ixgbe_media_type_backplane)
5100 		return (ENODEV);
5101 
5102 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5103 	      (hw->phy.multispeed_fiber))) {
5104 		device_printf(dev,
5105 		    "Advertised speed can only be set on copper or multispeed"
5106 		    " fiber media types.\n");
5107 		return (EINVAL);
5108 	}
5109 
5110 	if (advertise < 0x1 || advertise > 0x3F) {
5111 		device_printf(dev,
5112 		    "Invalid advertised speed; valid modes are 0x1 through"
5113 		    " 0x3F\n");
5114 		return (EINVAL);
5115 	}
5116 
5117 	if (hw->mac.ops.get_link_capabilities) {
5118 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5119 		    &negotiate);
5120 		if (err != IXGBE_SUCCESS) {
5121 			device_printf(dev,
5122 			    "Unable to determine supported advertise speeds"
5123 			    "\n");
5124 			return (ENODEV);
5125 		}
5126 	}
5127 
5128 	/* Set new value and report new advertised mode */
5129 	if (advertise & 0x1) {
5130 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5131 			device_printf(dev,
5132 			    "Interface does not support 100Mb advertised"
5133 			    " speed\n");
5134 			return (EINVAL);
5135 		}
5136 		speed |= IXGBE_LINK_SPEED_100_FULL;
5137 	}
5138 	if (advertise & 0x2) {
5139 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5140 			device_printf(dev,
5141 			    "Interface does not support 1Gb advertised speed"
5142 			    "\n");
5143 			return (EINVAL);
5144 		}
5145 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
5146 	}
5147 	if (advertise & 0x4) {
5148 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5149 			device_printf(dev,
5150 			    "Interface does not support 10Gb advertised speed"
5151 			    "\n");
5152 			return (EINVAL);
5153 		}
5154 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
5155 	}
5156 	if (advertise & 0x8) {
5157 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5158 			device_printf(dev,
5159 			    "Interface does not support 10Mb advertised speed"
5160 			    "\n");
5161 			return (EINVAL);
5162 		}
5163 		speed |= IXGBE_LINK_SPEED_10_FULL;
5164 	}
5165 	if (advertise & 0x10) {
5166 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5167 			device_printf(dev,
5168 			    "Interface does not support 2.5G advertised speed"
5169 			    "\n");
5170 			return (EINVAL);
5171 		}
5172 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5173 	}
5174 	if (advertise & 0x20) {
5175 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5176 			device_printf(dev,
5177 			    "Interface does not support 5G advertised speed"
5178 			    "\n");
5179 			return (EINVAL);
5180 		}
5181 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
5182 	}
5183 
5184 	hw->mac.autotry_restart = true;
5185 	hw->mac.ops.setup_link(hw, speed, true);
5186 	sc->advertise = advertise;
5187 
5188 	return (0);
5189 } /* ixgbe_set_advertise */
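
[Editor's note: the flag-by-flag checks above translate the sysctl bitmask into link-speed bits; e.g. advertise = 0x6 requests 1G and 10G together. A minimal sketch of that mapping follows; the SPEED_* values are illustrative stand-ins, not the real IXGBE_LINK_SPEED_* constants.]

	#include <stdint.h>
	#include <stdio.h>

	#define SPEED_100M  0x01	/* stand-ins for IXGBE_LINK_SPEED_* */
	#define SPEED_1G    0x02
	#define SPEED_10G   0x04

	static uint32_t
	advertise_to_speed(int advertise)
	{
		uint32_t speed = 0;

		/* Each sysctl flag contributes one speed bit, as in
		 * ixgbe_set_advertise() above (capability checks omitted). */
		if (advertise & 0x1)
			speed |= SPEED_100M;
		if (advertise & 0x2)
			speed |= SPEED_1G;
		if (advertise & 0x4)
			speed |= SPEED_10G;
		return (speed);
	}

	int
	main(void)
	{
		printf("0x%x\n", (unsigned)advertise_to_speed(0x6)); /* 1G + 10G */
		return (0);
	}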
5190 
5191 /************************************************************************
5192  * ixgbe_get_default_advertise - Get default advertised speed settings
5193  *
5194  *   Formatted for sysctl usage.
5195  *   Flags:
5196  *     0x1 - advertise 100 Mb
5197  *     0x2 - advertise 1G
5198  *     0x4 - advertise 10G
5199  *     0x8 - advertise 10 Mb (yes, Mb)
5200  *     0x10 - advertise 2.5G (disabled by default)
5201  *     0x20 - advertise 5G (disabled by default)
5202  ************************************************************************/
5203 static int
5204 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
5205 {
5206 	struct ixgbe_hw *hw = &sc->hw;
5207 	int speed;
5208 	ixgbe_link_speed link_caps = 0;
5209 	s32 err;
5210 	bool negotiate = false;
5211 
5212 	/*
5213 	 * Advertised speed means nothing unless it's copper or
5214 	 * multi-speed fiber
5215 	 */
5216 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5217 	    !(hw->phy.multispeed_fiber))
5218 		return (0);
5219 
5220 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5221 	if (err != IXGBE_SUCCESS)
5222 		return (0);
5223 
5224 	if (hw->mac.type == ixgbe_mac_X550) {
5225 		/*
5226 		 * 2.5G and 5G autonegotiation speeds on X550
5227 		 * are disabled by default due to reported
5228 		 * interoperability issues with some switches.
5229 		 */
5230 		link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
5231 		    IXGBE_LINK_SPEED_5GB_FULL);
5232 	}
5233 
5234 	speed =
5235 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x4  : 0) |
5236 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0) |
5237 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5238 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x2  : 0) |
5239 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x1  : 0) |
5240 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x8  : 0);
5241 
5242 	return speed;
5243 } /* ixgbe_get_default_advertise */
5244 
5245 /************************************************************************
5246  * ixgbe_sysctl_dmac - Manage DMA Coalescing
5247  *
5248  *   Control values:
5249  *     0/1 - off / on (use default value of 1000)
5250  *
5251  *     Legal timer values are:
5252  *     50,100,250,500,1000,2000,5000,10000
5253  *
5254  *     Turning off interrupt moderation will also turn this off.
5255  ************************************************************************/
5256 static int
5257 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
5258 {
5259 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5260 	if_t ifp = iflib_get_ifp(sc->ctx);
5261 	int error;
5262 	u16 newval;
5263 
5264 	newval = sc->dmac;
5265 	error = sysctl_handle_16(oidp, &newval, 0, req);
5266 	if ((error) || (req->newptr == NULL))
5267 		return (error);
5268 
5269 	switch (newval) {
5270 	case 0:
5271 		/* Disabled */
5272 		sc->dmac = 0;
5273 		break;
5274 	case 1:
5275 		/* Enable and use default */
5276 		sc->dmac = 1000;
5277 		break;
5278 	case 50:
5279 	case 100:
5280 	case 250:
5281 	case 500:
5282 	case 1000:
5283 	case 2000:
5284 	case 5000:
5285 	case 10000:
5286 		/* Legal values - allow */
5287 		sc->dmac = newval;
5288 		break;
5289 	default:
5290 		/* Do nothing, illegal value */
5291 		return (EINVAL);
5292 	}
5293 
5294 	/* Re-initialize hardware if it's already running */
5295 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
5296 		if_init(ifp, ifp);
5297 
5298 	return (0);
5299 } /* ixgbe_sysctl_dmac */
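
[Editor's note: the switch above is effectively a whitelist validator: 0 disables DMA coalescing, 1 selects the 1000-usec default, and only the listed watchdog values pass. A standalone version of that validation:]

	#include <stdio.h>

	/* Returns the watchdog value to store, or -1 for an illegal
	 * input; mirrors the value policy of ixgbe_sysctl_dmac() above. */
	static int
	dmac_validate(int newval)
	{
		switch (newval) {
		case 0:
			return (0);	/* disabled */
		case 1:
			return (1000);	/* enable with default timer */
		case 50: case 100: case 250: case 500:
		case 1000: case 2000: case 5000: case 10000:
			return (newval);
		default:
			return (-1);	/* EINVAL in the driver */
		}
	}

	int
	main(void)
	{
		printf("%d %d %d\n", dmac_validate(1), dmac_validate(250),
		    dmac_validate(3));	/* 1000 250 -1 */
		return (0);
	}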
5300 
5301 #ifdef IXGBE_DEBUG
5302 /************************************************************************
5303  * ixgbe_sysctl_power_state
5304  *
5305  *   Sysctl to test power states
5306  *   Values:
5307  *     0      - set device to D0
5308  *     3      - set device to D3
5309  *     (none) - get current device power state
5310  ************************************************************************/
5311 static int
5312 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
5313 {
5314 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5315 	device_t dev = sc->dev;
5316 	int curr_ps, new_ps, error = 0;
5317 
5318 	curr_ps = new_ps = pci_get_powerstate(dev);
5319 
5320 	error = sysctl_handle_int(oidp, &new_ps, 0, req);
5321 	if ((error) || (req->newptr == NULL))
5322 		return (error);
5323 
5324 	if (new_ps == curr_ps)
5325 		return (0);
5326 
5327 	if (new_ps == 3 && curr_ps == 0)
5328 		error = DEVICE_SUSPEND(dev);
5329 	else if (new_ps == 0 && curr_ps == 3)
5330 		error = DEVICE_RESUME(dev);
5331 	else
5332 		return (EINVAL);
5333 
5334 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5335 
5336 	return (error);
5337 } /* ixgbe_sysctl_power_state */
5338 #endif
5339 
5340 /************************************************************************
5341  * ixgbe_sysctl_wol_enable
5342  *
5343  *   Sysctl to enable/disable the WoL capability,
5344  *   if supported by the adapter.
5345  *
5346  *   Values:
5347  *     0 - disabled
5348  *     1 - enabled
5349  ************************************************************************/
5350 static int
5351 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
5352 {
5353 	struct ixgbe_softc  *sc = (struct ixgbe_softc *)arg1;
5354 	struct ixgbe_hw *hw = &sc->hw;
5355 	int new_wol_enabled;
5356 	int error = 0;
5357 
5358 	new_wol_enabled = hw->wol_enabled;
5359 	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5360 	if ((error) || (req->newptr == NULL))
5361 		return (error);
5362 	new_wol_enabled = !!(new_wol_enabled);
5363 	if (new_wol_enabled == hw->wol_enabled)
5364 		return (0);
5365 
5366 	if (new_wol_enabled > 0 && !sc->wol_support)
5367 		return (ENODEV);
5368 	else
5369 		hw->wol_enabled = new_wol_enabled;
5370 
5371 	return (0);
5372 } /* ixgbe_sysctl_wol_enable */
5373 
5374 /************************************************************************
5375  * ixgbe_sysctl_wufc - Wake Up Filter Control
5376  *
5377  *   Sysctl to enable/disable the types of packets that the
5378  *   adapter will wake up on upon receipt.
5379  *   Flags:
5380  *     0x1  - Link Status Change
5381  *     0x2  - Magic Packet
5382  *     0x4  - Direct Exact
5383  *     0x8  - Directed Multicast
5384  *     0x10 - Broadcast
5385  *     0x20 - ARP/IPv4 Request Packet
5386  *     0x40 - Direct IPv4 Packet
5387  *     0x80 - Direct IPv6 Packet
5388  *
5389  *   Settings not listed above will cause the sysctl to return an error.
5390  ************************************************************************/
5391 static int
5392 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5393 {
5394 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5395 	int error = 0;
5396 	u32 new_wufc;
5397 
5398 	new_wufc = sc->wufc;
5399 
5400 	error = sysctl_handle_32(oidp, &new_wufc, 0, req);
5401 	if ((error) || (req->newptr == NULL))
5402 		return (error);
5403 	if (new_wufc == sc->wufc)
5404 		return (0);
5405 
5406 	if (new_wufc & 0xffffff00)
5407 		return (EINVAL);
5408 
5409 	new_wufc &= 0xff;
5410 	new_wufc |= (0xffffff & sc->wufc);
5411 	sc->wufc = new_wufc;
5412 
5413 	return (0);
5414 } /* ixgbe_sysctl_wufc */
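
[Editor's note: the handler above rejects any bits outside the low byte (the 0xffffff00 check) and then merges the user's low 8 flag bits with the preserved upper bits of the stored value. A small sketch of that merge:]

	#include <stdint.h>
	#include <stdio.h>

	/* Hedged illustration of the update in ixgbe_sysctl_wufc():
	 * only the low 8 bits come from the user; upper bits are kept. */
	static int
	wufc_update(uint32_t cur, uint32_t new_wufc, uint32_t *out)
	{
		if (new_wufc & 0xffffff00)
			return (-1);		/* EINVAL in the driver */
		new_wufc &= 0xff;
		new_wufc |= (0xffffff & cur);	/* keep stored upper bits */
		*out = new_wufc;
		return (0);
	}

	int
	main(void)
	{
		uint32_t out;

		if (wufc_update(0x12345600, 0x22, &out) == 0)
			printf("0x%08x\n", (unsigned)out);	/* 0x00345622 */
		return (0);
	}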
5415 
5416 #ifdef IXGBE_DEBUG
5417 /************************************************************************
5418  * ixgbe_sysctl_print_rss_config
5419  ************************************************************************/
5420 static int
5421 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5422 {
5423 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5424 	struct ixgbe_hw *hw = &sc->hw;
5425 	device_t dev = sc->dev;
5426 	struct sbuf *buf;
5427 	int error = 0, reta_size;
5428 	u32 reg;
5429 
5430 	if (atomic_load_acq_int(&sc->recovery_mode))
5431 		return (EPERM);
5432 
5433 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5434 	if (!buf) {
5435 		device_printf(dev, "Could not allocate sbuf for output.\n");
5436 		return (ENOMEM);
5437 	}
5438 
5439 	// TODO: use sbufs to make a string to print out
5440 	/* Set multiplier for RETA setup and table size based on MAC */
5441 	switch (sc->hw.mac.type) {
5442 	case ixgbe_mac_X550:
5443 	case ixgbe_mac_X550EM_x:
5444 	case ixgbe_mac_X550EM_a:
5445 		reta_size = 128;
5446 		break;
5447 	default:
5448 		reta_size = 32;
5449 		break;
5450 	}
5451 
5452 	/* Print out the redirection table */
5453 	sbuf_cat(buf, "\n");
5454 	for (int i = 0; i < reta_size; i++) {
5455 		if (i < 32) {
5456 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5457 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5458 		} else {
5459 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5460 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5461 		}
5462 	}
5463 
5464 	// TODO: print more config
5465 
5466 	error = sbuf_finish(buf);
5467 	if (error)
5468 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5469 
5470 	sbuf_delete(buf);
5471 
5472 	return (0);
5473 } /* ixgbe_sysctl_print_rss_config */
5474 #endif /* IXGBE_DEBUG */
5475 
5476 /************************************************************************
5477  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5478  *
5479  *   For X552/X557-AT devices using an external PHY
5480  ************************************************************************/
5481 static int
5482 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
5483 {
5484 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5485 	struct ixgbe_hw *hw = &sc->hw;
5486 	u16 reg;
5487 
5488 	if (atomic_load_acq_int(&sc->recovery_mode))
5489 		return (EPERM);
5490 
5491 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5492 		device_printf(iflib_get_dev(sc->ctx),
5493 		    "Device has no supported external thermal sensor.\n");
5494 		return (ENODEV);
5495 	}
5496 
5497 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5498 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5499 		device_printf(iflib_get_dev(sc->ctx),
5500 		    "Error reading from PHY's current temperature register"
5501 		    "\n");
5502 		return (EAGAIN);
5503 	}
5504 
5505 	/* Shift temp for output */
5506 	reg = reg >> 8;
5507 
5508 	return (sysctl_handle_16(oidp, NULL, reg, req));
5509 } /* ixgbe_sysctl_phy_temp */
5510 
5511 /************************************************************************
5512  * ixgbe_sysctl_phy_overtemp_occurred
5513  *
5514  *   Reports (directly from the PHY) whether the current PHY
5515  *   temperature is over the overtemp threshold.
5516  ************************************************************************/
5517 static int
5518 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
5519 {
5520 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5521 	struct ixgbe_hw *hw = &sc->hw;
5522 	u16 reg;
5523 
5524 	if (atomic_load_acq_int(&sc->recovery_mode))
5525 		return (EPERM);
5526 
5527 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5528 		device_printf(iflib_get_dev(sc->ctx),
5529 		    "Device has no supported external thermal sensor.\n");
5530 		return (ENODEV);
5531 	}
5532 
5533 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5534 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5535 		device_printf(iflib_get_dev(sc->ctx),
5536 		    "Error reading from PHY's temperature status register\n");
5537 		return (EAGAIN);
5538 	}
5539 
5540 	/* Get occurrence bit */
5541 	reg = !!(reg & 0x4000);
5542 
5543 	return (sysctl_handle_16(oidp, 0, reg, req));
5544 } /* ixgbe_sysctl_phy_overtemp_occurred */
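
[Editor's note: both PHY sysctls above decode a 16-bit MDIO register: the temperature lives in the high byte (hence the >> 8), and the overtemp event is a single status bit (0x4000). A hedged sketch of both decodes, with hypothetical register values:]

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int
	phy_temp(uint16_t reg)
	{
		return (reg >> 8);		/* degrees in the high byte */
	}

	static int
	phy_overtemp_occurred(uint16_t reg)
	{
		return (!!(reg & 0x4000));	/* occurrence bit */
	}

	int
	main(void)
	{
		uint16_t temp_reg = 0x1e00;	/* hypothetical: 30 C */
		uint16_t stat_reg = 0x4000;	/* hypothetical: overtemp set */

		printf("temp=%u overtemp=%d\n", phy_temp(temp_reg),
		    phy_overtemp_occurred(stat_reg));
		return (0);
	}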
5545 
5546 /************************************************************************
5547  * ixgbe_sysctl_eee_state
5548  *
5549  *   Sysctl to set EEE power saving feature
5550  *   Values:
5551  *     0      - disable EEE
5552  *     1      - enable EEE
5553  *     (none) - get current device EEE state
5554  ************************************************************************/
5555 static int
5556 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
5557 {
5558 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5559 	device_t dev = sc->dev;
5560 	if_t ifp = iflib_get_ifp(sc->ctx);
5561 	int curr_eee, new_eee, error = 0;
5562 	s32 retval;
5563 
5564 	if (atomic_load_acq_int(&sc->recovery_mode))
5565 		return (EPERM);
5566 
5567 	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
5568 
5569 	error = sysctl_handle_int(oidp, &new_eee, 0, req);
5570 	if ((error) || (req->newptr == NULL))
5571 		return (error);
5572 
5573 	/* Nothing to do */
5574 	if (new_eee == curr_eee)
5575 		return (0);
5576 
5577 	/* Not supported */
5578 	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
5579 		return (EINVAL);
5580 
5581 	/* Bounds checking */
5582 	if ((new_eee < 0) || (new_eee > 1))
5583 		return (EINVAL);
5584 
5585 	retval = ixgbe_setup_eee(&sc->hw, new_eee);
5586 	if (retval) {
5587 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5588 		return (EINVAL);
5589 	}
5590 
5591 	/* Restart auto-neg */
5592 	if_init(ifp, ifp);
5593 
5594 	device_printf(dev, "New EEE state: %d\n", new_eee);
5595 
5596 	/* Cache new value */
5597 	if (new_eee)
5598 		sc->feat_en |= IXGBE_FEATURE_EEE;
5599 	else
5600 		sc->feat_en &= ~IXGBE_FEATURE_EEE;
5601 
5602 	return (error);
5603 } /* ixgbe_sysctl_eee_state */
5604 
5605 static int
5606 ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
5607 {
5608 	struct ixgbe_softc *sc;
5609 	u32 reg, val, shift;
5610 	int error, mask;
5611 
5612 	sc = oidp->oid_arg1;
5613 	switch (oidp->oid_arg2) {
5614 	case 0:
5615 		reg = IXGBE_DTXTCPFLGL;
5616 		shift = 0;
5617 		break;
5618 	case 1:
5619 		reg = IXGBE_DTXTCPFLGL;
5620 		shift = 16;
5621 		break;
5622 	case 2:
5623 		reg = IXGBE_DTXTCPFLGH;
5624 		shift = 0;
5625 		break;
5626 	default:
5627 		return (EINVAL);
5628 		break;
5629 	}
5630 	val = IXGBE_READ_REG(&sc->hw, reg);
5631 	mask = (val >> shift) & 0xfff;
5632 	error = sysctl_handle_int(oidp, &mask, 0, req);
5633 	if (error != 0 || req->newptr == NULL)
5634 		return (error);
5635 	if (mask < 0 || mask > 0xfff)
5636 		return (EINVAL);
5637 	val = (val & ~(0xfff << shift)) | (mask << shift);
5638 	IXGBE_WRITE_REG(&sc->hw, reg, val);
5639 	return (0);
5640 }
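
[Editor's note: the handler above performs a read-modify-write of a 12-bit TCP-flags field placed at a per-case shift within DTXTCPFLGL/DTXTCPFLGH. The field update itself, isolated:]

	#include <stdint.h>
	#include <stdio.h>

	/* Clear the 12-bit field at 'shift', then OR in 'mask', exactly
	 * as ixgbe_sysctl_tso_tcp_flags_mask() does before the write. */
	static uint32_t
	set_tcp_flags_field(uint32_t val, uint32_t mask, unsigned int shift)
	{
		return ((val & ~(0xfffU << shift)) | (mask << shift));
	}

	int
	main(void)
	{
		uint32_t reg = 0xdeadbeef;

		printf("0x%08x\n",
		    (unsigned)set_tcp_flags_field(reg, 0x123, 16)); /* 0xd123beef */
		return (0);
	}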
5641 
5642 /************************************************************************
5643  * ixgbe_init_device_features
5644  ************************************************************************/
5645 static void
5646 ixgbe_init_device_features(struct ixgbe_softc *sc)
5647 {
5648 	sc->feat_cap = IXGBE_FEATURE_NETMAP |
5649 	    IXGBE_FEATURE_RSS |
5650 	    IXGBE_FEATURE_MSI |
5651 	    IXGBE_FEATURE_MSIX |
5652 	    IXGBE_FEATURE_LEGACY_IRQ;
5653 
5654 	/* Set capabilities first... */
5655 	switch (sc->hw.mac.type) {
5656 	case ixgbe_mac_82598EB:
5657 		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
5658 			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5659 		break;
5660 	case ixgbe_mac_X540:
5661 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5662 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
5663 		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
5664 		    (sc->hw.bus.func == 0))
5665 			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
5666 		break;
5667 	case ixgbe_mac_X550:
5668 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
5669 		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5670 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5671 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
5672 		break;
5673 	case ixgbe_mac_X550EM_x:
5674 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
5675 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5676 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
5677 		if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
5678 			sc->feat_cap |= IXGBE_FEATURE_EEE;
5679 		break;
5680 	case ixgbe_mac_X550EM_a:
5681 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
5682 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5683 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
5684 		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5685 		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
5686 		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
5687 			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5688 			sc->feat_cap |= IXGBE_FEATURE_EEE;
5689 		}
5690 		break;
5691 	case ixgbe_mac_82599EB:
5692 		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5693 		sc->feat_cap |= IXGBE_FEATURE_FDIR;
5694 		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
5695 		    (sc->hw.bus.func == 0))
5696 			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
5697 		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
5698 			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5699 		break;
5700 	case ixgbe_mac_E610:
5701 		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
5702 		sc->feat_cap |= IXGBE_FEATURE_DBG_DUMP;
5703 		break;
5704 	default:
5705 		break;
5706 	}
5707 
5708 	/* Enabled by default... */
5709 	/* Fan failure detection */
5710 	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
5711 		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
5712 	/* Netmap */
5713 	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
5714 		sc->feat_en |= IXGBE_FEATURE_NETMAP;
5715 	/* EEE */
5716 	if (sc->feat_cap & IXGBE_FEATURE_EEE)
5717 		sc->feat_en |= IXGBE_FEATURE_EEE;
5718 	/* Thermal Sensor */
5719 	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
5720 		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
5721 	/* Recovery mode */
5722 	if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
5723 		sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
5724 	/* FW Debug Dump */
5725 	if (sc->feat_cap & IXGBE_FEATURE_DBG_DUMP)
5726 		sc->feat_en |= IXGBE_FEATURE_DBG_DUMP;
5727 
5728 	/* Enabled via global sysctl... */
5729 	/* Flow Director */
5730 	if (ixgbe_enable_fdir) {
5731 		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
5732 			sc->feat_en |= IXGBE_FEATURE_FDIR;
5733 		else
5734 			device_printf(sc->dev,
5735 			    "Device does not support Flow Director."
5736 			    " Leaving disabled.");
5737 	}
5738 	/*
5739 	 * Message Signal Interrupts - Extended (MSI-X)
5740 	 * Normal MSI is only enabled if MSI-X calls fail.
5741 	 */
5742 	if (!ixgbe_enable_msix)
5743 		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
5744 	/* Receive-Side Scaling (RSS) */
5745 	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
5746 		sc->feat_en |= IXGBE_FEATURE_RSS;
5747 
5748 	/* Disable features with unmet dependencies... */
5749 	/* No MSI-X */
5750 	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
5751 		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
5752 		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5753 		sc->feat_en &= ~IXGBE_FEATURE_RSS;
5754 		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
5755 	}
5756 } /* ixgbe_init_device_features */
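
[Editor's note: the function above keeps two words, feat_cap (what the hardware could do) and feat_en (what is actually on), and the final block prunes anything whose MSI-X dependency is unmet. A reduced sketch of that cap/en discipline, with illustrative stand-in flags:]

	#include <stdint.h>
	#include <stdio.h>

	#define FEAT_MSIX  0x1	/* stand-ins for IXGBE_FEATURE_* */
	#define FEAT_RSS   0x2
	#define FEAT_SRIOV 0x4

	static void
	prune_features(uint32_t *cap, uint32_t *en)
	{
		/* RSS and SR-IOV both require MSI-X; without it, drop
		 * them from both the capability and the enabled set. */
		if (!(*cap & FEAT_MSIX)) {
			*cap &= ~(FEAT_RSS | FEAT_SRIOV);
			*en &= ~(FEAT_RSS | FEAT_SRIOV);
		}
	}

	int
	main(void)
	{
		uint32_t cap = FEAT_RSS | FEAT_SRIOV;	/* MSI-X missing */
		uint32_t en = FEAT_RSS;

		prune_features(&cap, &en);
		printf("cap=0x%x en=0x%x\n", (unsigned)cap, (unsigned)en);
		return (0);	/* prints cap=0x0 en=0x0 */
	}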
5757 
5758 /************************************************************************
5759  * ixgbe_check_fan_failure
5760  ************************************************************************/
5761 static void
5762 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
5763 {
5764 	u32 mask;
5765 
5766 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
5767 	    IXGBE_ESDP_SDP1;
5768 
5769 	if (reg & mask)
5770 		device_printf(sc->dev,
5771 		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
5772 } /* ixgbe_check_fan_failure */
5773 
5774 /************************************************************************
5775  * ixgbe_sbuf_fw_version
5776  ************************************************************************/
5777 static void
5778 ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
5779 {
5780 	struct ixgbe_nvm_version nvm_ver = {0};
5781 	const char *space = "";
5782 
5783 	ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
5784 	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
5785 	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack, a build ID in Intel's SCM */
5786 	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
5787 
5788 	/* FW version */
5789 	if ((nvm_ver.phy_fw_maj == 0x0 &&
5790 	    nvm_ver.phy_fw_min == 0x0 &&
5791 	    nvm_ver.phy_fw_id == 0x0) ||
5792 		(nvm_ver.phy_fw_maj == 0xF &&
5793 	    nvm_ver.phy_fw_min == 0xFF &&
5794 	    nvm_ver.phy_fw_id == 0xF)) {
5795 		/* If major, minor and id numbers are set to 0,
5796 		 * reading FW version is unsupported. If major number
5797 		 * is set to 0xF, minor is set to 0xFF and id is set
5798 		 * to 0xF, this means that number read is invalid. */
5799 	} else
5800 		sbuf_printf(buf, "fw %d.%d.%d ",
5801 		    nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
5802 		    nvm_ver.phy_fw_id);
5803 
5804 	/* NVM version */
5805 	if ((nvm_ver.nvm_major == 0x0 &&
5806 	    nvm_ver.nvm_minor == 0x0 &&
5807 	    nvm_ver.nvm_id == 0x0) ||
5808 		(nvm_ver.nvm_major == 0xF &&
5809 	    nvm_ver.nvm_minor == 0xFF &&
5810 	    nvm_ver.nvm_id == 0xF)) {
5811 		/* If major, minor and id numbers are set to 0,
5812 		 * reading NVM version is unsupported. If major number
5813 		 * is set to 0xF, minor is set to 0xFF and id is set
5814 		 * to 0xF, this means that number read is invalid. */
5815 	} else
5816 		sbuf_printf(buf, "nvm %x.%02x.%x ",
5817 		    nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);
5818 
5819 	if (nvm_ver.oem_valid) {
5820 		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
5821 		    nvm_ver.oem_minor, nvm_ver.oem_release);
5822 		space = " ";
5823 	}
5824 
5825 	if (nvm_ver.or_valid) {
5826 		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
5827 		    space, nvm_ver.or_major, nvm_ver.or_build,
5828 		    nvm_ver.or_patch);
5829 		space = " ";
5830 	}
5831 
5832 	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
5833 	    NVM_VER_INVALID | 0xFFFFFFFF)) {
5834 		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
5835 	}
5836 } /* ixgbe_sbuf_fw_version */
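
[Editor's note: the two guarded branches above treat all-zero fields as "reading unsupported" and the all-ones pattern (0xF / 0xFF / 0xF) as "read invalid", printing nothing in either case. A compact sketch of that sentinel test:]

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Returns true when a maj/min/id triple is printable, i.e.
	 * neither the unsupported (all-zero) nor the invalid
	 * (0xF/0xFF/0xF) sentinel used in ixgbe_sbuf_fw_version(). */
	static bool
	version_printable(uint8_t maj, uint8_t min, uint8_t id)
	{
		if (maj == 0x0 && min == 0x0 && id == 0x0)
			return (false);
		if (maj == 0xF && min == 0xFF && id == 0xF)
			return (false);
		return (true);
	}

	int
	main(void)
	{
		printf("%d %d %d\n",
		    version_printable(0, 0, 0),		/* 0: unsupported */
		    version_printable(0xF, 0xFF, 0xF),	/* 0: invalid */
		    version_printable(3, 30, 2));	/* 1: printable */
		return (0);
	}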
5837 
5838 /************************************************************************
5839  * ixgbe_print_fw_version
5840  ************************************************************************/
5841 static void
5842 ixgbe_print_fw_version(if_ctx_t ctx)
5843 {
5844 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
5845 	struct ixgbe_hw *hw = &sc->hw;
5846 	device_t dev = sc->dev;
5847 	struct sbuf *buf;
5848 	int error = 0;
5849 
5850 	buf = sbuf_new_auto();
5851 	if (!buf) {
5852 		device_printf(dev, "Could not allocate sbuf for output.\n");
5853 		return;
5854 	}
5855 
5856 	ixgbe_sbuf_fw_version(hw, buf);
5857 
5858 	error = sbuf_finish(buf);
5859 	if (error)
5860 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5861 	else if (sbuf_len(buf))
5862 		device_printf(dev, "%s\n", sbuf_data(buf));
5863 
5864 	sbuf_delete(buf);
5865 } /* ixgbe_print_fw_version */
5866 
5867 /************************************************************************
5868  * ixgbe_sysctl_print_fw_version
5869  ************************************************************************/
5870 static int
5871 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
5872 {
5873 	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5874 	struct ixgbe_hw *hw = &sc->hw;
5875 	device_t dev = sc->dev;
5876 	struct sbuf *buf;
5877 	int error = 0;
5878 
5879 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5880 	if (!buf) {
5881 		device_printf(dev, "Could not allocate sbuf for output.\n");
5882 		return (ENOMEM);
5883 	}
5884 
5885 	ixgbe_sbuf_fw_version(hw, buf);
5886 
5887 	error = sbuf_finish(buf);
5888 	if (error)
5889 		device_printf(dev, "Error finishing sbuf: %d\n", error);
5890 
5891 	sbuf_delete(buf);
5892 
5893 	return (0);
5894 } /* ixgbe_sysctl_print_fw_version */
5895