/*****************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*****************************************************************************/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "5.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const pci_vendor_info_t ixgbe_vendor_info_array[] =
{
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,
            "Intel(R) 82598EB AF (Dual Fiber)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,
            "Intel(R) 82598EB AF (Fiber)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,
            "Intel(R) 82598EB AT (CX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,
            "Intel(R) 82598EB AT"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,
            "Intel(R) 82598EB AT2"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,
            "Intel(R) 82598EB AF DA (Dual Fiber)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,
            "Intel(R) 82598EB AT (Dual CX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,
            "Intel(R) 82598EB AF (Dual Fiber LR)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,
            "Intel(R) 82598EB AF (Dual Fiber SR)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,
            "Intel(R) 82598EB LOM"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,
            "Intel(R) X520 82599 (KX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,
            "Intel(R) X520 82599 (KX4 Mezzanine)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,
            "Intel(R) X520 82599ES (SFI/SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,
            "Intel(R) X520 82599 (XAUI/BX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,
            "Intel(R) X520 82599 (Dual CX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,
            "Intel(R) X520-T 82599 LOM"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS,
            "Intel(R) X520 82599 LS"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,
            "Intel(R) X520 82599 (Combined Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,
            "Intel(R) X520 82599 (Backplane w/FCoE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,
            "Intel(R) X520 82599 (Dual SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,
            "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,
            "Intel(R) X520-1 82599EN (SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,
            "Intel(R) X520-4 82599 (Quad SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,
            "Intel(R) X520-Q1 82599 (QSFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,
            "Intel(R) X540-AT2"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,
            "Intel(R) X552 (KR Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,
            "Intel(R) X552 (KX4 Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,
            "Intel(R) X552/X557-AT (10GBASE-T)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,
            "Intel(R) X552 (1000BASE-T)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP,
            "Intel(R) X552 (SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR,
            "Intel(R) X553 (KR Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L,
            "Intel(R) X553 L (KR Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP,
            "Intel(R) X553 (SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N,
            "Intel(R) X553 N (SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII,
            "Intel(R) X553 (1GbE SGMII)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L,
            "Intel(R) X553 L (1GbE SGMII)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T,
            "Intel(R) X553/X557-AT (10GBASE-T)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T,
            "Intel(R) X553 (1GbE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L,
            "Intel(R) X553 L (1GbE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS,
            "Intel(R) X540-T2 (Bypass)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
            "Intel(R) X520 82599 (Bypass)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE,
            "Intel(R) E610 (Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP,
            "Intel(R) E610 (SFP)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T,
            "Intel(R) E610 (2.5 GbE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T,
            "Intel(R) E610 (10 GbE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII,
            "Intel(R) E610 (SGMII)"),
        /* required last entry */
        PVID_END
};

static void *ixgbe_register(device_t);
static int ixgbe_if_attach_pre(if_ctx_t);
static int ixgbe_if_attach_post(if_ctx_t);
static int ixgbe_if_detach(if_ctx_t);
static int ixgbe_if_shutdown(if_ctx_t);
static int ixgbe_if_suspend(if_ctx_t);
static int ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int ixgbe_if_media_change(if_ctx_t);
static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int ixgbe_if_promisc_set(if_ctx_t, int);
static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static const char *ixgbe_link_speed_to_str(u32 link_speed);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int ixgbe_allocate_pci_resources(if_ctx_t);
static int ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int ixgbe_msix_link(void *);
static int ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_fw_mode_timer(void *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int ixgbe_set_advertise(struct ixgbe_softc *, int);
static int ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
static void ixgbe_handle_fw_event(void *);

static int ixgbe_enable_lse(struct ixgbe_softc *sc);
static int ixgbe_disable_lse(struct ixgbe_softc *sc);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
        /* Device interface */
        DEVMETHOD(device_register, ixgbe_register),
        DEVMETHOD(device_probe, iflib_device_probe),
        DEVMETHOD(device_attach, iflib_device_attach),
        DEVMETHOD(device_detach, iflib_device_detach),
        DEVMETHOD(device_shutdown, iflib_device_shutdown),
        DEVMETHOD(device_suspend, iflib_device_suspend),
        DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
        DEVMETHOD(pci_iov_init, iflib_device_iov_init),
        DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
        DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
        DEVMETHOD_END
};

static driver_t ix_driver = {
        "ix", ix_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
        DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
        DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
        DEVMETHOD(ifdi_detach, ixgbe_if_detach),
        DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
        DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
        DEVMETHOD(ifdi_resume, ixgbe_if_resume),
        DEVMETHOD(ifdi_init, ixgbe_if_init),
        DEVMETHOD(ifdi_stop, ixgbe_if_stop),
        DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
        DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
        DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
        DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
        DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
        DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
        DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
        DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
        DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
        DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
        DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
        DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
        DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
        DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
        DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
        DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
        DEVMETHOD(ifdi_timer, ixgbe_if_timer),
        DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
        DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
        DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
        DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
        DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
        DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
        DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
        DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
        DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
        "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
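
/*
 * The CTLFLAG_RDTUN knobs above are loader tunables, set before the
 * module loads. A minimal sketch for /boot/loader.conf (example values,
 * not recommendations; assuming IXGBE_LOW_LATENCY is 128, the default
 * interrupt rate works out to 4000000 / 128 = 31250):
 *
 *   hw.ix.max_interrupt_rate="31250"
 *   hw.ix.flow_control="3"        # ixgbe_fc_full
 *   hw.ix.advertise_speed="0"     # autonegotiate
 */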

/*
 * Smart speed setting, default to on.
 * This only works as a compile-time option
 * right now because it is applied during attach;
 * set this to 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix,
    0, "Enable MSI-X interrupts");

/*
 * Turning this on allows the use of unsupported SFP+ modules;
 * note that in doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir,
    0, "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss,
    0, "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim,
    0, "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the sample rate for TX packets
 * entering the filter pool; at the default of 20, every 20th
 * packet is probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
        .isc_magic = IFLIB_MAGIC,
        .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
        .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
        .isc_tx_maxsegsize = PAGE_SIZE,
        .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
        .isc_tso_maxsegsize = PAGE_SIZE,
        .isc_rx_maxsize = PAGE_SIZE * 4,
        .isc_rx_nsegments = 1,
        .isc_rx_maxsegsize = PAGE_SIZE * 4,
        .isc_nfl = 1,
        .isc_ntxqs = 1,
        .isc_nrxqs = 1,

        .isc_admin_intrcnt = 1,
        .isc_vendor_info = ixgbe_vendor_info_array,
        .isc_driver_version = ixgbe_driver_version,
        .isc_driver = &ixgbe_if_driver,
        .isc_flags = IFLIB_TSO_INIT_IP,

        .isc_nrxd_min = {MIN_RXD},
        .isc_ntxd_min = {MIN_TXD},
        .isc_nrxd_max = {MAX_RXD},
        .isc_ntxd_max = {MAX_TXD},
        .isc_nrxd_default = {DEFAULT_RXD},
        .isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        if_softc_ctx_t scctx = sc->shared;
        struct ix_tx_queue *que;
        int i, j, error;

        MPASS(sc->num_tx_queues > 0);
        MPASS(sc->num_tx_queues == ntxqsets);
        MPASS(ntxqs == 1);

        /* Allocate queue structure memory */
        sc->tx_queues =
            (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
            ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
        if (!sc->tx_queues) {
                device_printf(iflib_get_dev(ctx),
                    "Unable to allocate TX ring memory\n");
                return (ENOMEM);
        }

        for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
                struct tx_ring *txr = &que->txr;

                /* In case SR-IOV is enabled, align the index properly */
                txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

                txr->sc = que->sc = sc;

                /* Allocate report status array */
                txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
                    scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
                if (txr->tx_rsq == NULL) {
                        error = ENOMEM;
                        goto fail;
                }
                for (j = 0; j < scctx->isc_ntxd[0]; j++)
                        txr->tx_rsq[j] = QIDX_INVALID;
                /* get virtual and physical address of the hardware queues */
                txr->tail = IXGBE_TDT(txr->me);
                txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
                txr->tx_paddr = paddrs[i];

                txr->bytes = 0;
                txr->total_packets = 0;

                /* Set the rate at which we sample packets */
                if (sc->feat_en & IXGBE_FEATURE_FDIR)
                        txr->atr_sample = atr_sample_rate;
        }

        device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
            sc->num_tx_queues);

        return (0);

fail:
        ixgbe_if_queues_free(ctx);

        return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ix_rx_queue *que;
        int i;

        MPASS(sc->num_rx_queues > 0);
        MPASS(sc->num_rx_queues == nrxqsets);
        MPASS(nrxqs == 1);

        /* Allocate queue structure memory */
        sc->rx_queues =
            (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
            M_IXGBE, M_NOWAIT | M_ZERO);
        if (!sc->rx_queues) {
                device_printf(iflib_get_dev(ctx),
                    "Unable to allocate RX ring memory\n");
                return (ENOMEM);
        }

        for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
                struct rx_ring *rxr = &que->rxr;

                /* In case SR-IOV is enabled, align the index properly */
                rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

                rxr->sc = que->sc = sc;

                /* get the virtual and physical address of the hw queues */
                rxr->tail = IXGBE_RDT(rxr->me);
                rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
                rxr->rx_paddr = paddrs[i];
                rxr->bytes = 0;
                rxr->que = que;
        }

        device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
            sc->num_rx_queues);

        return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ix_tx_queue *tx_que = sc->tx_queues;
        struct ix_rx_queue *rx_que = sc->rx_queues;
        int i;

        if (tx_que != NULL) {
                for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
                        struct tx_ring *txr = &tx_que->txr;
                        if (txr->tx_rsq == NULL)
                                break;

                        free(txr->tx_rsq, M_IXGBE);
                        txr->tx_rsq = NULL;
                }

                free(sc->tx_queues, M_IXGBE);
                sc->tx_queues = NULL;
        }
        if (rx_que != NULL) {
                free(sc->rx_queues, M_IXGBE);
                sc->rx_queues = NULL;
        }
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;
        u32 reta = 0, mrqc, rss_key[10];
        int queue_id, table_size, index_mult;
        int i, j;
        u32 rss_hash_config;

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
                /* Fetch the configured RSS key */
                rss_getkey((uint8_t *)&rss_key);
        } else {
                /* set up random bits */
                arc4rand(&rss_key, sizeof(rss_key), 0);
        }

        /* Set multiplier for RETA setup and table size based on MAC */
        index_mult = 0x1;
        table_size = 128;
        switch (sc->hw.mac.type) {
        case ixgbe_mac_82598EB:
                index_mult = 0x11;
                break;
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
        case ixgbe_mac_E610:
                table_size = 512;
                break;
        default:
                break;
        }

        /* Set up the redirection table */
        for (i = 0, j = 0; i < table_size; i++, j++) {
                if (j == sc->num_rx_queues)
                        j = 0;

                if (sc->feat_en & IXGBE_FEATURE_RSS) {
                        /*
                         * Fetch the RSS bucket id for the given indirection
                         * entry. Cap it at the number of configured buckets
                         * (which is num_rx_queues).
                         */
                        queue_id = rss_get_indirection_to_bucket(i);
                        queue_id = queue_id % sc->num_rx_queues;
                } else
                        queue_id = (j * index_mult);

                /*
                 * The low 8 bits are for hash value (n+0);
                 * The next 8 bits are for hash value (n+1), etc.
                 */
                reta = reta >> 8;
                reta = reta | (((uint32_t)queue_id) << 24);
                if ((i & 3) == 3) {
                        if (i < 128)
                                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
                        else
                                IXGBE_WRITE_REG(hw,
                                    IXGBE_ERETA((i >> 2) - 32), reta);
                        reta = 0;
                }
        }
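
        /*
         * Worked example (a sketch, not taken from hardware): with four
         * RX queues, kernel RSS disabled, and index_mult of 1, entries
         * 0-3 produce queue ids 0, 1, 2, 3. Each id is shifted in from
         * the top, so after four iterations RETA(0) is written as
         * 0x03020100, i.e. entry n+0 lands in the low byte as the
         * comment above describes.
         */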

        /* Now fill our hash function seeds */
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

        /* Perform hash on these packet types */
        if (sc->feat_en & IXGBE_FEATURE_RSS)
                rss_hash_config = rss_gethashconfig();
        else {
                /*
                 * Disable UDP - IP fragments aren't currently being handled
                 * and so we end up with a mix of 2-tuple and 4-tuple
                 * traffic.
                 */
                rss_hash_config = RSS_HASHTYPE_RSS_IPV4 |
                    RSS_HASHTYPE_RSS_TCP_IPV4 |
                    RSS_HASHTYPE_RSS_IPV6 |
                    RSS_HASHTYPE_RSS_TCP_IPV6 |
                    RSS_HASHTYPE_RSS_IPV6_EX |
                    RSS_HASHTYPE_RSS_TCP_IPV6_EX;
        }

        mrqc = IXGBE_MRQC_RSSEN;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
        mrqc |= ixgbe_get_mrqc(sc->iov_mode);
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1)
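
/*
 * Example (a sketch, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10): for a
 * 2048-byte RX mbuf, bufsz below becomes (2048 + 1023) >> 10 = 2, since
 * the SRRCTL packet buffer size field is expressed in 1 KB units.
 */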

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        if_softc_ctx_t scctx = sc->shared;
        struct ixgbe_hw *hw = &sc->hw;
        if_t ifp = iflib_get_ifp(ctx);
        struct ix_rx_queue *que;
        int i, j;
        u32 bufsz, fctrl, srrctl, rxcsum;
        u32 hlreg;

        /*
         * Make sure receives are disabled while
         * setting up the descriptor ring
         */
        ixgbe_disable_rx(hw);

        /* Enable broadcasts */
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
        if (sc->hw.mac.type == ixgbe_mac_82598EB) {
                fctrl |= IXGBE_FCTRL_DPF;
                fctrl |= IXGBE_FCTRL_PMCF;
        }
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

        /* Set for Jumbo Frames? */
        hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        if (if_getmtu(ifp) > ETHERMTU)
                hlreg |= IXGBE_HLREG0_JUMBOEN;
        else
                hlreg &= ~IXGBE_HLREG0_JUMBOEN;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

        bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
            IXGBE_SRRCTL_BSIZEPKT_SHIFT;

        /* Setup the Base and Length of the Rx Descriptor Ring */
        for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
                struct rx_ring *rxr = &que->rxr;
                u64 rdba = rxr->rx_paddr;

                j = rxr->me;

                /* Setup the Base and Length of the Rx Descriptor Ring */
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
                    (rdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
                    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

                /* Set up the SRRCTL register */
                srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
                srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
                srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
                srrctl |= bufsz;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

                /*
                 * Set DROP_EN iff we have no flow control and >1 queue.
                 * Note that srrctl was cleared shortly before during reset,
                 * so we do not need to clear the bit, but do it just in case
                 * this code is moved elsewhere.
                 */
                if (sc->num_rx_queues > 1 &&
                    sc->hw.fc.requested_mode == ixgbe_fc_none) {
                        srrctl |= IXGBE_SRRCTL_DROP_EN;
                } else {
                        srrctl &= ~IXGBE_SRRCTL_DROP_EN;
                }

                IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

                /* Setup the HW Rx Head and Tail Descriptor Pointers */
                IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

                /* Set the driver rx tail address */
                rxr->tail = IXGBE_RDT(rxr->me);
        }

        if (sc->hw.mac.type != ixgbe_mac_82598EB) {
                u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
                    IXGBE_PSRTYPE_UDPHDR |
                    IXGBE_PSRTYPE_IPV4HDR |
                    IXGBE_PSRTYPE_IPV6HDR;
                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
        }

        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        ixgbe_initialize_rss_mapping(sc);

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
                /* RSS and RX IPP Checksum are mutually exclusive */
                rxcsum |= IXGBE_RXCSUM_PCSD;
        }

        if (if_getcapenable(ifp) & IFCAP_RXCSUM)
                rxcsum |= IXGBE_RXCSUM_PCSD;

        /* This is useful for calculating UDP/IP fragment checksums */
        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ixgbe_hw *hw = &sc->hw;
        if_softc_ctx_t scctx = sc->shared;
        struct ix_tx_queue *que;
        int i;

        /* Setup the Base and Length of the Tx Descriptor Ring */
        for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
            i++, que++) {
                struct tx_ring *txr = &que->txr;
                u64 tdba = txr->tx_paddr;
                u32 txctrl = 0;
                int j = txr->me;

                IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
                    (tdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
                    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

                /* Setup the HW Tx Head and Tail descriptor pointers */
                IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

                /* Cache the tail address */
                txr->tail = IXGBE_TDT(txr->me);

                txr->tx_rs_cidx = txr->tx_rs_pidx;
                txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
                for (int k = 0; k < scctx->isc_ntxd[0]; k++)
                        txr->tx_rsq[k] = QIDX_INVALID;

                /* Disable Head Writeback */
                /*
                 * Note: for X550 series devices, these registers are actually
                 * prefixed with TPH_ instead of DCA_, but the addresses and
                 * fields remain the same.
                 */
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
                        break;
                default:
                        txctrl =
                            IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
                        break;
                }
                txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
                        break;
                default:
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j),
                            txctrl);
                        break;
                }
        }

        if (hw->mac.type != ixgbe_mac_82598EB) {
                u32 dmatxctl, rttdcs;

                dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
                dmatxctl |= IXGBE_DMATXCTL_TE;
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
                /* Disable arbiter to set MTQC */
                rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
                rttdcs |= IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
                IXGBE_WRITE_REG(hw, IXGBE_MTQC,
                    ixgbe_get_mtqc(sc->iov_mode));
                rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        }
} /* ixgbe_initialize_transmit_units */

static int
ixgbe_check_fw_api_version(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;

        if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
                device_printf(sc->dev,
                    "The driver for the device stopped because the NVM "
                    "image is newer than expected. You must install the "
                    "most recent version of the network driver.\n");
                return (EOPNOTSUPP);
        } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
            hw->api_min_ver > (IXGBE_FW_API_VER_MINOR + 2)) {
                device_printf(sc->dev,
                    "The driver for the device detected a newer version of "
                    "the NVM image than expected. Please install the most "
                    "recent version of the network driver.\n");
        } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
            hw->api_min_ver < (IXGBE_FW_API_VER_MINOR - 2)) {
                device_printf(sc->dev,
                    "The driver for the device detected an older version "
                    "of the NVM image than expected. "
                    "Please update the NVM image.\n");
        }
        return (0);
}

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
        return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
        struct ixgbe_softc *sc;
        device_t dev;
        if_softc_ctx_t scctx;
        struct ixgbe_hw *hw;
        int error = 0;
        u32 ctrl_ext;
        size_t i;

        INIT_DEBUGOUT("ixgbe_attach: begin");

        /* Allocate, clear, and link in our adapter structure */
        dev = iflib_get_dev(ctx);
        sc = iflib_get_softc(ctx);
        sc->hw.back = sc;
        sc->ctx = ctx;
        sc->dev = dev;
        scctx = sc->shared = iflib_get_softc_ctx(ctx);
        sc->media = iflib_get_media(ctx);
        hw = &sc->hw;

        /* Determine hardware revision */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_get_revid(dev);
        hw->subsystem_vendor_id = pci_get_subvendor(dev);
        hw->subsystem_device_id = pci_get_subdevice(dev);

        /* Do base PCI setup - map BAR0 */
        if (ixgbe_allocate_pci_resources(ctx)) {
                device_printf(dev, "Allocation of PCI resources failed\n");
                return (ENXIO);
        }

        /* let hardware know driver is loaded */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        /*
         * Initialize the shared code
         */
        if (ixgbe_init_shared_code(hw) != 0) {
                device_printf(dev, "Unable to initialize the shared code\n");
                error = ENXIO;
                goto err_pci;
        }

        if (hw->mac.type == ixgbe_mac_E610)
                ixgbe_init_aci(hw);

        if (hw->mac.ops.fw_recovery_mode &&
            hw->mac.ops.fw_recovery_mode(hw)) {
                device_printf(dev,
                    "Firmware recovery mode detected. Limiting "
                    "functionality.\nRefer to the Intel(R) Ethernet Adapters "
                    "and Devices User Guide for details on firmware recovery "
                    "mode.");
                error = ENOSYS;
                goto err_pci;
        }

        /* 82598 does not support SR-IOV; initialize everything else */
        if (hw->mac.type >= ixgbe_mac_82599_vf) {
                for (i = 0; i < sc->num_vfs; i++)
                        hw->mbx.ops[i].init_params(hw);
        }

        hw->allow_unsupported_sfp = allow_unsupported_sfp;

        if (hw->mac.type != ixgbe_mac_82598EB)
                hw->phy.smart_speed = ixgbe_smart_speed;

        ixgbe_init_device_features(sc);

        /* Enable WoL (if supported) */
        ixgbe_check_wol_support(sc);

        /* Verify adapter fan is still functional (if applicable) */
        if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
                u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
                ixgbe_check_fan_failure(sc, esdp, false);
        }

        /* Ensure SW/FW semaphore is free */
        ixgbe_init_swfw_semaphore(hw);

        /* Set an initial default flow control value */
        hw->fc.requested_mode = ixgbe_flow_control;

        hw->phy.reset_if_overtemp = true;
        error = ixgbe_reset_hw(hw);
        hw->phy.reset_if_overtemp = false;
        if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
                /*
                 * No optics in this port, set up
                 * so the timer routine will probe
                 * for later insertion.
                 */
                sc->sfp_probe = true;
                error = 0;
        } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                device_printf(dev, "Unsupported SFP+ module detected!\n");
                error = EIO;
                goto err_pci;
        } else if (error) {
                device_printf(dev, "Hardware initialization failed\n");
                error = EIO;
                goto err_pci;
        }

        /* Make sure we have a good EEPROM before we read from it */
        if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
                device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
                error = EIO;
                goto err_pci;
        }

        error = ixgbe_start_hw(hw);
        switch (error) {
        case IXGBE_ERR_EEPROM_VERSION:
                device_printf(dev,
                    "This device is a pre-production adapter/LOM. Please be"
                    " aware there may be issues associated with your"
                    " hardware.\nIf you are experiencing problems please"
                    " contact your Intel or hardware representative who"
                    " provided you with this hardware.\n");
                break;
        case IXGBE_ERR_SFP_NOT_SUPPORTED:
                device_printf(dev, "Unsupported SFP+ Module\n");
                error = EIO;
                goto err_pci;
        case IXGBE_ERR_SFP_NOT_PRESENT:
                device_printf(dev, "No SFP+ Module found\n");
                /* falls thru */
        default:
                break;
        }

        /* Check the FW API version */
        if (hw->mac.type == ixgbe_mac_E610 &&
            ixgbe_check_fw_api_version(sc)) {
                error = EIO;
                goto err_pci;
        }

        /* Most of the iflib initialization... */

        iflib_set_mac(ctx, hw->mac.addr);
        switch (sc->hw.mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                scctx->isc_rss_table_size = 512;
                scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
                break;
        default:
                scctx->isc_rss_table_size = 128;
                scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
        }

        /* Allow legacy interrupts */
        ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

        scctx->isc_txqsizes[0] =
            roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
            sizeof(u32), DBA_ALIGN);
        scctx->isc_rxqsizes[0] =
            roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
            DBA_ALIGN);
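
        /*
         * For example (a sketch, assuming DBA_ALIGN is 128): with 2048
         * TX descriptors of 16 bytes each, the TX queue size works out
         * to roundup2(2048 * 16 + sizeof(u32), 128) = 32896 bytes.
         */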

        /* XXX */
        scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
            CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
        if (sc->hw.mac.type == ixgbe_mac_82598EB) {
                scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
        } else {
                scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
                scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
        }

        scctx->isc_msix_bar = pci_msix_table_bar(dev);

        scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
        scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
        scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

        scctx->isc_txrx = &ixgbe_txrx;

        scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

        return (0);

err_pci:
        ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
        ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
        ixgbe_free_pci_resources(ctx);

        if (hw->mac.type == ixgbe_mac_E610)
                ixgbe_shutdown_aci(hw);

        return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
        device_t dev;
        struct ixgbe_softc *sc;
        struct ixgbe_hw *hw;
        int error = 0;

        dev = iflib_get_dev(ctx);
        sc = iflib_get_softc(ctx);
        hw = &sc->hw;

        if (sc->intr_type == IFLIB_INTR_LEGACY &&
            (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
                device_printf(dev,
                    "Device does not support legacy interrupts\n");
                error = ENXIO;
                goto err;
        }

        /* Allocate multicast array memory. */
        sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
            M_IXGBE, M_NOWAIT);
        if (sc->mta == NULL) {
                device_printf(dev,
                    "Can not allocate multicast setup array\n");
                error = ENOMEM;
                goto err;
        }

        /* hw.ix defaults init */
        ixgbe_set_advertise(sc, ixgbe_advertise_speed);

        /* Enable the optics for 82599 SFP+ fiber */
        ixgbe_enable_tx_laser(hw);

        /* Enable power to the phy. */
        ixgbe_set_phy_power(hw, true);

        ixgbe_initialize_iov(sc);

        error = ixgbe_setup_interface(ctx);
        if (error) {
                device_printf(dev, "Interface setup failed: %d\n", error);
                goto err;
        }

        ixgbe_if_update_admin_status(ctx);

        /* Initialize statistics */
        ixgbe_update_stats_counters(sc);
        ixgbe_add_hw_stats(sc);

        /* Check PCIE slot type/speed/width */
        ixgbe_get_slot_info(sc);

        /*
         * Do time init and sysctl init here, but
         * only on the first port of a bypass sc.
         */
        ixgbe_bypass_init(sc);

        /* Display NVM and Option ROM versions */
        ixgbe_print_fw_version(ctx);

        /* Set an initial dmac value */
        sc->dmac = 0;
        /* Set initial advertised speeds (if applicable) */
        sc->advertise = ixgbe_get_default_advertise(sc);

        if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
                ixgbe_define_iov_schemas(dev, &error);

        /* Add sysctls */
        ixgbe_add_device_sysctls(ctx);

        /* Init recovery mode timer and state variable */
        if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
                sc->recovery_mode = 0;

                /* Set up the timer callout */
                callout_init(&sc->fw_mode_timer, true);

                /* Start the task */
                callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
        }

        return (0);
err:
        return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;
        u16 dev_caps = 0;

        /* Find out WoL support for port */
        sc->wol_support = hw->wol_enabled = 0;
        ixgbe_get_device_caps(hw, &dev_caps);
        if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
            ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
             hw->bus.func == 0))
                sc->wol_support = hw->wol_enabled = 1;

        /* Save initial wake up filter configuration */
        sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
        if_t ifp = iflib_get_ifp(ctx);
        struct ixgbe_softc *sc = iflib_get_softc(ctx);

        INIT_DEBUGOUT("ixgbe_setup_interface: begin");

        if_setbaudrate(ifp, IF_Gbps(10));

        sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

        sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

        ixgbe_add_media_types(ctx);

        /* Autoselect media by default */
        ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

        return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        if_t ifp = iflib_get_ifp(ctx);

        switch (cnt) {
        case IFCOUNTER_IPACKETS:
                return (sc->ipackets);
        case IFCOUNTER_OPACKETS:
                return (sc->opackets);
        case IFCOUNTER_IBYTES:
                return (sc->ibytes);
        case IFCOUNTER_OBYTES:
                return (sc->obytes);
        case IFCOUNTER_IMCASTS:
                return (sc->imcasts);
        case IFCOUNTER_OMCASTS:
                return (sc->omcasts);
        case IFCOUNTER_COLLISIONS:
                return (0);
        case IFCOUNTER_IQDROPS:
                return (sc->iqdrops);
        case IFCOUNTER_IERRORS:
                return (sc->ierrors);
        default:
                return (if_get_counter_default(ifp, cnt));
        }
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ixgbe_hw *hw = &sc->hw;
        int i;

        if (hw->phy.ops.read_i2c_byte == NULL)
                return (ENXIO);
        for (i = 0; i < req->len; i++)
                hw->phy.ops.read_i2c_byte(hw, req->offset + i,
                    req->dev_addr, &req->data[i]);
        return (0);
} /* ixgbe_if_i2c_req */

/*
 * ixgbe_if_needs_restart - Tell iflib when the driver needs to be
 * reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
        switch (event) {
        case IFLIB_RESTART_VLAN_CONFIG:
        default:
                return (false);
        }
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ixgbe_hw *hw = &sc->hw;
        device_t dev = iflib_get_dev(ctx);
        u64 layer;

        layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

        /* Media types with matching FreeBSD media defines */
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_5000BASE_T)
                ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
                ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
                ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

        if (hw->mac.type == ixgbe_mac_X550) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
                ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
        }

        if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
            layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
                    NULL);
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
        }

        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
                if (hw->phy.multispeed_fiber)
                        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
                            NULL);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
                if (hw->phy.multispeed_fiber)
                        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
                            NULL);
        } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
                ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
                device_printf(dev, "Media supported: 10GbaseKR\n");
                device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
                device_printf(dev, "Media supported: 10GbaseKX4\n");
                device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
                device_printf(dev, "Media supported: 1000baseKX\n");
                device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
                device_printf(dev, "Media supported: 2500baseKX\n");
                device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
                ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
        }
#endif
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
                device_printf(dev, "Media supported: 1000baseBX\n");
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_BX, 0, NULL);
        }

        if (hw->device_id == IXGBE_DEV_ID_82598AT) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
                    0, NULL);
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
        }

        ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                if (hw->phy.type == ixgbe_phy_nl)
                        return (true);
                return (false);
        case ixgbe_mac_82599EB:
                switch (hw->mac.ops.get_media_type(hw)) {
                case ixgbe_media_type_fiber:
                case ixgbe_media_type_fiber_qsfp:
                        return (true);
                default:
                        return (false);
                }
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
        case ixgbe_mac_E610:
                if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
                        return (true);
                return (false);
        default:
                return (false);
        }
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ixgbe_hw *hw = &sc->hw;
        u32 autoneg, err = 0;
        bool sfp, negotiate;

        sfp = ixgbe_is_sfp(hw);

        if (sfp) {
                sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
                iflib_admin_intr_deferred(ctx);
        } else {
                if (hw->mac.ops.check_link)
                        err = ixgbe_check_link(hw, &sc->link_speed,
                            &sc->link_up, false);
                if (err)
                        return;
                autoneg = hw->phy.autoneg_advertised;
                if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
                        err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                            &negotiate);
                if (err)
                        return;

                if (hw->mac.type == ixgbe_mac_X550 &&
                    hw->phy.autoneg_advertised == 0) {
1558 /*
1559 * 2.5G and 5G autonegotiation speeds on X550
1560 * are disabled by default due to reported
1561 * interoperability issues with some switches.
1562 *
1563 * The second condition checks if any operations
1564 * involving setting autonegotiation speeds have
1565 * been performed prior to this ixgbe_config_link()
1566 * call.
1567 *
1568 * If hw->phy.autoneg_advertised does not
1569 * equal 0, this means that the user might have
1570 * set autonegotiation speeds via the sysctl
1571 * before bringing the interface up. In this
1572 * case, we should not disable 2.5G and 5G
1573 * since that speeds might be selected by the
1574 * user.
1575 *
1576 * Otherwise (i.e. if hw->phy.autoneg_advertised
1577 * is set to 0), it is the first time we set
1578 * autonegotiation preferences and the default
1579 * set of speeds should exclude 2.5G and 5G.
1580 */
1581 autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
1582 IXGBE_LINK_SPEED_5GB_FULL);
1583 }
1584
1585 if (hw->mac.type == ixgbe_mac_E610) {
1586 hw->phy.ops.init(hw);
1587 err = ixgbe_enable_lse(sc);
1588 if (err)
1589 device_printf(sc->dev,
1590 "Failed to enable Link Status Event, "
1591 "error: %d\n", err);
1592 }
1593
1594 if (hw->mac.ops.setup_link)
1595 err = hw->mac.ops.setup_link(hw, autoneg,
1596 sc->link_up);
1597 }
1598 } /* ixgbe_config_link */
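
/************************************************************************
 * Example: advertised-speed masks (illustrative sketch only)
 *
 * A minimal, hypothetical helper restating the X550 default above:
 * the IXGBE_LINK_SPEED_* values are single-bit masks, so excluding
 * 2.5G/5G from the default advertisement is a simple bit-clear.  A
 * user can still opt in through the advertise-speed sysctl (0x10 =
 * 2.5G, 0x20 = 5G, the same encoding built in ixgbe_if_media_change()
 * below), which makes hw->phy.autoneg_advertised non-zero before this
 * path runs.
 ************************************************************************/
#if 0	/* EXAMPLE ONLY -- illustrative sketch, not compiled */
static inline u32
ixgbe_example_x550_default_autoneg(u32 autoneg)
{
	/* Drop 2.5G and 5G from the default advertised set */
	return (autoneg &
	    ~(IXGBE_LINK_SPEED_2_5GB_FULL | IXGBE_LINK_SPEED_5GB_FULL));
}
#endif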
1599
1600 /************************************************************************
1601 * ixgbe_update_stats_counters - Update board statistics counters.
1602 ************************************************************************/
1603 static void
1604 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1605 {
1606 struct ixgbe_hw *hw = &sc->hw;
1607 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1608 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1609 u32 lxoffrxc;
1610 u64 total_missed_rx = 0;
1611
1612 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1613 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1614 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1615 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1616 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1617
1618 for (int i = 0; i < 16; i++) {
1619 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1620 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1621 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1622 }
1623 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1624 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1625 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1626
1627 /* Hardware workaround, gprc counts missed packets */
1628 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1629 stats->gprc -= missed_rx;
1630
1631 if (hw->mac.type != ixgbe_mac_82598EB) {
1632 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1633 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1634 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1635 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1636 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1637 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1638 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1639 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1640 stats->lxoffrxc += lxoffrxc;
1641 } else {
1642 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1643 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1644 stats->lxoffrxc += lxoffrxc;
1645 /* 82598 only has a counter in the high register */
1646 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1647 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1648 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1649 }
1650
1651 /*
1652 * For watchdog management we need to know if we have been paused
1653 * during the last interval, so capture that here.
1654 */
1655 if (lxoffrxc)
1656 sc->shared->isc_pause_frames = 1;
1657
1658 /*
1659 * Workaround: mprc hardware is incorrectly counting
1660 * broadcasts, so for now we subtract those.
1661 */
1662 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1663 stats->bprc += bprc;
1664 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1665 if (hw->mac.type == ixgbe_mac_82598EB)
1666 stats->mprc -= bprc;
1667
1668 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1669 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1670 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1671 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1672 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1673 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1674
1675 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1676 stats->lxontxc += lxon;
1677 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1678 stats->lxofftxc += lxoff;
1679 total = lxon + lxoff;
1680
1681 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1682 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1683 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1684 stats->gptc -= total;
1685 stats->mptc -= total;
1686 stats->ptc64 -= total;
1687 stats->gotc -= total * ETHER_MIN_LEN;
1688
1689 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1690 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1691 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1692 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1693 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1694 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1695 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1696 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1697 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1698 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1699 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1700 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1701 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1702 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1703 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1704 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1705 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1706 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1707 /* Only read FCOE on 82599 */
1708 if (hw->mac.type != ixgbe_mac_82598EB) {
1709 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1710 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1711 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1712 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1713 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1714 }
1715
1716 /* Fill out the OS statistics structure */
1717 IXGBE_SET_IPACKETS(sc, stats->gprc);
1718 IXGBE_SET_OPACKETS(sc, stats->gptc);
1719 IXGBE_SET_IBYTES(sc, stats->gorc);
1720 IXGBE_SET_OBYTES(sc, stats->gotc);
1721 IXGBE_SET_IMCASTS(sc, stats->mprc);
1722 IXGBE_SET_OMCASTS(sc, stats->mptc);
1723 IXGBE_SET_COLLISIONS(sc, 0);
1724 IXGBE_SET_IQDROPS(sc, total_missed_rx);
1725
1726 /*
1727 * Aggregate following types of errors as RX errors:
1728 * - CRC error count,
1729 * - illegal byte error count,
1730 * - missed packets count,
1731 * - length error count,
1732 * - undersized packets count,
1733 * - fragmented packets count,
1734 * - oversized packets count,
1735 * - jabber count.
1736 */
1737 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1738 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
1739 stats->roc + stats->rjc);
1740 } /* ixgbe_update_stats_counters */
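
/************************************************************************
 * Example: split clear-on-read counters (illustrative sketch only)
 *
 * The statistics registers are clear-on-read, which is why every
 * read above is accumulated ("+=") into 64-bit software totals.  On
 * MACs newer than 82598 the octet counters are split into low/high
 * register pairs (e.g. GORCL/GORCH); a minimal sketch of that read
 * pattern, with this register pair standing in for any LO/HI counter:
 ************************************************************************/
#if 0	/* EXAMPLE ONLY -- illustrative sketch, not compiled */
static inline void
ixgbe_example_read_split_counter(struct ixgbe_hw *hw, u64 *total)
{
	/* Low word first, then fold the high word into the upper bits */
	*total += IXGBE_READ_REG(hw, IXGBE_GORCL) +
	    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
}
#endif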
1741
1742 /************************************************************************
1743 * ixgbe_add_hw_stats
1744 *
1745 * Add sysctl variables, one per statistic, to the system.
1746 ************************************************************************/
1747 static void
1748 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1749 {
1750 device_t dev = iflib_get_dev(sc->ctx);
1751 struct ix_rx_queue *rx_que;
1752 struct ix_tx_queue *tx_que;
1753 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1754 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1755 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1756 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1757 struct sysctl_oid *stat_node, *queue_node;
1758 struct sysctl_oid_list *stat_list, *queue_list;
1759 int i;
1760
1761 #define QUEUE_NAME_LEN 32
1762 char namebuf[QUEUE_NAME_LEN];
1763
1764 /* Driver Statistics */
1765 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1766 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1767 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1768 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1769 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1770 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1771
1772 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
1773 i++, tx_que++) {
1774 struct tx_ring *txr = &tx_que->txr;
1775 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1776 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1777 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1778 queue_list = SYSCTL_CHILDREN(queue_node);
1779
1780 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1781 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1782 ixgbe_sysctl_tdh_handler, "IU",
1783 "Transmit Descriptor Head");
1784 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1785 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1786 ixgbe_sysctl_tdt_handler, "IU",
1787 "Transmit Descriptor Tail");
1788 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1789 CTLFLAG_RD, &txr->tso_tx, "TSO");
1790 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1791 CTLFLAG_RD, &txr->total_packets,
1792 "Queue Packets Transmitted");
1793 }
1794
1795 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
1796 i++, rx_que++) {
1797 struct rx_ring *rxr = &rx_que->rxr;
1798 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1799 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1800 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1801 queue_list = SYSCTL_CHILDREN(queue_node);
1802
1803 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1804 CTLTYPE_UINT | CTLFLAG_RW,
1805 &sc->rx_queues[i], 0,
1806 ixgbe_sysctl_interrupt_rate_handler, "IU",
1807 "Interrupt Rate");
1808 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1809 CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1810 "irqs on this queue");
1811 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1812 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1813 ixgbe_sysctl_rdh_handler, "IU",
1814 "Receive Descriptor Head");
1815 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1816 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1817 ixgbe_sysctl_rdt_handler, "IU",
1818 "Receive Descriptor Tail");
1819 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1820 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1821 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1822 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1823 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1824 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1825 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1826 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1827 }
1828
1829 /* MAC stats get their own sub node */
1830 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1831 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1832 stat_list = SYSCTL_CHILDREN(stat_node);
1833
1834 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1835 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1836 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1837 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1838 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1839 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1840 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1841 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1842 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1843 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1844 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1845 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1846 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1847 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1848 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1849 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1850 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1851 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1852
1853 /* Flow Control stats */
1854 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1855 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1856 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1857 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1858 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1859 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1860 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1861 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1862
1863 /* Packet Reception Stats */
1864 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1865 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1866 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1867 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1868 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1869 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1870 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1871 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1872 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1873 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1874 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1875 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1876 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1877 CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1878 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1879 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1880 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1881 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1882 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1883 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1884 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1885 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1886 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1887 CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1888 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1889 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1890 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1891 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1892 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1893 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1894 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1895 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1896 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1897 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1898 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1899 CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1900 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1901 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1902
1903 /* Packet Transmission Stats */
1904 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1905 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1906 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1907 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1908 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1909 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1910 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1911 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1912 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1913 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1914 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1915 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1916 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1917 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1918 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1919 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1920 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1921 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1922 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1923 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1924 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1925 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1926 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1927 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1928 } /* ixgbe_add_hw_stats */
1929
1930 /************************************************************************
1931 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1932 *
1933 * Retrieves the TDH value from the hardware
1934 ************************************************************************/
1935 static int
1936 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1937 {
1938 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1939 int error;
1940 unsigned int val;
1941
1942 if (!txr)
1943 return (0);
1944
1946 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1947 return (EPERM);
1948
1949 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1950 error = sysctl_handle_int(oidp, &val, 0, req);
1951 if (error || !req->newptr)
1952 return error;
1953
1954 return (0);
1955 } /* ixgbe_sysctl_tdh_handler */
1956
1957 /************************************************************************
1958 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1959 *
1960 * Retrieves the TDT value from the hardware
1961 ************************************************************************/
1962 static int
1963 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1964 {
1965 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1966 int error;
1967 unsigned int val;
1968
1969 if (!txr)
1970 return (0);
1971
1972 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1973 return (EPERM);
1974
1975 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1976 error = sysctl_handle_int(oidp, &val, 0, req);
1977 if (error || !req->newptr)
1978 return error;
1979
1980 return (0);
1981 } /* ixgbe_sysctl_tdt_handler */
1982
1983 /************************************************************************
1984 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1985 *
1986 * Retrieves the RDH value from the hardware
1987 ************************************************************************/
1988 static int
1989 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1990 {
1991 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1992 int error;
1993 unsigned int val;
1994
1995 if (!rxr)
1996 return (0);
1997
1998 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1999 return (EPERM);
2000
2001 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
2002 error = sysctl_handle_int(oidp, &val, 0, req);
2003 if (error || !req->newptr)
2004 return error;
2005
2006 return (0);
2007 } /* ixgbe_sysctl_rdh_handler */
2008
2009 /************************************************************************
2010 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2011 *
2012 * Retrieves the RDT value from the hardware
2013 ************************************************************************/
2014 static int
2015 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
2016 {
2017 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
2018 int error;
2019 unsigned int val;
2020
2021 if (!rxr)
2022 return (0);
2023
2024 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
2025 return (EPERM);
2026
2027 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
2028 error = sysctl_handle_int(oidp, &val, 0, req);
2029 if (error || !req->newptr)
2030 return error;
2031
2032 return (0);
2033 } /* ixgbe_sysctl_rdt_handler */
2034
2035 /************************************************************************
2036 * ixgbe_if_vlan_register
2037 *
2038 * Run via the vlan config EVENT; it lets us use the
2039 * HW filter table since we can get the VLAN id. This
2040 * just creates the entry in the soft version of the
2041 * VFTA; init will repopulate the real table.
2042 ************************************************************************/
2043 static void
2044 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
2045 {
2046 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2047 u16 index, bit;
2048
2049 index = (vtag >> 5) & 0x7F;
2050 bit = vtag & 0x1F;
2051 sc->shadow_vfta[index] |= (1 << bit);
2052 ++sc->num_vlans;
2053 ixgbe_setup_vlan_hw_support(ctx);
2054 } /* ixgbe_if_vlan_register */
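
/************************************************************************
 * Example: VFTA indexing (illustrative sketch only)
 *
 * The VFTA is a 4096-bit table stored as 128 32-bit words, so the
 * shift/mask above selects word (vtag >> 5) and bit (vtag & 0x1F).
 * Worked example: vtag 100 = 0b1100100 -> index 3, bit 4.
 ************************************************************************/
#if 0	/* EXAMPLE ONLY -- illustrative sketch, not compiled */
static inline void
ixgbe_example_vfta_locate(u16 vtag, u16 *index, u16 *bit)
{
	*index = (vtag >> 5) & 0x7F;	/* which 32-bit word: 0..127 */
	*bit = vtag & 0x1F;		/* which bit in that word: 0..31 */
}
#endif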
2055
2056 /************************************************************************
2057 * ixgbe_if_vlan_unregister
2058 *
2059 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2060 ************************************************************************/
2061 static void
2062 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
2063 {
2064 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2065 u16 index, bit;
2066
2067 index = (vtag >> 5) & 0x7F;
2068 bit = vtag & 0x1F;
2069 sc->shadow_vfta[index] &= ~(1 << bit);
2070 --sc->num_vlans;
2071 /* Re-init to load the changes */
2072 ixgbe_setup_vlan_hw_support(ctx);
2073 } /* ixgbe_if_vlan_unregister */
2074
2075 /************************************************************************
2076 * ixgbe_setup_vlan_hw_support
2077 ************************************************************************/
2078 static void
2079 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
2080 {
2081 if_t ifp = iflib_get_ifp(ctx);
2082 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2083 struct ixgbe_hw *hw = &sc->hw;
2084 struct rx_ring *rxr;
2085 int i;
2086 u32 ctrl;
2087
2089 /*
2090 * We get here through init_locked, meaning
2091 * a soft reset; this has already cleared
2092 * the VFTA and other state, so if no VLANs
2093 * have been registered, do nothing.
2094 */
2095 if (sc->num_vlans == 0 ||
2096 (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
2097 /* Clear the vlan hw flag */
2098 for (i = 0; i < sc->num_rx_queues; i++) {
2099 rxr = &sc->rx_queues[i].rxr;
2100 /* On 82599 the VLAN enable is per-queue in RXDCTL */
2101 if (hw->mac.type != ixgbe_mac_82598EB) {
2102 ctrl = IXGBE_READ_REG(hw,
2103 IXGBE_RXDCTL(rxr->me));
2104 ctrl &= ~IXGBE_RXDCTL_VME;
2105 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2106 ctrl);
2107 }
2108 rxr->vtag_strip = false;
2109 }
2110 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2111 /* Disable the VLAN filter table */
2112 ctrl |= IXGBE_VLNCTRL_CFIEN;
2113 ctrl &= ~IXGBE_VLNCTRL_VFE;
2114 if (hw->mac.type == ixgbe_mac_82598EB)
2115 ctrl &= ~IXGBE_VLNCTRL_VME;
2116 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2117 return;
2118 }
2119
2120 /* Setup the queues for vlans */
2121 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
2122 for (i = 0; i < sc->num_rx_queues; i++) {
2123 rxr = &sc->rx_queues[i].rxr;
2124 /* On 82599 the VLAN enable is per-queue in RXDCTL */
2125 if (hw->mac.type != ixgbe_mac_82598EB) {
2126 ctrl = IXGBE_READ_REG(hw,
2127 IXGBE_RXDCTL(rxr->me));
2128 ctrl |= IXGBE_RXDCTL_VME;
2129 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2130 ctrl);
2131 }
2132 rxr->vtag_strip = true;
2133 }
2134 }
2135
2136 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
2137 return;
2138 /*
2139 * A soft reset zeroes out the VFTA, so
2140 * we need to repopulate it now.
2141 */
2142 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2143 if (sc->shadow_vfta[i] != 0)
2144 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2145 sc->shadow_vfta[i]);
2146
2147 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2148 /* Enable the Filter Table if enabled */
2149 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
2150 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2151 ctrl |= IXGBE_VLNCTRL_VFE;
2152 }
2153 if (hw->mac.type == ixgbe_mac_82598EB)
2154 ctrl |= IXGBE_VLNCTRL_VME;
2155 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2156 } /* ixgbe_setup_vlan_hw_support */
2157
2158 /************************************************************************
2159 * ixgbe_get_slot_info
2160 *
2161 * Get the width and transaction speed of
2162 * the slot this adapter is plugged into.
2163 ************************************************************************/
2164 static void
2165 ixgbe_get_slot_info(struct ixgbe_softc *sc)
2166 {
2167 device_t dev = iflib_get_dev(sc->ctx);
2168 struct ixgbe_hw *hw = &sc->hw;
2169 int bus_info_valid = true;
2170 u32 offset;
2171 u16 link;
2172
2173 /* Some devices are behind an internal bridge */
2174 switch (hw->device_id) {
2175 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2176 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2177 goto get_parent_info;
2178 default:
2179 break;
2180 }
2181
2182 ixgbe_get_bus_info(hw);
2183
2184 /*
2185 * Some devices don't use PCI-E, so there is no point
2186 * in displaying "Unknown" for their bus speed and width.
2187 */
2188 switch (hw->mac.type) {
2189 case ixgbe_mac_X550EM_x:
2190 case ixgbe_mac_X550EM_a:
2191 return;
2192 default:
2193 goto display;
2194 }
2195
2196 get_parent_info:
2197 /*
2198 * For the Quad port adapter we need to parse back
2199 * up the PCI tree to find the speed of the expansion
2200 * slot into which this adapter is plugged. A bit more work.
2201 */
2202 dev = device_get_parent(device_get_parent(dev));
2203 #ifdef IXGBE_DEBUG
2204 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2205 pci_get_slot(dev), pci_get_function(dev));
2206 #endif
2207 dev = device_get_parent(device_get_parent(dev));
2208 #ifdef IXGBE_DEBUG
2209 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2210 pci_get_slot(dev), pci_get_function(dev));
2211 #endif
2212 /* Now get the PCI Express Capabilities offset */
2213 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
2214 /*
2215 * Hmm...can't get PCI-Express capabilities.
2216 * Falling back to default method.
2217 */
2218 bus_info_valid = false;
2219 ixgbe_get_bus_info(hw);
2220 goto display;
2221 }
2222 /* ...and read the Link Status Register */
2223 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2224 ixgbe_set_pci_config_data_generic(hw, link);
2225
2226 display:
2227 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2228 ((hw->bus.speed == ixgbe_bus_speed_16000) ? "16.0GT/s" :
2229 (hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2230 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2231 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2232 "Unknown"),
2233 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2234 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2235 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2236 "Unknown"));
2237
2238 if (bus_info_valid) {
2239 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2240 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2241 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2242 device_printf(dev,
2243 "PCI-Express bandwidth available for this card"
2244 " is not sufficient for optimal performance.\n");
2245 device_printf(dev,
2246 "For optimal performance a x8 PCIE, or x4 PCIE"
2247 " Gen2 slot is required.\n");
2248 }
2249 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2250 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2251 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2252 device_printf(dev,
2253 "PCI-Express bandwidth available for this card"
2254 " is not sufficient for optimal performance.\n");
2255 device_printf(dev,
2256 "For optimal performance a x8 PCIE Gen3 slot is"
2257 " required.\n");
2258 }
2259 } else
2260 device_printf(dev,
2261 "Unable to determine slot speed/width. The speed/width"
2262 " reported are that of the internal switch.\n");
2263
2264 return;
2265 } /* ixgbe_get_slot_info */
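
/************************************************************************
 * Example: PCIe Link Status decode (illustrative sketch only)
 *
 * ixgbe_set_pci_config_data_generic() consumes the raw LNKSTA word
 * read above.  Assuming the standard PCIe register layout, a sketch
 * of the fields it decodes: current link speed in bits 3:0
 * (1 = 2.5GT/s, 2 = 5GT/s, 3 = 8GT/s) and negotiated link width in
 * bits 9:4.  The helper name here is hypothetical.
 ************************************************************************/
#if 0	/* EXAMPLE ONLY -- illustrative sketch, not compiled */
static inline void
ixgbe_example_decode_lnksta(u16 link, u16 *speed, u16 *width)
{
	*speed = link & 0xF;		/* Current Link Speed field */
	*width = (link >> 4) & 0x3F;	/* Negotiated Link Width field */
}
#endif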
2266
2267 /************************************************************************
2268 * ixgbe_if_msix_intr_assign
2269 *
2270 * Setup MSI-X Interrupt resources and handlers
2271 ************************************************************************/
2272 static int
2273 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2274 {
2275 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2276 struct ix_rx_queue *rx_que = sc->rx_queues;
2277 struct ix_tx_queue *tx_que;
2278 int error, rid, vector = 0;
2279 char buf[16];
2280
2281 /* Admin queue is vector 0 */
2282 rid = vector + 1;
2283 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2284 rid = vector + 1;
2285
2286 snprintf(buf, sizeof(buf), "rxq%d", i);
2287 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2288 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
2289 buf);
2290
2291 if (error) {
2292 device_printf(iflib_get_dev(ctx),
2293 "Failed to allocate que int %d err: %d\n",
2294 i, error);
2295 sc->num_rx_queues = i + 1;
2296 goto fail;
2297 }
2298
2299 rx_que->msix = vector;
2300 }
2301 for (int i = 0; i < sc->num_tx_queues; i++) {
2302 snprintf(buf, sizeof(buf), "txq%d", i);
2303 tx_que = &sc->tx_queues[i];
2304 tx_que->msix = i % sc->num_rx_queues;
2305 iflib_softirq_alloc_generic(ctx,
2306 &sc->rx_queues[tx_que->msix].que_irq,
2307 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2308 }
2309 rid = vector + 1;
2310 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2311 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2312 if (error) {
2313 device_printf(iflib_get_dev(ctx),
2314 "Failed to register admin handler\n");
2315 return (error);
2316 }
2317
2318 sc->vector = vector;
2319
2320 return (0);
2321 fail:
2322 iflib_irq_free(ctx, &sc->irq);
2323 rx_que = sc->rx_queues;
2324 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2325 iflib_irq_free(ctx, &rx_que->que_irq);
2326
2327 return (error);
2328 } /* ixgbe_if_msix_intr_assign */
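
/************************************************************************
 * Example: TX-to-RX vector sharing (illustrative sketch only)
 *
 * TX queues are not given their own MSI-X vectors above; txq i is
 * serviced on the interrupt of rxq (i % num_rx_queues).  E.g. with
 * 8 TX and 4 RX queues, txq5 shares rxq1's vector (5 % 4 == 1).
 ************************************************************************/
#if 0	/* EXAMPLE ONLY -- illustrative sketch, not compiled */
static inline int
ixgbe_example_txq_vector(int txq, int num_rx_queues)
{
	return (txq % num_rx_queues);	/* rxq whose irq services txq */
}
#endif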
2329
2330 static inline void
2331 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2332 {
2333 uint32_t newitr = 0;
2334 struct rx_ring *rxr = &que->rxr;
2335 /* FIXME struct tx_ring *txr = ... ->txr; */
2336
2337 /*
2338 * Do Adaptive Interrupt Moderation:
2339 * - Write out last calculated setting
2340 * - Calculate based on average size over
2341 * the last interval.
2342 */
2343 if (que->eitr_setting) {
2344 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2345 que->eitr_setting);
2346 }
2347
2348 que->eitr_setting = 0;
2349 /* Idle, do nothing */
2350 if (rxr->bytes == 0) {
2351 /* FIXME && txr->bytes == 0 */
2352 return;
2353 }
2354
2355 if ((rxr->bytes) && (rxr->packets))
2356 newitr = rxr->bytes / rxr->packets;
2357 /* FIXME for transmit accounting
2358 * if ((txr->bytes) && (txr->packets))
2359 * newitr = txr->bytes/txr->packets;
2360 * if ((rxr->bytes) && (rxr->packets))
2361 * newitr = max(newitr, (rxr->bytes / rxr->packets));
2362 */
2363
2364 newitr += 24; /* account for hardware frame, crc */
2365 /* set an upper boundary */
2366 newitr = min(newitr, 3000);
2367
2368 /* Be nice to the mid range */
2369 if ((newitr > 300) && (newitr < 1200)) {
2370 newitr = (newitr / 3);
2371 } else {
2372 newitr = (newitr / 2);
2373 }
2374
2375 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2376 newitr |= newitr << 16;
2377 } else {
2378 newitr |= IXGBE_EITR_CNT_WDIS;
2379 }
2380
2381 /* save for next interrupt */
2382 que->eitr_setting = newitr;
2383
2384 /* Reset state */
2385 /* FIXME txr->bytes = 0; */
2386 /* FIXME txr->packets = 0; */
2387 rxr->bytes = 0;
2388 rxr->packets = 0;
2389
2390 return;
2391 } /* ixgbe_perform_aim */
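
/************************************************************************
 * Example: AIM arithmetic (illustrative sketch only)
 *
 * Worked through for one interval: rxr->bytes = 1500000 and
 * rxr->packets = 1000 give an average of 1500 bytes/packet; adding
 * 24 for framing overhead yields 1524, which is capped at 3000 and,
 * being outside the 300..1200 mid range, halved to 762 before being
 * written back to EITR on the next interrupt.
 ************************************************************************/
#if 0	/* EXAMPLE ONLY -- illustrative sketch, not compiled */
static inline uint32_t
ixgbe_example_aim(uint32_t bytes, uint32_t packets)
{
	uint32_t newitr = bytes / packets;	/* average frame size */

	newitr += 24;			/* hardware frame + crc overhead */
	newitr = min(newitr, 3000);	/* upper bound */
	if (newitr > 300 && newitr < 1200)
		newitr /= 3;		/* be nice to the mid range */
	else
		newitr /= 2;
	return (newitr);
}
#endif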
2392
2393 /*********************************************************************
2394 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2395 **********************************************************************/
2396 static int
2397 ixgbe_msix_que(void *arg)
2398 {
2399 struct ix_rx_queue *que = arg;
2400 struct ixgbe_softc *sc = que->sc;
2401 if_t ifp = iflib_get_ifp(que->sc->ctx);
2402
2403 /* Protect against spurious interrupts */
2404 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2405 return (FILTER_HANDLED);
2406
2407 ixgbe_disable_queue(sc, que->msix);
2408 ++que->irqs;
2409
2410 /* Check for AIM */
2411 if (sc->enable_aim) {
2412 ixgbe_perform_aim(sc, que);
2413 }
2414
2415 return (FILTER_SCHEDULE_THREAD);
2416 } /* ixgbe_msix_que */
2417
2418 /************************************************************************
2419 * ixgbe_media_status - Media Ioctl callback
2420 *
2421 * Called whenever the user queries the status of
2422 * the interface using ifconfig.
2423 ************************************************************************/
2424 static void
2425 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
2426 {
2427 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2428 struct ixgbe_hw *hw = &sc->hw;
2429 int layer;
2430
2431 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2432
2433 ifmr->ifm_status = IFM_AVALID;
2434 ifmr->ifm_active = IFM_ETHER;
2435
2436 if (!sc->link_active)
2437 return;
2438
2439 ifmr->ifm_status |= IFM_ACTIVE;
2440 layer = sc->phy_layer;
2441
2442 if (layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)
2443 switch (sc->link_speed) {
2444 case IXGBE_LINK_SPEED_10GB_FULL:
2445 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2446 break;
2447 case IXGBE_LINK_SPEED_5GB_FULL:
2448 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2449 break;
2450 case IXGBE_LINK_SPEED_2_5GB_FULL:
2451 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2452 break;
2453 case IXGBE_LINK_SPEED_1GB_FULL:
2454 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2455 break;
2456 case IXGBE_LINK_SPEED_100_FULL:
2457 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2458 break;
2459 case IXGBE_LINK_SPEED_10_FULL:
2460 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2461 break;
2462 }
2463 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2464 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2465 switch (sc->link_speed) {
2466 case IXGBE_LINK_SPEED_10GB_FULL:
2467 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2468 break;
2469 case IXGBE_LINK_SPEED_1GB_FULL:
2470 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2471 break;
2472 }
2473 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2474 switch (sc->link_speed) {
2475 case IXGBE_LINK_SPEED_10GB_FULL:
2476 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2477 break;
2478 case IXGBE_LINK_SPEED_1GB_FULL:
2479 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2480 break;
2481 }
2482 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2483 switch (sc->link_speed) {
2484 case IXGBE_LINK_SPEED_10GB_FULL:
2485 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2486 break;
2487 case IXGBE_LINK_SPEED_1GB_FULL:
2488 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2489 break;
2490 }
2491 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2492 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2493 switch (sc->link_speed) {
2494 case IXGBE_LINK_SPEED_10GB_FULL:
2495 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2496 break;
2497 case IXGBE_LINK_SPEED_1GB_FULL:
2498 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2499 break;
2500 }
2501 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2502 switch (sc->link_speed) {
2503 case IXGBE_LINK_SPEED_10GB_FULL:
2504 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2505 break;
2506 }
2507 /*
2508 * XXX: These need to use the proper media types once
2509 * they're added.
2510 */
2511 #ifndef IFM_ETH_XTYPE
2512 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2513 switch (sc->link_speed) {
2514 case IXGBE_LINK_SPEED_10GB_FULL:
2515 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2516 break;
2517 case IXGBE_LINK_SPEED_2_5GB_FULL:
2518 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2519 break;
2520 case IXGBE_LINK_SPEED_1GB_FULL:
2521 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2522 break;
2523 }
2524 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2525 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2526 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2527 switch (sc->link_speed) {
2528 case IXGBE_LINK_SPEED_10GB_FULL:
2529 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2530 break;
2531 case IXGBE_LINK_SPEED_2_5GB_FULL:
2532 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2533 break;
2534 case IXGBE_LINK_SPEED_1GB_FULL:
2535 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2536 break;
2537 }
2538 #else
2539 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2540 switch (sc->link_speed) {
2541 case IXGBE_LINK_SPEED_10GB_FULL:
2542 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2543 break;
2544 case IXGBE_LINK_SPEED_2_5GB_FULL:
2545 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2546 break;
2547 case IXGBE_LINK_SPEED_1GB_FULL:
2548 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2549 break;
2550 }
2551 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2552 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2553 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2554 switch (sc->link_speed) {
2555 case IXGBE_LINK_SPEED_10GB_FULL:
2556 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2557 break;
2558 case IXGBE_LINK_SPEED_2_5GB_FULL:
2559 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2560 break;
2561 case IXGBE_LINK_SPEED_1GB_FULL:
2562 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2563 break;
2564 }
2565 #endif
2566
2567 /* If nothing is recognized... */
2568 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2569 ifmr->ifm_active |= IFM_UNKNOWN;
2570
2571 /* Display current flow control setting used on link */
2572 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2573 hw->fc.current_mode == ixgbe_fc_full)
2574 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2575 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2576 hw->fc.current_mode == ixgbe_fc_full)
2577 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2578 } /* ixgbe_if_media_status */
2579
2580 /************************************************************************
2581 * ixgbe_media_change - Media Ioctl callback
2582 *
2583 * Called when the user changes speed/duplex using
2584 * media/mediaopt option with ifconfig.
2585 ************************************************************************/
2586 static int
2587 ixgbe_if_media_change(if_ctx_t ctx)
2588 {
2589 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2590 struct ifmedia *ifm = iflib_get_media(ctx);
2591 struct ixgbe_hw *hw = &sc->hw;
2592 ixgbe_link_speed speed = 0;
2593
2594 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2595
2596 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2597 return (EINVAL);
2598
2599 if (hw->phy.media_type == ixgbe_media_type_backplane)
2600 return (EPERM);
2601
2602 /*
2603 * We don't actually need to check against the supported
2604 * media types of the adapter; ifmedia will take care of
2605 * that for us.
2606 */
2607 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2608 case IFM_AUTO:
2609 case IFM_10G_T:
2610 speed |= IXGBE_LINK_SPEED_100_FULL;
2611 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2612 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2613 break;
2614 case IFM_10G_LRM:
2615 case IFM_10G_LR:
2616 #ifndef IFM_ETH_XTYPE
2617 case IFM_10G_SR: /* KR, too */
2618 case IFM_10G_CX4: /* KX4 */
2619 #else
2620 case IFM_10G_KR:
2621 case IFM_10G_KX4:
2622 #endif
2623 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2624 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2625 break;
2626 #ifndef IFM_ETH_XTYPE
2627 case IFM_1000_CX: /* KX */
2628 #else
2629 case IFM_1000_KX:
2630 #endif
2631 case IFM_1000_LX:
2632 case IFM_1000_SX:
2633 case IFM_1000_BX:
2634 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2635 break;
2636 case IFM_1000_T:
2637 speed |= IXGBE_LINK_SPEED_100_FULL;
2638 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2639 break;
2640 case IFM_10G_TWINAX:
2641 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2642 break;
2643 case IFM_5000_T:
2644 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2645 break;
2646 case IFM_2500_T:
2647 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2648 break;
2649 case IFM_100_TX:
2650 speed |= IXGBE_LINK_SPEED_100_FULL;
2651 break;
2652 case IFM_10_T:
2653 speed |= IXGBE_LINK_SPEED_10_FULL;
2654 break;
2655 default:
2656 goto invalid;
2657 }
2658
2659 hw->mac.autotry_restart = true;
2660 hw->mac.ops.setup_link(hw, speed, true);
2661 sc->advertise =
2662 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
2663 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
2664 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
2665 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
2666 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
2667 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
2668
2669 return (0);
2670
2671 invalid:
2672 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2673
2674 return (EINVAL);
2675 } /* ixgbe_if_media_change */
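
/************************************************************************
 * Example: sc->advertise encoding (illustrative sketch only)
 *
 * The advertise word built above packs one bit per speed:
 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M, 0x10 = 2.5G,
 * 0x20 = 5G.  Selecting IFM_AUTO on a 10GBASE-T port therefore
 * stores 0x7 (100M | 1G | 10G).
 ************************************************************************/
#if 0	/* EXAMPLE ONLY -- illustrative sketch, not compiled */
static inline int
ixgbe_example_advertise_word(ixgbe_link_speed speed)
{
	return (((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
	    ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0));
}
#endif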
2676
2677 /************************************************************************
2678 * ixgbe_set_promisc
2679 ************************************************************************/
2680 static int
2681 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2682 {
2683 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2684 if_t ifp = iflib_get_ifp(ctx);
2685 u32 rctl;
2686 int mcnt = 0;
2687
2688 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2689 rctl &= (~IXGBE_FCTRL_UPE);
2690 if (if_getflags(ifp) & IFF_ALLMULTI)
2691 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2692 else {
2693 mcnt = min(if_llmaddr_count(ifp),
2694 MAX_NUM_MULTICAST_ADDRESSES);
2695 }
2696 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2697 rctl &= (~IXGBE_FCTRL_MPE);
2698 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2699
2700 if (if_getflags(ifp) & IFF_PROMISC) {
2701 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2702 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2703 } else if (if_getflags(ifp) & IFF_ALLMULTI) {
2704 rctl |= IXGBE_FCTRL_MPE;
2705 rctl &= ~IXGBE_FCTRL_UPE;
2706 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2707 }
2708 return (0);
2709 } /* ixgbe_if_promisc_set */
2710
2711 /************************************************************************
2712 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2713 ************************************************************************/
2714 static int
2715 ixgbe_msix_link(void *arg)
2716 {
2717 struct ixgbe_softc *sc = arg;
2718 struct ixgbe_hw *hw = &sc->hw;
2719 u32 eicr, eicr_mask;
2720 s32 retval;
2721
2722 ++sc->link_irq;
2723
2724 /* Pause other interrupts */
2725 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2726
2727 /* First get the cause */
2728 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2729 /* Be sure the queue bits are not cleared */
2730 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2731 /* Clear interrupt with write */
2732 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2733
2734 /* Link status change */
2735 if (eicr & IXGBE_EICR_LSC) {
2736 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2737 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
2738 }
2739
2740 if (eicr & IXGBE_EICR_FW_EVENT) {
2741 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FW_EVENT);
2742 sc->task_requests |= IXGBE_REQUEST_TASK_FWEVENT;
2743 }
2744
2745 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2746 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2747 (eicr & IXGBE_EICR_FLOW_DIR)) {
2748 /* This is probably overkill :) */
2749 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2750 return (FILTER_HANDLED);
2751 /* Disable the interrupt */
2752 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2753 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2754 } else if (eicr & IXGBE_EICR_ECC) {
2755 device_printf(iflib_get_dev(sc->ctx),
2756 "Received ECC Err, initiating reset\n");
2757 hw->mac.flags &=
2758 ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2760 ixgbe_reset_hw(hw);
2761 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2762 IXGBE_EICR_ECC);
2763 }
2764
2765 /* Check for over temp condition */
2766 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2767 switch (sc->hw.mac.type) {
2768 case ixgbe_mac_X550EM_a:
2769 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2770 break;
2771 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2772 IXGBE_EICR_GPI_SDP0_X550EM_a);
2773 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2774 IXGBE_EICR_GPI_SDP0_X550EM_a);
2775 retval = hw->phy.ops.check_overtemp(hw);
2776 if (retval != IXGBE_ERR_OVERTEMP)
2777 break;
2778 device_printf(iflib_get_dev(sc->ctx),
2779 "\nCRITICAL: OVER TEMP!!"
2780 " PHY IS SHUT DOWN!!\n");
2781 device_printf(iflib_get_dev(sc->ctx),
2782 "System shutdown required!\n");
2783 break;
2784 default:
2785 if (!(eicr & IXGBE_EICR_TS))
2786 break;
2787 retval = hw->phy.ops.check_overtemp(hw);
2788 if (retval != IXGBE_ERR_OVERTEMP)
2789 break;
2790 device_printf(iflib_get_dev(sc->ctx),
2791 "\nCRITICAL: OVER TEMP!!"
2792 " PHY IS SHUT DOWN!!\n");
2793 device_printf(iflib_get_dev(sc->ctx),
2794 "System shutdown required!\n");
2795 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2796 IXGBE_EICR_TS);
2797 break;
2798 }
2799 }
2800
2801 /* Check for VF message */
2802 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2803 (eicr & IXGBE_EICR_MAILBOX)) {
2804 sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
2805 }
2806 }
2807
2808 /*
2809 * On E610, the firmware handles PHY configuration, so
2810 * there is no need to perform any SFP-specific tasks.
2811 */
2812 if (hw->mac.type != ixgbe_mac_E610 && ixgbe_is_sfp(hw)) {
2813 /* Pluggable optics-related interrupt */
2814 if (hw->mac.type >= ixgbe_mac_X540)
2815 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2816 else
2817 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2818
2819 if (eicr & eicr_mask) {
2820 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2821 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
2822 }
2823
2824 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2825 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2826 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2827 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2828 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2829 }
2830 }
2831
2832 /* Check for fan failure */
2833 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2834 ixgbe_check_fan_failure(sc, eicr, true);
2835 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2836 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2837 }
2838
2839 /* External PHY interrupt */
2840 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2841 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2842 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2843 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
2844 }
2845
2846 return (sc->task_requests != 0) ?
2847 FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2848 } /* ixgbe_msix_link */
2849
2850 /************************************************************************
2851 * ixgbe_sysctl_interrupt_rate_handler
2852 ************************************************************************/
2853 static int
2854 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2855 {
2856 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2857 int error;
2858 unsigned int reg, usec, rate;
2859
2860 if (atomic_load_acq_int(&que->sc->recovery_mode))
2861 return (EPERM);
2862
2863 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2864 usec = ((reg & 0x0FF8) >> 3);
2865 if (usec > 0)
2866 rate = 500000 / usec;
2867 else
2868 rate = 0;
2869 error = sysctl_handle_int(oidp, &rate, 0, req);
2870 if (error || !req->newptr)
2871 return error;
2872 reg &= ~0xfff; /* default, no limitation */
2873 ixgbe_max_interrupt_rate = 0;
2874 if (rate > 0 && rate < 500000) {
2875 if (rate < 1000)
2876 rate = 1000;
2877 ixgbe_max_interrupt_rate = rate;
2878 reg |= ((4000000/rate) & 0xff8);
2879 }
2880 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2881
2882 return (0);
2883 } /* ixgbe_sysctl_interrupt_rate_handler */
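
/************************************************************************
 * Example: EITR <-> interrupt-rate conversion (illustrative sketch)
 *
 * The EITR interval field lives in bits 3..11 and counts ~2us units,
 * hence rate = 500000 / field above.  Worked round trip for a
 * requested 8000 irq/s: 4000000 / 8000 = 500 = 0x1F4, masked with
 * 0xFF8 to 0x1F0; reading back gives (0x1F0 >> 3) = 62 and
 * 500000 / 62 ~= 8064 irq/s -- the difference is the field's
 * granularity.
 ************************************************************************/
#if 0	/* EXAMPLE ONLY -- illustrative sketch, not compiled */
static inline unsigned int
ixgbe_example_eitr_roundtrip(unsigned int rate)
{
	unsigned int reg = (4000000 / rate) & 0xff8;	/* encode */
	unsigned int usec = (reg & 0x0ff8) >> 3;	/* decode */

	return (usec ? 500000 / usec : 0);	/* effective rate */
}
#endif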
2884
2885 /************************************************************************
2886 * ixgbe_add_device_sysctls
2887 ************************************************************************/
2888 static void
2889 ixgbe_add_device_sysctls(if_ctx_t ctx)
2890 {
2891 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2892 device_t dev = iflib_get_dev(ctx);
2893 struct ixgbe_hw *hw = &sc->hw;
2894 struct sysctl_oid_list *child;
2895 struct sysctl_ctx_list *ctx_list;
2896
2897 ctx_list = device_get_sysctl_ctx(dev);
2898 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2899
2900 /* Sysctls for all devices */
2901 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2902 CTLTYPE_INT | CTLFLAG_RW,
2903 sc, 0, ixgbe_sysctl_flowcntl, "I",
2904 IXGBE_SYSCTL_DESC_SET_FC);
2905
2906 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2907 CTLTYPE_INT | CTLFLAG_RW,
2908 sc, 0, ixgbe_sysctl_advertise, "I",
2909 IXGBE_SYSCTL_DESC_ADV_SPEED);
2910
2911 sc->enable_aim = ixgbe_enable_aim;
2912 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2913 &sc->enable_aim, 0, "Interrupt Moderation");
2914
2915 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2916 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2917 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2918
2919 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2920 "tso_tcp_flags_mask_first_segment",
2921 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2922 sc, 0, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2923 "TSO TCP flags mask for first segment");
2924
2925 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2926 "tso_tcp_flags_mask_middle_segment",
2927 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2928 sc, 1, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2929 "TSO TCP flags mask for middle segment");
2930
2931 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2932 "tso_tcp_flags_mask_last_segment",
2933 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2934 sc, 2, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2935 "TSO TCP flags mask for last segment");
2936
2937 #ifdef IXGBE_DEBUG
2938 /* testing sysctls (for all devices) */
2939 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2940 CTLTYPE_INT | CTLFLAG_RW,
2941 sc, 0, ixgbe_sysctl_power_state,
2942 "I", "PCI Power State");
2943
2944 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2945 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2946 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2947 #endif
2948 /* for X550 series devices */
2949 if (hw->mac.type >= ixgbe_mac_X550)
2950 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2951 CTLTYPE_U16 | CTLFLAG_RW,
2952 sc, 0, ixgbe_sysctl_dmac,
2953 "I", "DMA Coalesce");
2954
2955 /* for WoL-capable devices */
2956 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2957 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2958 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2959 ixgbe_sysctl_wol_enable, "I",
2960 "Enable/Disable Wake on LAN");
2961
2962 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2963 CTLTYPE_U32 | CTLFLAG_RW,
2964 sc, 0, ixgbe_sysctl_wufc,
2965 "I", "Enable/Disable Wake Up Filters");
2966 }
2967
2968 /* for X552/X557-AT devices */
2969 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2970 struct sysctl_oid *phy_node;
2971 struct sysctl_oid_list *phy_list;
2972
2973 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2974 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2975 "External PHY sysctls");
2976 phy_list = SYSCTL_CHILDREN(phy_node);
2977
2978 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2979 CTLTYPE_U16 | CTLFLAG_RD,
2980 sc, 0, ixgbe_sysctl_phy_temp,
2981 "I", "Current External PHY Temperature (Celsius)");
2982
2983 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2984 "overtemp_occurred",
2985 CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
2986 ixgbe_sysctl_phy_overtemp_occurred, "I",
2987 "External PHY High Temperature Event Occurred");
2988 }
2989
2990 if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2991 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2992 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2993 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2994 }
2995 } /* ixgbe_add_device_sysctls */
2996
2997 /************************************************************************
2998 * ixgbe_allocate_pci_resources
2999 ************************************************************************/
3000 static int
3001 ixgbe_allocate_pci_resources(if_ctx_t ctx)
3002 {
3003 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3004 device_t dev = iflib_get_dev(ctx);
3005 int rid;
3006
3007 rid = PCIR_BAR(0);
3008 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3009 RF_ACTIVE);
3010
3011 if (!(sc->pci_mem)) {
3012 device_printf(dev,
3013 "Unable to allocate bus resource: memory\n");
3014 return (ENXIO);
3015 }
3016
3017 /* Save bus_space values for READ/WRITE_REG macros */
3018 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
3019 sc->osdep.mem_bus_space_handle =
3020 rman_get_bushandle(sc->pci_mem);
3021 /* Set hw values for shared code */
3022 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
3023
3024 return (0);
3025 } /* ixgbe_allocate_pci_resources */
3026
3027 /************************************************************************
3028 * ixgbe_detach - Device removal routine
3029 *
3030 * Called when the driver is being removed.
3031 * Stops the adapter and deallocates all the resources
3032 * that were allocated for driver operation.
3033 *
3034 * return 0 on success, positive on failure
3035 ************************************************************************/
3036 static int
3037 ixgbe_if_detach(if_ctx_t ctx)
3038 {
3039 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3040 device_t dev = iflib_get_dev(ctx);
3041 u32 ctrl_ext;
3042
3043 INIT_DEBUGOUT("ixgbe_detach: begin");
3044
3045 if (ixgbe_pci_iov_detach(dev) != 0) {
3046 device_printf(dev, "SR-IOV in use; detach first.\n");
3047 return (EBUSY);
3048 }
3049
3050 ixgbe_setup_low_power_mode(ctx);
3051
3052 /* let hardware know driver is unloading */
3053 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
3054 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3055 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
3056
3057 callout_drain(&sc->fw_mode_timer);
3058
3059 if (sc->hw.mac.type == ixgbe_mac_E610) {
3060 ixgbe_disable_lse(sc);
3061 ixgbe_shutdown_aci(&sc->hw);
3062 }
3063
3064 ixgbe_free_pci_resources(ctx);
3065
3066 free(sc->mta, M_IXGBE);
3067
3068 return (0);
3069 } /* ixgbe_if_detach */
3070
3071 /************************************************************************
3072 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3073 *
3074 * Prepare the adapter/port for LPLU and/or WoL
3075 ************************************************************************/
3076 static int
3077 ixgbe_setup_low_power_mode(if_ctx_t ctx)
3078 {
3079 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3080 struct ixgbe_hw *hw = &sc->hw;
3081 device_t dev = iflib_get_dev(ctx);
3082 s32 error = 0;
3083
3084 if (!hw->wol_enabled)
3085 ixgbe_set_phy_power(hw, false);
3086
3087 /* Limit power management flow to X550EM baseT */
3088 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3089 hw->phy.ops.enter_lplu) {
3090 /* Turn off support for APM wakeup. (Using ACPI instead) */
3091 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3092 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3093
3094 /*
3095 * Clear Wake Up Status register to prevent any previous
3096 * wakeup events from waking us up immediately after we
3097 * suspend.
3098 */
3099 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3100
3101 /*
3102 * Program the Wakeup Filter Control register with user filter
3103 * settings
3104 */
3105 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
3106
3107 /* Enable wakeups and power management in Wakeup Control */
3108 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3109 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3110
3111 /* X550EM baseT adapters need a special LPLU flow */
3112 hw->phy.reset_disable = true;
3113 ixgbe_if_stop(ctx);
3114 error = hw->phy.ops.enter_lplu(hw);
3115 if (error)
3116 device_printf(dev, "Error entering LPLU: %d\n",
3117 error);
3118 hw->phy.reset_disable = false;
3119 } else {
3120 /* Just stop for other adapters */
3121 ixgbe_if_stop(ctx);
3122 }
3123
3124 return error;
3125 } /* ixgbe_setup_low_power_mode */
3126
3127 /************************************************************************
3128 * ixgbe_shutdown - Shutdown entry point
3129 ************************************************************************/
3130 static int
3131 ixgbe_if_shutdown(if_ctx_t ctx)
3132 {
3133 int error = 0;
3134
3135 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3136
3137 error = ixgbe_setup_low_power_mode(ctx);
3138
3139 return (error);
3140 } /* ixgbe_if_shutdown */
3141
3142 /************************************************************************
3143 * ixgbe_suspend
3144 *
3145 * From D0 to D3
3146 ************************************************************************/
3147 static int
3148 ixgbe_if_suspend(if_ctx_t ctx)
3149 {
3150 int error = 0;
3151
3152 INIT_DEBUGOUT("ixgbe_suspend: begin");
3153
3154 error = ixgbe_setup_low_power_mode(ctx);
3155
3156 return (error);
3157 } /* ixgbe_if_suspend */
3158
3159 /************************************************************************
3160 * ixgbe_resume
3161 *
3162 * From D3 to D0
3163 ************************************************************************/
3164 static int
3165 ixgbe_if_resume(if_ctx_t ctx)
3166 {
3167 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3168 device_t dev = iflib_get_dev(ctx);
3169 if_t ifp = iflib_get_ifp(ctx);
3170 struct ixgbe_hw *hw = &sc->hw;
3171 u32 wus;
3172
3173 INIT_DEBUGOUT("ixgbe_resume: begin");
3174
3175 /* Read & clear WUS register */
3176 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3177 if (wus)
3178 device_printf(dev, "Woken up by (WUS): %#010x\n",
3179 wus);
3180 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3181 /* And clear WUFC until next low-power transition */
3182 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3183
3184 /*
3185 * Required after D3->D0 transition;
3186 * will re-advertise all previous advertised speeds
3187 */
3188 if (if_getflags(ifp) & IFF_UP)
3189 ixgbe_if_init(ctx);
3190
3191 return (0);
3192 } /* ixgbe_if_resume */
3193
3194 /************************************************************************
3195 * ixgbe_if_mtu_set - Ioctl mtu entry point
3196 *
3197 * Return 0 on success, EINVAL on failure
3198 ************************************************************************/
3199 static int
3200 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
3201 {
3202 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3203 int error = 0;
3204
3205 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
3206
3207 if (mtu > IXGBE_MAX_MTU) {
3208 error = EINVAL;
3209 } else {
3210 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
3211 }
3212
3213 return error;
3214 } /* ixgbe_if_mtu_set */
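/*
 * Worked example of the sizing above (illustrative only): with an MTU
 * of 9000, max_frame_size becomes 9000 + IXGBE_MTU_HDR, i.e. the MTU
 * plus the link-layer overhead (Ethernet header and CRC) that
 * IXGBE_MTU_HDR accounts for in this tree.
 */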
3215
3216 /************************************************************************
3217 * ixgbe_if_crcstrip_set
3218 ************************************************************************/
3219 static void
3220 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
3221 {
3222 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3223 struct ixgbe_hw *hw = &sc->hw;
3224 /* crc stripping is set in two places:
3225 * IXGBE_HLREG0 (modified on init_locked and hw reset)
3226 * IXGBE_RDRXCTL (set by the original driver in
3227 * ixgbe_setup_hw_rsc() called in init_locked.
3228 * We disable the setting when netmap is compiled in).
3229 * We update the values here, but also in ixgbe.c because
3230 * init_locked sometimes is called outside our control.
3231 */
3232 uint32_t hl, rxc;
3233
3234 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3235 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3236 #ifdef NETMAP
3237 if (netmap_verbose)
3238 D("%s read HLREG 0x%x rxc 0x%x",
3239 onoff ? "enter" : "exit", hl, rxc);
3240 #endif
3241 /* hw requirements ... */
3242 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3243 rxc |= IXGBE_RDRXCTL_RSCACKC;
3244 if (onoff && !crcstrip) {
3245 /* keep the crc. Fast rx */
3246 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
3247 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
3248 } else {
3249 /* reset default mode */
3250 hl |= IXGBE_HLREG0_RXCRCSTRP;
3251 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
3252 }
3253 #ifdef NETMAP
3254 if (netmap_verbose)
3255 D("%s write HLREG 0x%x rxc 0x%x",
3256 onoff ? "enter" : "exit", hl, rxc);
3257 #endif
3258 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
3259 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
3260 } /* ixgbe_if_crcstrip_set */
3261
3262 /*********************************************************************
3263 * ixgbe_if_init - Init entry point
3264 *
3265 * Used in two ways: It is used by the stack as an init
3266 * entry point in network interface structure. It is also
3267 * used by the driver as a hw/sw initialization routine to
3268 * get to a consistent state.
3269 *
3270 * Return 0 on success, positive on failure
3271 **********************************************************************/
3272 void
3273 ixgbe_if_init(if_ctx_t ctx)
3274 {
3275 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3276 if_t ifp = iflib_get_ifp(ctx);
3277 device_t dev = iflib_get_dev(ctx);
3278 struct ixgbe_hw *hw = &sc->hw;
3279 struct ix_rx_queue *rx_que;
3280 struct ix_tx_queue *tx_que;
3281 u32 txdctl, mhadd;
3282 u32 rxdctl, rxctrl;
3283 u32 ctrl_ext;
3284
3285 int i, j, err;
3286
3287 INIT_DEBUGOUT("ixgbe_if_init: begin");
3288
3289 /* Queue indices may change with IOV mode */
3290 ixgbe_align_all_queue_indices(sc);
3291
3292 /* reprogram the RAR[0] in case user changed it. */
3293 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3294
3295 /* Get the latest mac address, User can use a LAA */
3296 bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3297 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3298 hw->addr_ctrl.rar_used_count = 1;
3299
3300 ixgbe_init_hw(hw);
3301
3302 ixgbe_initialize_iov(sc);
3303
3304 ixgbe_initialize_transmit_units(ctx);
3305
3306 /* Setup Multicast table */
3307 ixgbe_if_multi_set(ctx);
3308
3309 /* Determine the correct mbuf pool, based on frame size */
3310 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3311
3312 /* Configure RX settings */
3313 ixgbe_initialize_receive_units(ctx);
3314
3315 /*
3316 * Initialize variable holding task enqueue requests
3317 * from MSI-X interrupts
3318 */
3319 sc->task_requests = 0;
3320
3321 /* Enable SDP & MSI-X interrupts based on adapter */
3322 ixgbe_config_gpie(sc);
3323
3324 /* Set MTU size */
3325 if (if_getmtu(ifp) > ETHERMTU) {
3326 /* aka IXGBE_MAXFRS on 82599 and newer */
3327 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3328 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3329 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3330 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3331 }
3332
3333 /* Now enable all the queues */
3334 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
3335 i++, tx_que++) {
3336 struct tx_ring *txr = &tx_que->txr;
3337
3338 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3339 txdctl |= IXGBE_TXDCTL_ENABLE;
3340 /* Set WTHRESH to 8, burst writeback */
3341 txdctl |= (8 << 16);
3342 /*
3343 * When the internal queue falls below PTHRESH (32),
3344 * start prefetching as long as there are at least
3345 * HTHRESH (1) buffers ready. The values are taken
3346 * from the Intel linux driver 3.8.21.
3347 * Prefetching enables tx line rate even with 1 queue.
3348 */
3349 txdctl |= (32 << 0) | (1 << 8);
3350 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3351 }
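/*
 * Bit layout of the TXDCTL value programmed in the loop above (field
 * positions assumed from the 82599-class register layout):
 *
 *   txdctl = ENABLE | (32 << 0) | (1 << 8) | (8 << 16)
 *            PTHRESH = 32 (bits 6:0)
 *            HTHRESH = 1  (bits 14:8)
 *            WTHRESH = 8  (bits 22:16)
 */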
3352
3353 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
3354 i++, rx_que++) {
3355 struct rx_ring *rxr = &rx_que->rxr;
3356
3357 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3358 if (hw->mac.type == ixgbe_mac_82598EB) {
3359 /*
3360 * PTHRESH = 32
3361 * HTHRESH = 4
3362 * WTHRESH = 8
3363 */
3364 rxdctl &= ~0x3FFFFF;
3365 rxdctl |= 0x080420;
3366 }
3367 rxdctl |= IXGBE_RXDCTL_ENABLE;
3368 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3369 for (j = 0; j < 10; j++) {
3370 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3371 IXGBE_RXDCTL_ENABLE)
3372 break;
3373 else
3374 msec_delay(1);
3375 }
3376 wmb();
3377 }
3378
3379 /* Enable Receive engine */
3380 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3381 if (hw->mac.type == ixgbe_mac_82598EB)
3382 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3383 rxctrl |= IXGBE_RXCTRL_RXEN;
3384 ixgbe_enable_rx_dma(hw, rxctrl);
3385
3386 /* Set up MSI/MSI-X routing */
3387 if (ixgbe_enable_msix) {
3388 ixgbe_configure_ivars(sc);
3389 /* Set up auto-mask */
3390 if (hw->mac.type == ixgbe_mac_82598EB)
3391 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3392 else {
3393 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3394 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3395 }
3396 } else { /* Simple settings for Legacy/MSI */
3397 ixgbe_set_ivar(sc, 0, 0, 0);
3398 ixgbe_set_ivar(sc, 0, 0, 1);
3399 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3400 }
3401
3402 ixgbe_init_fdir(sc);
3403
3404 /*
3405 * Check on any SFP devices that
3406 * need to be kick-started
3407 */
3408 if (hw->phy.type == ixgbe_phy_none) {
3409 err = hw->phy.ops.identify(hw);
3410 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3411 device_printf(dev,
3412 "Unsupported SFP+ module type was detected.\n");
3413 return;
3414 }
3415 }
3416
3417 /* Set moderation on the Link interrupt */
3418 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3419
3420 /* Enable power to the phy. */
3421 ixgbe_set_phy_power(hw, true);
3422
3423 /* Config/Enable Link */
3424 ixgbe_config_link(ctx);
3425
3426 /* Hardware Packet Buffer & Flow Control setup */
3427 ixgbe_config_delay_values(sc);
3428
3429 /* Initialize the FC settings */
3430 ixgbe_start_hw(hw);
3431
3432 /* Set up VLAN support and filter */
3433 ixgbe_setup_vlan_hw_support(ctx);
3434
3435 /* Setup DMA Coalescing */
3436 ixgbe_config_dmac(sc);
3437
3438 /* And now turn on interrupts */
3439 ixgbe_if_enable_intr(ctx);
3440
3441 /* Enable the use of the MBX by the VF's */
3442 if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3443 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3444 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3445 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3446 }
3447
3448 } /* ixgbe_if_init */
3449
3450 /************************************************************************
3451 * ixgbe_set_ivar
3452 *
3453 * Setup the correct IVAR register for a particular MSI-X interrupt
3454 * (yes this is all very magic and confusing :)
3455 * - entry is the register array entry
3456 * - vector is the MSI-X vector for this queue
3457 * - type is RX/TX/MISC
3458 ************************************************************************/
3459 static void
3460 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3461 {
3462 struct ixgbe_hw *hw = &sc->hw;
3463 u32 ivar, index;
3464
3465 vector |= IXGBE_IVAR_ALLOC_VAL;
3466
3467 switch (hw->mac.type) {
3468 case ixgbe_mac_82598EB:
3469 if (type == -1)
3470 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3471 else
3472 entry += (type * 64);
3473 index = (entry >> 2) & 0x1F;
3474 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3475 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3476 ivar |= (vector << (8 * (entry & 0x3)));
3477 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3478 break;
3479 case ixgbe_mac_82599EB:
3480 case ixgbe_mac_X540:
3481 case ixgbe_mac_X550:
3482 case ixgbe_mac_X550EM_x:
3483 case ixgbe_mac_X550EM_a:
3484 case ixgbe_mac_E610:
3485 if (type == -1) { /* MISC IVAR */
3486 index = (entry & 1) * 8;
3487 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3488 ivar &= ~(0xFF << index);
3489 ivar |= (vector << index);
3490 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3491 } else { /* RX/TX IVARS */
3492 index = (16 * (entry & 1)) + (8 * type);
3493 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3494 ivar &= ~(0xFF << index);
3495 ivar |= (vector << index);
3496 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3497 }
3498 default:
3499 break;
3500 }
3501 } /* ixgbe_set_ivar */
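/*
 * Worked example of the IVAR math above (numbers are illustrative,
 * not taken from a specific board): on an 82599-class MAC, binding RX
 * (type 0) queue entry 3 to MSI-X vector 5 gives
 *   index    = (16 * (3 & 1)) + (8 * 0) = 16
 *   register = IXGBE_IVAR(3 >> 1)       = IXGBE_IVAR(1)
 * so bits 23:16 of IVAR(1) are loaded with (5 | IXGBE_IVAR_ALLOC_VAL).
 */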
3502
3503 /************************************************************************
3504 * ixgbe_configure_ivars
3505 ************************************************************************/
3506 static void
3507 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3508 {
3509 struct ix_rx_queue *rx_que = sc->rx_queues;
3510 struct ix_tx_queue *tx_que = sc->tx_queues;
3511 u32 newitr;
3512
3513 if (ixgbe_max_interrupt_rate > 0)
3514 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3515 else {
3516 /*
3517 * Disable DMA coalescing if interrupt moderation is
3518 * disabled.
3519 */
3520 sc->dmac = 0;
3521 newitr = 0;
3522 }
3523
3524 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3525 struct rx_ring *rxr = &rx_que->rxr;
3526
3527 /* First the RX queue entry */
3528 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3529
3530 /* Set an Initial EITR value */
3531 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3532 }
3533 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3534 struct tx_ring *txr = &tx_que->txr;
3535
3536 /* ... and the TX */
3537 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3538 }
3539 /* For the Link interrupt */
3540 ixgbe_set_ivar(sc, 1, sc->vector, -1);
3541 } /* ixgbe_configure_ivars */
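/*
 * Worked EITR example for the newitr computation above: with
 * ixgbe_max_interrupt_rate = 8000 interrupts/s,
 *   newitr = (4000000 / 8000) & 0x0FF8 = 0x1F4 & 0x0FF8 = 0x1F0
 * The mask keeps only bits 3..11, the interval field of EITR, so the
 * programmed value is effectively rounded down to a multiple of 8.
 */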
3542
3543 /************************************************************************
3544 * ixgbe_config_gpie
3545 ************************************************************************/
3546 static void
3547 ixgbe_config_gpie(struct ixgbe_softc *sc)
3548 {
3549 struct ixgbe_hw *hw = &sc->hw;
3550 u32 gpie;
3551
3552 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3553
3554 if (sc->intr_type == IFLIB_INTR_MSIX) {
3555 /* Enable Enhanced MSI-X mode */
3556 gpie |= IXGBE_GPIE_MSIX_MODE |
3557 IXGBE_GPIE_EIAME |
3558 IXGBE_GPIE_PBA_SUPPORT |
3559 IXGBE_GPIE_OCD;
3560 }
3561
3562 /* Fan Failure Interrupt */
3563 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3564 gpie |= IXGBE_SDP1_GPIEN;
3565
3566 /* Thermal Sensor Interrupt */
3567 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3568 gpie |= IXGBE_SDP0_GPIEN_X540;
3569
3570 /* Link detection */
3571 switch (hw->mac.type) {
3572 case ixgbe_mac_82599EB:
3573 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3574 break;
3575 case ixgbe_mac_X550EM_x:
3576 case ixgbe_mac_X550EM_a:
3577 gpie |= IXGBE_SDP0_GPIEN_X540;
3578 break;
3579 default:
3580 break;
3581 }
3582
3583 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3584
3585 } /* ixgbe_config_gpie */
3586
3587 /************************************************************************
3588 * ixgbe_config_delay_values
3589 *
3590 * Requires sc->max_frame_size to be set.
3591 ************************************************************************/
3592 static void
3593 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3594 {
3595 struct ixgbe_hw *hw = &sc->hw;
3596 u32 rxpb, frame, size, tmp;
3597
3598 frame = sc->max_frame_size;
3599
3600 /* Calculate High Water */
3601 switch (hw->mac.type) {
3602 case ixgbe_mac_X540:
3603 case ixgbe_mac_X550:
3604 case ixgbe_mac_X550EM_x:
3605 case ixgbe_mac_X550EM_a:
3606 tmp = IXGBE_DV_X540(frame, frame);
3607 break;
3608 default:
3609 tmp = IXGBE_DV(frame, frame);
3610 break;
3611 }
3612 size = IXGBE_BT2KB(tmp);
3613 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3614 hw->fc.high_water[0] = rxpb - size;
3615
3616 /* Now calculate Low Water */
3617 switch (hw->mac.type) {
3618 case ixgbe_mac_X540:
3619 case ixgbe_mac_X550:
3620 case ixgbe_mac_X550EM_x:
3621 case ixgbe_mac_X550EM_a:
3622 tmp = IXGBE_LOW_DV_X540(frame);
3623 break;
3624 default:
3625 tmp = IXGBE_LOW_DV(frame);
3626 break;
3627 }
3628 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3629
3630 hw->fc.pause_time = IXGBE_FC_PAUSE;
3631 hw->fc.send_xon = true;
3632 } /* ixgbe_config_delay_values */
3633
3634 /************************************************************************
3635 * ixgbe_if_multi_set - Multicast Update
3636 *
3637 * Called whenever multicast address list is updated.
3638 ************************************************************************/
3639 static u_int
3640 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3641 {
3642 struct ixgbe_softc *sc = arg;
3643 struct ixgbe_mc_addr *mta = sc->mta;
3644
3645 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3646 return (0);
3647 bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3648 mta[idx].vmdq = sc->pool;
3649
3650 return (1);
3651 } /* ixgbe_mc_filter_apply */
3652
3653 static void
3654 ixgbe_if_multi_set(if_ctx_t ctx)
3655 {
3656 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3657 struct ixgbe_mc_addr *mta;
3658 if_t ifp = iflib_get_ifp(ctx);
3659 u8 *update_ptr;
3660 u32 fctrl;
3661 u_int mcnt;
3662
3663 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3664
3665 mta = sc->mta;
3666 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3667
3668 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3669 sc);
3670
3671 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3672 update_ptr = (u8 *)mta;
3673 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3674 ixgbe_mc_array_itr, true);
3675 }
3676
3677 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3678
3679 if (if_getflags(ifp) & IFF_PROMISC)
3680 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3681 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3682 if_getflags(ifp) & IFF_ALLMULTI) {
3683 fctrl |= IXGBE_FCTRL_MPE;
3684 fctrl &= ~IXGBE_FCTRL_UPE;
3685 } else
3686 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3687
3688 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3689 } /* ixgbe_if_multi_set */
3690
3691 /************************************************************************
3692 * ixgbe_mc_array_itr
3693 *
3694 * An iterator function needed by the multicast shared code.
3695 * It feeds the shared code routine the addresses in the
3696 * array built by ixgbe_if_multi_set() one by one.
3697 ************************************************************************/
3698 static u8 *
3699 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3700 {
3701 struct ixgbe_mc_addr *mta;
3702
3703 mta = (struct ixgbe_mc_addr *)*update_ptr;
3704 *vmdq = mta->vmdq;
3705
3706 *update_ptr = (u8*)(mta + 1);
3707
3708 return (mta->addr);
3709 } /* ixgbe_mc_array_itr */
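/*
 * Sketch of how the shared code consumes this iterator (hedged; the
 * actual loop lives in the shared ixgbe_update_mc_addr_list()):
 *
 *   u8 *ptr = (u8 *)sc->mta;
 *   for (u_int i = 0; i < mcnt; i++) {
 *       u32 vmdq;
 *       u8 *addr = ixgbe_mc_array_itr(hw, &ptr, &vmdq);
 *       // hash 'addr' into the MTA and bind it to pool 'vmdq'
 *   }
 */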
3710
3711 /************************************************************************
3712 * ixgbe_if_timer - Timer routine
3713 *
3714 * Checks for link status, updates statistics,
3715 * and runs the watchdog check.
3716 ************************************************************************/
3717 static void
3718 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3719 {
3720 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3721
3722 if (qid != 0)
3723 return;
3724
3725 /* Check for pluggable optics */
3726 if (sc->sfp_probe)
3727 if (!ixgbe_sfp_probe(ctx))
3728 return; /* Nothing to do */
3729
3730 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3731
3732 /* Fire off the adminq task */
3733 iflib_admin_intr_deferred(ctx);
3734
3735 } /* ixgbe_if_timer */
3736
3737 /************************************************************************
3738 * ixgbe_fw_mode_timer - FW mode timer routine
3739 ************************************************************************/
3740 static void
3741 ixgbe_fw_mode_timer(void *arg)
3742 {
3743 struct ixgbe_softc *sc = arg;
3744 struct ixgbe_hw *hw = &sc->hw;
3745
3746 if (ixgbe_fw_recovery_mode(hw)) {
3747 if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
3748 /* Firmware error detected, entering recovery mode */
3749 device_printf(sc->dev,
3750 "Firmware recovery mode detected. Limiting"
3751 " functionality. Refer to the Intel(R) Ethernet"
3752 " Adapters and Devices User Guide for details on"
3753 " firmware recovery mode.\n");
3754
3755 if (hw->adapter_stopped == FALSE)
3756 ixgbe_if_stop(sc->ctx);
3757 }
3758 } else
3759 atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
3760
3762 callout_reset(&sc->fw_mode_timer, hz,
3763 ixgbe_fw_mode_timer, sc);
3764 } /* ixgbe_fw_mode_timer */
3765
3766 /************************************************************************
3767 * ixgbe_sfp_probe
3768 *
3769 * Determine if a port had optics inserted.
3770 ************************************************************************/
3771 static bool
3772 ixgbe_sfp_probe(if_ctx_t ctx)
3773 {
3774 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3775 struct ixgbe_hw *hw = &sc->hw;
3776 device_t dev = iflib_get_dev(ctx);
3777 bool result = false;
3778
3779 if ((hw->phy.type == ixgbe_phy_nl) &&
3780 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3781 s32 ret = hw->phy.ops.identify_sfp(hw);
3782 if (ret)
3783 goto out;
3784 ret = hw->phy.ops.reset(hw);
3785 sc->sfp_probe = false;
3786 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3787 device_printf(dev,
3788 "Unsupported SFP+ module detected!");
3789 device_printf(dev,
3790 "Reload driver with supported module.\n");
3791 goto out;
3792 } else
3793 device_printf(dev, "SFP+ module detected!\n");
3794 /* We now have supported optics */
3795 result = true;
3796 }
3797 out:
3798
3799 return (result);
3800 } /* ixgbe_sfp_probe */
3801
3802 /************************************************************************
3803 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3804 ************************************************************************/
3805 static void
3806 ixgbe_handle_mod(void *context)
3807 {
3808 if_ctx_t ctx = context;
3809 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3810 struct ixgbe_hw *hw = &sc->hw;
3811 device_t dev = iflib_get_dev(ctx);
3812 u32 err, cage_full = 0;
3813
3814 if (sc->hw.need_crosstalk_fix) {
3815 switch (hw->mac.type) {
3816 case ixgbe_mac_82599EB:
3817 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3818 IXGBE_ESDP_SDP2;
3819 break;
3820 case ixgbe_mac_X550EM_x:
3821 case ixgbe_mac_X550EM_a:
3822 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3823 IXGBE_ESDP_SDP0;
3824 break;
3825 default:
3826 break;
3827 }
3828
3829 if (!cage_full)
3830 goto handle_mod_out;
3831 }
3832
3833 err = hw->phy.ops.identify_sfp(hw);
3834 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3835 device_printf(dev,
3836 "Unsupported SFP+ module type was detected.\n");
3837 goto handle_mod_out;
3838 }
3839
3840 if (hw->mac.type == ixgbe_mac_82598EB)
3841 err = hw->phy.ops.reset(hw);
3842 else
3843 err = hw->mac.ops.setup_sfp(hw);
3844
3845 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3846 device_printf(dev,
3847 "Setup failure - unsupported SFP+ module type.\n");
3848 goto handle_mod_out;
3849 }
3850 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3851 return;
3852
3853 handle_mod_out:
3854 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3855 } /* ixgbe_handle_mod */
3856
3857
3858 /************************************************************************
3859 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3860 ************************************************************************/
3861 static void
3862 ixgbe_handle_msf(void *context)
3863 {
3864 if_ctx_t ctx = context;
3865 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3866 struct ixgbe_hw *hw = &sc->hw;
3867 u32 autoneg;
3868 bool negotiate;
3869
3870 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3871 sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3872
3873 autoneg = hw->phy.autoneg_advertised;
3874 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3875 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3876 if (hw->mac.ops.setup_link)
3877 hw->mac.ops.setup_link(hw, autoneg, true);
3878
3879 /* Adjust media types shown in ifconfig */
3880 ifmedia_removeall(sc->media);
3881 ixgbe_add_media_types(sc->ctx);
3882 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3883 } /* ixgbe_handle_msf */
3884
3885 /************************************************************************
3886 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3887 ************************************************************************/
3888 static void
3889 ixgbe_handle_phy(void *context)
3890 {
3891 if_ctx_t ctx = context;
3892 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3893 struct ixgbe_hw *hw = &sc->hw;
3894 int error;
3895
3896 error = hw->phy.ops.handle_lasi(hw);
3897 if (error == IXGBE_ERR_OVERTEMP)
3898 device_printf(sc->dev,
3899 "CRITICAL: EXTERNAL PHY OVER TEMP!!"
3900 " PHY will downshift to lower power state!\n");
3901 else if (error)
3902 device_printf(sc->dev,
3903 "Error handling LASI interrupt: %d\n", error);
3904 } /* ixgbe_handle_phy */
3905
3906 /************************************************************************
3907 * ixgbe_enable_lse - enable link status events
3908 *
3909 * Sets mask and enables link status events
3910 ************************************************************************/
3911 s32 ixgbe_enable_lse(struct ixgbe_softc *sc)
3912 {
3913 s32 error;
3914
3915 u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
3916 IXGBE_ACI_LINK_EVENT_MEDIA_NA |
3917 IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
3918 IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
3919
3920 error = ixgbe_configure_lse(&sc->hw, TRUE, mask);
3921 if (error)
3922 return (error);
3923
3924 sc->lse_mask = mask;
3925 return (IXGBE_SUCCESS);
3926 } /* ixgbe_enable_lse */
3927
3928 /************************************************************************
3929 * ixgbe_disable_lse - disable link status events
3930 ************************************************************************/
3931 s32 ixgbe_disable_lse(struct ixgbe_softc *sc)
3932 {
3933 s32 error;
3934
3935 error = ixgbe_configure_lse(&sc->hw, false, sc->lse_mask);
3936 if (error)
3937 return (error);
3938
3939 sc->lse_mask = 0;
3940 return (IXGBE_SUCCESS);
3941 } /* ixgbe_disable_lse */
3942
3943 /************************************************************************
3944 * ixgbe_handle_fw_event - Tasklet for MSI-X Link Status Event interrupts
3945 ************************************************************************/
3946 static void
3947 ixgbe_handle_fw_event(void *context)
3948 {
3949 if_ctx_t ctx = context;
3950 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3951 struct ixgbe_hw *hw = &sc->hw;
3952 struct ixgbe_aci_event event;
3953 bool pending = false;
3954 s32 error;
3955
3956 event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
3957 event.msg_buf = malloc(event.buf_len, M_IXGBE, M_ZERO | M_NOWAIT);
3958 if (!event.msg_buf) {
3959 device_printf(sc->dev, "Can not allocate buffer for "
3960 "event message\n");
3961 return;
3962 }
3963
3964 do {
3965 error = ixgbe_aci_get_event(hw, &event, &pending);
3966 if (error) {
3967 device_printf(sc->dev, "Error getting event from "
3968 "FW:%d\n", error);
3969 break;
3970 }
3971
3972 switch (le16toh(event.desc.opcode)) {
3973 case ixgbe_aci_opc_get_link_status:
3974 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
3975 break;
3976
3977 case ixgbe_aci_opc_temp_tca_event:
3978 if (hw->adapter_stopped == FALSE)
3979 ixgbe_if_stop(ctx);
3980 device_printf(sc->dev,
3981 "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3982 device_printf(sc->dev, "System shutdown required!\n");
3983 break;
3984
3985 default:
3986 device_printf(sc->dev,
3987 "Unknown FW event captured, opcode=0x%04X\n",
3988 le16toh(event.desc.opcode));
3989 break;
3990 }
3991 } while (pending);
3992
3993 free(event.msg_buf, M_IXGBE);
3994 } /* ixgbe_handle_fw_event */
3995
3996 /************************************************************************
3997 * ixgbe_if_stop - Stop the hardware
3998 *
3999 * Disables all traffic on the adapter by issuing a
4000 * global reset on the MAC and deallocates TX/RX buffers.
4001 ************************************************************************/
4002 static void
4003 ixgbe_if_stop(if_ctx_t ctx)
4004 {
4005 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4006 struct ixgbe_hw *hw = &sc->hw;
4007
4008 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
4009
4010 ixgbe_reset_hw(hw);
4011 hw->adapter_stopped = false;
4012 ixgbe_stop_adapter(hw);
4013 if (hw->mac.type == ixgbe_mac_82599EB)
4014 ixgbe_stop_mac_link_on_d3_82599(hw);
4015 /* Turn off the laser - noop with no optics */
4016 ixgbe_disable_tx_laser(hw);
4017
4018 /* Update the stack */
4019 sc->link_up = false;
4020 ixgbe_if_update_admin_status(ctx);
4021
4022 /* reprogram the RAR[0] in case user changed it. */
4023 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
4024
4025 return;
4026 } /* ixgbe_if_stop */
4027
4028 /************************************************************************
4029 * ixgbe_link_speed_to_str - Convert link speed to string
4030 *
4031 * Helper function to convert link speed constants to human-readable
4032 * string representations in conventional Gbps or Mbps.
4033 ************************************************************************/
4034 static const char *
4035 ixgbe_link_speed_to_str(u32 link_speed)
4036 {
4037 switch (link_speed) {
4038 case IXGBE_LINK_SPEED_10GB_FULL:
4039 return "10 Gbps";
4040 case IXGBE_LINK_SPEED_5GB_FULL:
4041 return "5 Gbps";
4042 case IXGBE_LINK_SPEED_2_5GB_FULL:
4043 return "2.5 Gbps";
4044 case IXGBE_LINK_SPEED_1GB_FULL:
4045 return "1 Gbps";
4046 case IXGBE_LINK_SPEED_100_FULL:
4047 return "100 Mbps";
4048 case IXGBE_LINK_SPEED_10_FULL:
4049 return "10 Mbps";
4050 default:
4051 return "Unknown";
4052 }
4053 } /* ixgbe_link_speed_to_str */
4054
4055 /************************************************************************
4056 * ixgbe_if_update_admin_status - Update OS on link state
4057 *
4058 * Note: Only updates the OS on the cached link state.
4059 * The real check of the hardware only happens with
4060 * a link interrupt.
4061 ************************************************************************/
4062 static void
4063 ixgbe_if_update_admin_status(if_ctx_t ctx)
4064 {
4065 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4066 device_t dev = iflib_get_dev(ctx);
4067
4068 if (sc->link_up) {
4069 if (sc->link_active == false) {
4070 if (bootverbose)
4071 device_printf(dev,
4072 "Link is up %s Full Duplex\n",
4073 ixgbe_link_speed_to_str(sc->link_speed));
4074 sc->link_active = true;
4075 /* Update any Flow Control changes */
4076 ixgbe_fc_enable(&sc->hw);
4077 /* Update DMA coalescing config */
4078 ixgbe_config_dmac(sc);
4079 iflib_link_state_change(ctx, LINK_STATE_UP,
4080 ixgbe_link_speed_to_baudrate(sc->link_speed));
4081
4082 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
4083 ixgbe_ping_all_vfs(sc);
4084 }
4085 } else { /* Link down */
4086 if (sc->link_active == true) {
4087 if (bootverbose)
4088 device_printf(dev, "Link is Down\n");
4089 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
4090 sc->link_active = false;
4091 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
4092 ixgbe_ping_all_vfs(sc);
4093 }
4094 }
4095
4096 /* Handle task requests from msix_link() */
4097 if (sc->task_requests & IXGBE_REQUEST_TASK_FWEVENT)
4098 ixgbe_handle_fw_event(ctx);
4099 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
4100 ixgbe_handle_mod(ctx);
4101 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
4102 ixgbe_handle_msf(ctx);
4103 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
4104 ixgbe_handle_mbx(ctx);
4105 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
4106 ixgbe_reinit_fdir(ctx);
4107 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
4108 ixgbe_handle_phy(ctx);
4109 sc->task_requests = 0;
4110
4111 ixgbe_update_stats_counters(sc);
4112 } /* ixgbe_if_update_admin_status */
4113
4114 /************************************************************************
4115 * ixgbe_config_dmac - Configure DMA Coalescing
4116 ************************************************************************/
4117 static void
4118 ixgbe_config_dmac(struct ixgbe_softc *sc)
4119 {
4120 struct ixgbe_hw *hw = &sc->hw;
4121 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4122
4123 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4124 return;
4125
4126 if (dcfg->watchdog_timer ^ sc->dmac ||
4127 dcfg->link_speed ^ sc->link_speed) {
4128 dcfg->watchdog_timer = sc->dmac;
4129 dcfg->fcoe_en = false;
4130 dcfg->link_speed = sc->link_speed;
4131 dcfg->num_tcs = 1;
4132
4133 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4134 dcfg->watchdog_timer, dcfg->link_speed);
4135
4136 hw->mac.ops.dmac_config(hw);
4137 }
4138 } /* ixgbe_config_dmac */
4139
4140 /************************************************************************
4141 * ixgbe_if_enable_intr
4142 ************************************************************************/
4143 void
4144 ixgbe_if_enable_intr(if_ctx_t ctx)
4145 {
4146 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4147 struct ixgbe_hw *hw = &sc->hw;
4148 struct ix_rx_queue *que = sc->rx_queues;
4149 u32 mask, fwsm;
4150
4151 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4152
4153 switch (sc->hw.mac.type) {
4154 case ixgbe_mac_82599EB:
4155 mask |= IXGBE_EIMS_ECC;
4156 /* Temperature sensor on some adapters */
4157 mask |= IXGBE_EIMS_GPI_SDP0;
4158 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
4159 mask |= IXGBE_EIMS_GPI_SDP1;
4160 mask |= IXGBE_EIMS_GPI_SDP2;
4161 break;
4162 case ixgbe_mac_X540:
4163 /* Detect if Thermal Sensor is enabled */
4164 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4165 if (fwsm & IXGBE_FWSM_TS_ENABLED)
4166 mask |= IXGBE_EIMS_TS;
4167 mask |= IXGBE_EIMS_ECC;
4168 break;
4169 case ixgbe_mac_X550:
4170 /* MAC thermal sensor is automatically enabled */
4171 mask |= IXGBE_EIMS_TS;
4172 mask |= IXGBE_EIMS_ECC;
4173 break;
4174 case ixgbe_mac_X550EM_x:
4175 case ixgbe_mac_X550EM_a:
4176 /* Some devices use SDP0 for important information */
4177 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
4178 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
4179 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
4180 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
4181 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
4182 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
4183 mask |= IXGBE_EICR_GPI_SDP0_X540;
4184 mask |= IXGBE_EIMS_ECC;
4185 break;
4186 case ixgbe_mac_E610:
4187 mask |= IXGBE_EIMS_FW_EVENT;
4188 break;
4189 default:
4190 break;
4191 }
4192
4193 /* Enable Fan Failure detection */
4194 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
4195 mask |= IXGBE_EIMS_GPI_SDP1;
4196 /* Enable SR-IOV */
4197 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
4198 mask |= IXGBE_EIMS_MAILBOX;
4199 /* Enable Flow Director */
4200 if (sc->feat_en & IXGBE_FEATURE_FDIR)
4201 mask |= IXGBE_EIMS_FLOW_DIR;
4202
4203 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4204
4205 /* With MSI-X we use auto clear */
4206 if (sc->intr_type == IFLIB_INTR_MSIX) {
4207 mask = IXGBE_EIMS_ENABLE_MASK;
4208 /* Don't autoclear Link */
4209 mask &= ~IXGBE_EIMS_OTHER;
4210 mask &= ~IXGBE_EIMS_LSC;
4211 mask &= ~IXGBE_EIMS_FW_EVENT;
4212 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
4213 mask &= ~IXGBE_EIMS_MAILBOX;
4214 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4215 }
4216
4217 /*
4218 * Now enable all queues, this is done separately to
4219 * allow for handling the extended (beyond 32) MSI-X
4220 * vectors that can be used by 82599
4221 */
4222 for (int i = 0; i < sc->num_rx_queues; i++, que++)
4223 ixgbe_enable_queue(sc, que->msix);
4224
4225 IXGBE_WRITE_FLUSH(hw);
4226
4227 } /* ixgbe_if_enable_intr */
4228
4229 /************************************************************************
4230 * ixgbe_if_disable_intr
4231 ************************************************************************/
4232 static void
4233 ixgbe_if_disable_intr(if_ctx_t ctx)
4234 {
4235 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4236
4237 if (sc->intr_type == IFLIB_INTR_MSIX)
4238 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
4239 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
4240 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
4241 } else {
4242 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
4243 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
4244 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
4245 }
4246 IXGBE_WRITE_FLUSH(&sc->hw);
4247
4248 } /* ixgbe_if_disable_intr */
4249
4250 /************************************************************************
4251 * ixgbe_link_intr_enable
4252 ************************************************************************/
4253 static void
4254 ixgbe_link_intr_enable(if_ctx_t ctx)
4255 {
4256 struct ixgbe_hw *hw =
4257 &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
4258
4259 /* Re-enable other interrupts */
4260 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
4261 } /* ixgbe_link_intr_enable */
4262
4263 /************************************************************************
4264 * ixgbe_if_rx_queue_intr_enable
4265 ************************************************************************/
4266 static int
4267 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
4268 {
4269 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4270 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
4271
4272 ixgbe_enable_queue(sc, que->msix);
4273
4274 return (0);
4275 } /* ixgbe_if_rx_queue_intr_enable */
4276
4277 /************************************************************************
4278 * ixgbe_enable_queue
4279 ************************************************************************/
4280 static void
4281 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
4282 {
4283 struct ixgbe_hw *hw = &sc->hw;
4284 u64 queue = 1ULL << vector;
4285 u32 mask;
4286
4287 if (hw->mac.type == ixgbe_mac_82598EB) {
4288 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4289 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4290 } else {
4291 mask = (queue & 0xFFFFFFFF);
4292 if (mask)
4293 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4294 mask = (queue >> 32);
4295 if (mask)
4296 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4297 }
4298 } /* ixgbe_enable_queue */
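/*
 * Example of the 64-bit mask split above: vector 35 yields
 * queue = 1ULL << 35, so (queue & 0xFFFFFFFF) is 0 and
 * (queue >> 32) is 0x8; only EIMS_EX(1) is written, with bit 3 set.
 */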
4299
4300 /************************************************************************
4301 * ixgbe_disable_queue
4302 ************************************************************************/
4303 static void
4304 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
4305 {
4306 struct ixgbe_hw *hw = &sc->hw;
4307 u64 queue = 1ULL << vector;
4308 u32 mask;
4309
4310 if (hw->mac.type == ixgbe_mac_82598EB) {
4311 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4312 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
4313 } else {
4314 mask = (queue & 0xFFFFFFFF);
4315 if (mask)
4316 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
4317 mask = (queue >> 32);
4318 if (mask)
4319 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
4320 }
4321 } /* ixgbe_disable_queue */
4322
4323 /************************************************************************
4324 * ixgbe_intr - Legacy Interrupt Service Routine
4325 ************************************************************************/
4326 int
4327 ixgbe_intr(void *arg)
4328 {
4329 struct ixgbe_softc *sc = arg;
4330 struct ix_rx_queue *que = sc->rx_queues;
4331 struct ixgbe_hw *hw = &sc->hw;
4332 if_ctx_t ctx = sc->ctx;
4333 u32 eicr, eicr_mask;
4334
4335 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4336
4337 ++que->irqs;
4338 if (eicr == 0) {
4339 ixgbe_if_enable_intr(ctx);
4340 return (FILTER_HANDLED);
4341 }
4342
4343 /* Check for fan failure */
4344 if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
4345 (eicr & IXGBE_EICR_GPI_SDP1)) {
4346 device_printf(sc->dev,
4347 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4348 IXGBE_WRITE_REG(hw, IXGBE_EIMS,
4349 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4350 }
4351
4352 /* Link status change */
4353 if (eicr & IXGBE_EICR_LSC) {
4354 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
4355 iflib_admin_intr_deferred(ctx);
4356 }
4357
4358 if (ixgbe_is_sfp(hw)) {
4359 /* Pluggable optics-related interrupt */
4360 if (hw->mac.type >= ixgbe_mac_X540)
4361 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
4362 else
4363 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4364
4365 if (eicr & eicr_mask) {
4366 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
4367 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
4368 }
4369
4370 if ((hw->mac.type == ixgbe_mac_82599EB) &&
4371 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4372 IXGBE_WRITE_REG(hw, IXGBE_EICR,
4373 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4374 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4375 }
4376 }
4377
4378 /* External PHY interrupt */
4379 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
4380 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
4381 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
4382 }
4383
4384 return (FILTER_SCHEDULE_THREAD);
4385 } /* ixgbe_intr */
4386
4387 /************************************************************************
4388 * ixgbe_free_pci_resources
4389 ************************************************************************/
4390 static void
4391 ixgbe_free_pci_resources(if_ctx_t ctx)
4392 {
4393 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4394 struct ix_rx_queue *que = sc->rx_queues;
4395 device_t dev = iflib_get_dev(ctx);
4396
4397 /* Release all MSI-X queue resources */
4398 if (sc->intr_type == IFLIB_INTR_MSIX)
4399 iflib_irq_free(ctx, &sc->irq);
4400
4401 if (que != NULL) {
4402 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
4403 iflib_irq_free(ctx, &que->que_irq);
4404 }
4405 }
4406
4407 if (sc->pci_mem != NULL)
4408 bus_release_resource(dev, SYS_RES_MEMORY,
4409 rman_get_rid(sc->pci_mem), sc->pci_mem);
4410 } /* ixgbe_free_pci_resources */
4411
4412 /************************************************************************
4413 * ixgbe_sysctl_flowcntl
4414 *
4415 * SYSCTL wrapper around setting Flow Control
4416 ************************************************************************/
4417 static int
4418 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4419 {
4420 struct ixgbe_softc *sc;
4421 int error, fc;
4422
4423 sc = (struct ixgbe_softc *)arg1;
4424 fc = sc->hw.fc.requested_mode;
4425
4426 error = sysctl_handle_int(oidp, &fc, 0, req);
4427 if ((error) || (req->newptr == NULL))
4428 return (error);
4429
4430 /* Don't bother if it's not changed */
4431 if (fc == sc->hw.fc.current_mode)
4432 return (0);
4433
4434 return ixgbe_set_flowcntl(sc, fc);
4435 } /* ixgbe_sysctl_flowcntl */
4436
4437 /************************************************************************
4438 * ixgbe_set_flowcntl - Set flow control
4439 *
4440 * Flow control values:
4441 * 0 - off
4442 * 1 - rx pause
4443 * 2 - tx pause
4444 * 3 - full
4445 ************************************************************************/
4446 static int
4447 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4448 {
4449 switch (fc) {
4450 case ixgbe_fc_rx_pause:
4451 case ixgbe_fc_tx_pause:
4452 case ixgbe_fc_full:
4453 if (sc->num_rx_queues > 1)
4454 ixgbe_disable_rx_drop(sc);
4455 break;
4456 case ixgbe_fc_none:
4457 if (sc->num_rx_queues > 1)
4458 ixgbe_enable_rx_drop(sc);
4459 break;
4460 default:
4461 return (EINVAL);
4462 }
4463
4464 sc->hw.fc.requested_mode = fc;
4465
4466 /* Don't autoneg if forcing a value */
4467 sc->hw.fc.disable_fc_autoneg = true;
4468 ixgbe_fc_enable(&sc->hw);
4469
4470 return (0);
4471 } /* ixgbe_set_flowcntl */
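/*
 * Usage sketch (the sysctl path is assumed from this driver's naming
 * conventions for unit 0):
 *
 *   # sysctl dev.ix.0.fc=3    # request full RX/TX pause
 *   # sysctl dev.ix.0.fc=0    # flow control off; with multiple RX
 *                             # queues this re-enables per-queue drop
 */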
4472
4473 /************************************************************************
4474 * ixgbe_enable_rx_drop
4475 *
4476 * Enable the hardware to drop packets when the buffer is
4477 * full. This is useful with multiqueue, so that no single
4478 * queue being full stalls the entire RX engine. We only
4479 * enable this when Multiqueue is enabled AND Flow Control
4480 * is disabled.
4481 ************************************************************************/
4482 static void
4483 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
4484 {
4485 struct ixgbe_hw *hw = &sc->hw;
4486 struct rx_ring *rxr;
4487 u32 srrctl;
4488
4489 for (int i = 0; i < sc->num_rx_queues; i++) {
4490 rxr = &sc->rx_queues[i].rxr;
4491 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4492 srrctl |= IXGBE_SRRCTL_DROP_EN;
4493 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4494 }
4495
4496 /* enable drop for each vf */
4497 for (int i = 0; i < sc->num_vfs; i++) {
4498 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4499 (IXGBE_QDE_WRITE |
4500 (i << IXGBE_QDE_IDX_SHIFT) |
4501 IXGBE_QDE_ENABLE));
4502 }
4503 } /* ixgbe_enable_rx_drop */
4504
4505 /************************************************************************
4506 * ixgbe_disable_rx_drop
4507 ************************************************************************/
4508 static void
4509 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
4510 {
4511 struct ixgbe_hw *hw = &sc->hw;
4512 struct rx_ring *rxr;
4513 u32 srrctl;
4514
4515 for (int i = 0; i < sc->num_rx_queues; i++) {
4516 rxr = &sc->rx_queues[i].rxr;
4517 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4518 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4519 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4520 }
4521
4522 /* disable drop for each vf */
4523 for (int i = 0; i < sc->num_vfs; i++) {
4524 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4525 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4526 }
4527 } /* ixgbe_disable_rx_drop */
4528
4529 /************************************************************************
4530 * ixgbe_sysctl_advertise
4531 *
4532 * SYSCTL wrapper around setting advertised speed
4533 ************************************************************************/
4534 static int
4535 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4536 {
4537 struct ixgbe_softc *sc;
4538 int error, advertise;
4539
4540 sc = (struct ixgbe_softc *)arg1;
4541 if (atomic_load_acq_int(&sc->recovery_mode))
4542 return (EPERM);
4543
4544 advertise = sc->advertise;
4545
4546 error = sysctl_handle_int(oidp, &advertise, 0, req);
4547 if ((error) || (req->newptr == NULL))
4548 return (error);
4549
4550 return ixgbe_set_advertise(sc, advertise);
4551 } /* ixgbe_sysctl_advertise */
4552
4553 /************************************************************************
4554 * ixgbe_set_advertise - Control advertised link speed
4555 *
4556 * Flags:
4557 * 0x1 - advertise 100 Mb
4558 * 0x2 - advertise 1G
4559 * 0x4 - advertise 10G
4560 * 0x8 - advertise 10 Mb (yes, Mb)
4561 * 0x10 - advertise 2.5G (disabled by default)
4562 * 0x20 - advertise 5G (disabled by default)
4563 *
4564 ************************************************************************/
4565 static int
4566 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4567 {
4568 device_t dev = iflib_get_dev(sc->ctx);
4569 struct ixgbe_hw *hw;
4570 ixgbe_link_speed speed = 0;
4571 ixgbe_link_speed link_caps = 0;
4572 s32 err = IXGBE_NOT_IMPLEMENTED;
4573 bool negotiate = false;
4574
4575 /* Checks to validate new value */
4576 if (sc->advertise == advertise) /* no change */
4577 return (0);
4578
4579 hw = &sc->hw;
4580
4581 /* No speed changes for backplane media */
4582 if (hw->phy.media_type == ixgbe_media_type_backplane)
4583 return (ENODEV);
4584
4585 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4586 (hw->phy.multispeed_fiber))) {
4587 device_printf(dev,
4588 "Advertised speed can only be set on copper or multispeed"
4589 " fiber media types.\n");
4590 return (EINVAL);
4591 }
4592
4593 if (advertise < 0x1 || advertise > 0x3F) {
4594 device_printf(dev,
4595 "Invalid advertised speed; valid modes are 0x1 through"
4596 " 0x3F\n");
4597 return (EINVAL);
4598 }
4599
4600 if (hw->mac.ops.get_link_capabilities) {
4601 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4602 &negotiate);
4603 if (err != IXGBE_SUCCESS) {
4604 device_printf(dev,
4605 "Unable to determine supported advertise speeds"
4606 "\n");
4607 return (ENODEV);
4608 }
4609 }
4610
4611 /* Set new value and report new advertised mode */
4612 if (advertise & 0x1) {
4613 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4614 device_printf(dev,
4615 "Interface does not support 100Mb advertised"
4616 " speed\n");
4617 return (EINVAL);
4618 }
4619 speed |= IXGBE_LINK_SPEED_100_FULL;
4620 }
4621 if (advertise & 0x2) {
4622 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4623 device_printf(dev,
4624 "Interface does not support 1Gb advertised speed"
4625 "\n");
4626 return (EINVAL);
4627 }
4628 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4629 }
4630 if (advertise & 0x4) {
4631 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4632 device_printf(dev,
4633 "Interface does not support 10Gb advertised speed"
4634 "\n");
4635 return (EINVAL);
4636 }
4637 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4638 }
4639 if (advertise & 0x8) {
4640 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4641 device_printf(dev,
4642 "Interface does not support 10Mb advertised speed"
4643 "\n");
4644 return (EINVAL);
4645 }
4646 speed |= IXGBE_LINK_SPEED_10_FULL;
4647 }
4648 if (advertise & 0x10) {
4649 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
4650 device_printf(dev,
4651 "Interface does not support 2.5G advertised speed"
4652 "\n");
4653 return (EINVAL);
4654 }
4655 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
4656 }
4657 if (advertise & 0x20) {
4658 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
4659 device_printf(dev,
4660 "Interface does not support 5G advertised speed"
4661 "\n");
4662 return (EINVAL);
4663 }
4664 speed |= IXGBE_LINK_SPEED_5GB_FULL;
4665 }
4666
4667 hw->mac.autotry_restart = true;
4668 hw->mac.ops.setup_link(hw, speed, true);
4669 sc->advertise = advertise;
4670
4671 return (0);
4672 } /* ixgbe_set_advertise */
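/*
 * Example of the flag encoding above: advertising only 1G and 10G is
 * 0x2 | 0x4 = 0x6. Usage sketch (sysctl path assumed for unit 0):
 *
 *   # sysctl dev.ix.0.advertise_speed=0x6
 */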
4673
4674 /************************************************************************
4675 * ixgbe_get_default_advertise - Get default advertised speed settings
4676 *
4677 * Formatted for sysctl usage.
4678 * Flags:
4679 * 0x1 - advertise 100 Mb
4680 * 0x2 - advertise 1G
4681 * 0x4 - advertise 10G
4682 * 0x8 - advertise 10 Mb (yes, Mb)
4683 * 0x10 - advertise 2.5G (disabled by default)
4684 * 0x20 - advertise 5G (disabled by default)
4685 ************************************************************************/
4686 static int
4687 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
4688 {
4689 struct ixgbe_hw *hw = &sc->hw;
4690 int speed;
4691 ixgbe_link_speed link_caps = 0;
4692 s32 err;
4693 bool negotiate = false;
4694
4695 /*
4696 * Advertised speed means nothing unless it's copper or
4697 * multi-speed fiber
4698 */
4699 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4700 !(hw->phy.multispeed_fiber))
4701 return (0);
4702
4703 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4704 if (err != IXGBE_SUCCESS)
4705 return (0);
4706
4707 if (hw->mac.type == ixgbe_mac_X550) {
4708 /*
4709 * 2.5G and 5G autonegotiation speeds on X550
4710 * are disabled by default due to reported
4711 * interoperability issues with some switches.
4712 */
4713 link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
4714 IXGBE_LINK_SPEED_5GB_FULL);
4715 }
4716
4717 speed =
4718 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
4719 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
4720 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
4721 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
4722 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
4723 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
4724
4725 return speed;
4726 } /* ixgbe_get_default_advertise */
4727
4728 /************************************************************************
4729 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4730 *
4731 * Control values:
4732 * 0/1 - off / on (use default value of 1000)
4733 *
4734 * Legal timer values are:
4735 * 50,100,250,500,1000,2000,5000,10000
4736 *
4737 * Turning off interrupt moderation will also turn this off.
4738 ************************************************************************/
4739 static int
4740 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4741 {
4742 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4743 if_t ifp = iflib_get_ifp(sc->ctx);
4744 int error;
4745 u16 newval;
4746
4747 newval = sc->dmac;
4748 error = sysctl_handle_16(oidp, &newval, 0, req);
4749 if ((error) || (req->newptr == NULL))
4750 return (error);
4751
4752 switch (newval) {
4753 case 0:
4754 /* Disabled */
4755 sc->dmac = 0;
4756 break;
4757 case 1:
4758 /* Enable and use default */
4759 sc->dmac = 1000;
4760 break;
4761 case 50:
4762 case 100:
4763 case 250:
4764 case 500:
4765 case 1000:
4766 case 2000:
4767 case 5000:
4768 case 10000:
4769 /* Legal values - allow */
4770 sc->dmac = newval;
4771 break;
4772 default:
4773 /* Illegal value, return an error */
4774 return (EINVAL);
4775 }
4776
4777 /* Re-initialize hardware if it's already running */
4778 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
4779 if_init(ifp, ifp);
4780
4781 return (0);
4782 } /* ixgbe_sysctl_dmac */
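/*
 * Usage sketch (sysctl path assumed for unit 0): enable DMA
 * coalescing with the default 1000-usec watchdog, or pick one of the
 * legal intervals listed above:
 *
 *   # sysctl dev.ix.0.dmac=1      # shorthand for the default (1000)
 *   # sysctl dev.ix.0.dmac=250    # explicit 250-usec timer
 */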
4783
4784 #ifdef IXGBE_DEBUG
4785 /************************************************************************
4786 * ixgbe_sysctl_power_state
4787 *
4788 * Sysctl to test power states
4789 * Values:
4790 * 0 - set device to D0
4791 * 3 - set device to D3
4792 * (none) - get current device power state
4793 ************************************************************************/
4794 static int
4795 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4796 {
4797 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4798 device_t dev = sc->dev;
4799 int curr_ps, new_ps, error = 0;
4800
4801 curr_ps = new_ps = pci_get_powerstate(dev);
4802
4803 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4804 if ((error) || (req->newptr == NULL))
4805 return (error);
4806
4807 if (new_ps == curr_ps)
4808 return (0);
4809
4810 if (new_ps == 3 && curr_ps == 0)
4811 error = DEVICE_SUSPEND(dev);
4812 else if (new_ps == 0 && curr_ps == 3)
4813 error = DEVICE_RESUME(dev);
4814 else
4815 return (EINVAL);
4816
4817 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4818
4819 return (error);
4820 } /* ixgbe_sysctl_power_state */
4821 #endif
4822
4823 /************************************************************************
4824 * ixgbe_sysctl_wol_enable
4825 *
4826 * Sysctl to enable/disable the WoL capability,
4827 * if supported by the adapter.
4828 *
4829 * Values:
4830 * 0 - disabled
4831 * 1 - enabled
4832 ************************************************************************/
4833 static int
4834 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4835 {
4836 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4837 struct ixgbe_hw *hw = &sc->hw;
4838 int new_wol_enabled;
4839 int error = 0;
4840
4841 new_wol_enabled = hw->wol_enabled;
4842 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4843 if ((error) || (req->newptr == NULL))
4844 return (error);
4845 new_wol_enabled = !!(new_wol_enabled);
4846 if (new_wol_enabled == hw->wol_enabled)
4847 return (0);
4848
4849 if (new_wol_enabled > 0 && !sc->wol_support)
4850 return (ENODEV);
4851 else
4852 hw->wol_enabled = new_wol_enabled;
4853
4854 return (0);
4855 } /* ixgbe_sysctl_wol_enable */
4856
4857 /************************************************************************
4858 * ixgbe_sysctl_wufc - Wake Up Filter Control
4859 *
4860 * Sysctl to enable/disable the types of packets that the
4861 * adapter will wake up on upon receipt.
4862 * Flags:
4863 * 0x1 - Link Status Change
4864 * 0x2 - Magic Packet
4865 * 0x4 - Direct Exact
4866 * 0x8 - Directed Multicast
4867 * 0x10 - Broadcast
4868 * 0x20 - ARP/IPv4 Request Packet
4869 * 0x40 - Direct IPv4 Packet
4870 * 0x80 - Direct IPv6 Packet
4871 *
4872 * Settings not listed above will cause the sysctl to return an error.
4873 ************************************************************************/
4874 static int
4875 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4876 {
4877 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4878 int error = 0;
4879 u32 new_wufc;
4880
4881 new_wufc = sc->wufc;
4882
4883 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4884 if ((error) || (req->newptr == NULL))
4885 return (error);
4886 if (new_wufc == sc->wufc)
4887 return (0);
4888
4889 if (new_wufc & 0xffffff00)
4890 return (EINVAL);
4891
4892 new_wufc &= 0xff;
4893 new_wufc |= (0xffffff & sc->wufc);
4894 sc->wufc = new_wufc;
4895
4896 return (0);
4897 } /* ixgbe_sysctl_wufc */
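/*
 * Example of the flag encoding above: waking on magic packet and
 * broadcast only is 0x2 | 0x10 = 0x12. Any bit outside the low byte
 * (the 0xffffff00 check above) is rejected with EINVAL.
 */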
4898
4899 #ifdef IXGBE_DEBUG
4900 /************************************************************************
4901 * ixgbe_sysctl_print_rss_config
4902 ************************************************************************/
4903 static int
4904 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4905 {
4906 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4907 struct ixgbe_hw *hw = &sc->hw;
4908 device_t dev = sc->dev;
4909 struct sbuf *buf;
4910 int error = 0, reta_size;
4911 u32 reg;
4912
4913 if (atomic_load_acq_int(&sc->recovery_mode))
4914 return (EPERM);
4915
4916 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4917 if (!buf) {
4918 device_printf(dev, "Could not allocate sbuf for output.\n");
4919 return (ENOMEM);
4920 }
4921
4922 // TODO: use sbufs to make a string to print out
4923 /* Set multiplier for RETA setup and table size based on MAC */
4924 switch (sc->hw.mac.type) {
4925 case ixgbe_mac_X550:
4926 case ixgbe_mac_X550EM_x:
4927 case ixgbe_mac_X550EM_a:
4928 reta_size = 128;
4929 break;
4930 default:
4931 reta_size = 32;
4932 break;
4933 }
4934
4935 /* Print out the redirection table */
4936 sbuf_cat(buf, "\n");
4937 for (int i = 0; i < reta_size; i++) {
4938 if (i < 32) {
4939 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4940 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4941 } else {
4942 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4943 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4944 }
4945 }
4946
4947 // TODO: print more config
4948
4949 error = sbuf_finish(buf);
4950 if (error)
4951 device_printf(dev, "Error finishing sbuf: %d\n", error);
4952
4953 sbuf_delete(buf);
4954
4955 return (0);
4956 } /* ixgbe_sysctl_print_rss_config */
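
/*
 * Debug-build usage sketch (hypothetical unit number; assumes the
 * handler is attached as a read-only "print_rss_config" node):
 *
 *   # sysctl dev.ix.0.print_rss_config
 *
 * On the X550 family the 128 registers dumped above are the 32 RETA
 * plus 96 ERETA registers, each 32-bit register holding four
 * redirection-table entries; older MACs expose only RETA.
 */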
#endif /* IXGBE_DEBUG */

/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 * For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (atomic_load_acq_int(&sc->recovery_mode))
		return (EPERM);

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	/* Shift temp for output */
	reg = reg >> 8;

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_temp */
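
/*
 * Note: the PHY reports the temperature in the high byte of the
 * vendor-specific register, hence the shift; the value is presumed
 * to be in degrees Celsius per the external X557 PHY documentation.
 */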

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 * Reports (directly from the PHY) whether the current PHY
 * temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (atomic_load_acq_int(&sc->recovery_mode))
		return (EPERM);

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Get occurrence bit */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */
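
/*
 * Bit 14 (0x4000) of the status register is taken here to be the
 * overtemp-occurred flag, so the sysctl reads back as a plain
 * boolean (0 or 1).
 */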

/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 * Sysctl to set EEE power saving feature
 * Values:
 *   0      - disable EEE
 *   1      - enable EEE
 *   (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	if_t ifp = iflib_get_ifp(sc->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	if (atomic_load_acq_int(&sc->recovery_mode))
		return (EPERM);

	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = ixgbe_setup_eee(&sc->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	if_init(ifp, ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	else
		sc->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
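
/*
 * Usage sketch (hypothetical unit number; assumes the handler is
 * attached as the "eee_state" node):
 *
 *   # sysctl dev.ix.0.eee_state=1
 *
 * A successful change reprograms EEE via ixgbe_setup_eee() and
 * restarts the interface so auto-negotiation picks it up.
 */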

static int
ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	u32 reg, val, shift;
	int error, mask;

	sc = oidp->oid_arg1;
	switch (oidp->oid_arg2) {
	case 0:
		reg = IXGBE_DTXTCPFLGL;
		shift = 0;
		break;
	case 1:
		reg = IXGBE_DTXTCPFLGL;
		shift = 16;
		break;
	case 2:
		reg = IXGBE_DTXTCPFLGH;
		shift = 0;
		break;
	default:
		return (EINVAL);
	}
	val = IXGBE_READ_REG(&sc->hw, reg);
	mask = (val >> shift) & 0xfff;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (mask < 0 || mask > 0xfff)
		return (EINVAL);
	val = (val & ~(0xfff << shift)) | (mask << shift);
	IXGBE_WRITE_REG(&sc->hw, reg, val);
	return (0);
}
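
/*
 * The three instances of this handler (oid_arg2 = 0, 1, 2) edit the
 * 12-bit TCP-flags masks that TSO applies per segment position:
 * arg2 0 and 1 select the low and high halves of DTXTCPFLGL, arg2 2
 * the low half of DTXTCPFLGH. Hypothetical example, assuming the
 * nodes are registered per segment position:
 *
 *   # sysctl dev.ix.0.tso_tcp_flags_mask_first_segment=0xff6
 */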

/************************************************************************
 * ixgbe_init_device_features
 ************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
	sc->feat_cap = IXGBE_FEATURE_NETMAP |
	    IXGBE_FEATURE_RSS |
	    IXGBE_FEATURE_MSI |
	    IXGBE_FEATURE_MSIX |
	    IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	case ixgbe_mac_E610:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (sc->feat_cap & IXGBE_FEATURE_EEE)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/* Recovery mode */
	if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
		sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev,
			    "Device does not support Flow Director."
			    " Leaving disabled.\n");
	}
	/*
	 * Message Signaled Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		sc->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		sc->feat_en &= ~IXGBE_FEATURE_RSS;
		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
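
/*
 * Elsewhere in the driver, capability (feat_cap) and enablement
 * (feat_en) are then tested independently, e.g. (illustrative only):
 *
 *	if (sc->feat_en & IXGBE_FEATURE_RSS)
 *		ixgbe_initialize_rss_mapping(sc);
 */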

/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

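	/*
	 * When called from the interrupt path, 'reg' is an EICR
	 * snapshot, so test the GPI SDP1 cause bit; otherwise 'reg'
	 * is the ESDP register and the SDP1 pin is tested directly.
	 */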
	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	const char *space = "";

	ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack, a build ID in Intel's SCM */
	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */

	/* FW version */
	if ((nvm_ver.phy_fw_maj == 0x0 &&
	    nvm_ver.phy_fw_min == 0x0 &&
	    nvm_ver.phy_fw_id == 0x0) ||
	    (nvm_ver.phy_fw_maj == 0xF &&
	    nvm_ver.phy_fw_min == 0xFF &&
	    nvm_ver.phy_fw_id == 0xF)) {
		/* If major, minor and id numbers are set to 0,
		 * reading FW version is unsupported. If major number
		 * is set to 0xF, minor is set to 0xFF and id is set
		 * to 0xF, this means that number read is invalid. */
	} else
		sbuf_printf(buf, "fw %d.%d.%d ",
		    nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
		    nvm_ver.phy_fw_id);

	/* NVM version */
	if ((nvm_ver.nvm_major == 0x0 &&
	    nvm_ver.nvm_minor == 0x0 &&
	    nvm_ver.nvm_id == 0x0) ||
	    (nvm_ver.nvm_major == 0xF &&
	    nvm_ver.nvm_minor == 0xFF &&
	    nvm_ver.nvm_id == 0xF)) {
		/* If major, minor and id numbers are set to 0,
		 * reading NVM version is unsupported. If major number
		 * is set to 0xF, minor is set to 0xFF and id is set
		 * to 0xF, this means that number read is invalid. */
	} else
		sbuf_printf(buf, "nvm %x.%02x.%x ",
		    nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, nvm_ver.or_major, nvm_ver.or_build,
		    nvm_ver.or_patch);
		space = " ";
	}

	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID | 0xFFFFFFFF)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
	}
} /* ixgbe_sbuf_fw_version */
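
/*
 * With every component present and valid, the assembled string takes
 * the form (values illustrative only):
 *
 *   fw 1.2.3 nvm 3.40.5 NVM OEM V1.2 R3 Option ROM V1-b2-p3 eTrack 0x80001234
 */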

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */
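
/*
 * Read-only usage sketch (hypothetical unit number; assumes the
 * handler is attached as the "print_fw_version" node):
 *
 *   # sysctl dev.ix.0.print_fw_version
 */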