1 /*****************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 *****************************************************************************/
33
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #include "opt_rss.h"
37
38 #include "ixgbe.h"
39 #include "ixgbe_sriov.h"
40 #include "ifdi_if.h"
41
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
44
45 /************************************************************************
46 * Driver version
47 ************************************************************************/
48 static const char ixgbe_driver_version[] = "5.0.1-k";
49
50 /************************************************************************
51 * PCI Device ID Table
52 *
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixgbe_strings
55 * Last entry must be all 0s
56 *
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
59 static const pci_vendor_info_t ixgbe_vendor_info_array[] =
60 {
61 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,
62 "Intel(R) 82598EB AF (Dual Fiber)"),
63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,
64 "Intel(R) 82598EB AF (Fiber)"),
65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,
66 "Intel(R) 82598EB AT (CX4)"),
67 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,
68 "Intel(R) 82598EB AT"),
69 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,
70 "Intel(R) 82598EB AT2"),
71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,
73 "Intel(R) 82598EB AF DA (Dual Fiber)"),
74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,
75 "Intel(R) 82598EB AT (Dual CX4)"),
76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,
77 "Intel(R) 82598EB AF (Dual Fiber LR)"),
78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,
79 "Intel(R) 82598EB AF (Dual Fiber SR)"),
80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,
81 "Intel(R) 82598EB LOM"),
82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,
83 "Intel(R) X520 82599 (KX4)"),
84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,
85 "Intel(R) X520 82599 (KX4 Mezzanine)"),
86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,
87 "Intel(R) X520 82599ES (SFI/SFP+)"),
88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,
89 "Intel(R) X520 82599 (XAUI/BX4)"),
90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,
91 "Intel(R) X520 82599 (Dual CX4)"),
92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,
93 "Intel(R) X520-T 82599 LOM"),
94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS,
95 "Intel(R) X520 82599 LS"),
96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,
97 "Intel(R) X520 82599 (Combined Backplane)"),
98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,
99 "Intel(R) X520 82599 (Backplane w/FCoE)"),
100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,
101 "Intel(R) X520 82599 (Dual SFP+)"),
102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,
103 "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,
105 "Intel(R) X520-1 82599EN (SFP+)"),
106 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,
107 "Intel(R) X520-4 82599 (Quad SFP+)"),
108 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,
109 "Intel(R) X520-Q1 82599 (QSFP+)"),
110 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,
111 "Intel(R) X540-AT2"),
112 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
113 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
114 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
115 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,
116 "Intel(R) X552 (KR Backplane)"),
117 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,
118 "Intel(R) X552 (KX4 Backplane)"),
119 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,
120 "Intel(R) X552/X557-AT (10GBASE-T)"),
121 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,
122 "Intel(R) X552 (1000BASE-T)"),
123 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP,
124 "Intel(R) X552 (SFP+)"),
125 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR,
126 "Intel(R) X553 (KR Backplane)"),
127 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L,
128 "Intel(R) X553 L (KR Backplane)"),
129 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP,
130 "Intel(R) X553 (SFP+)"),
131 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N,
132 "Intel(R) X553 N (SFP+)"),
133 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII,
134 "Intel(R) X553 (1GbE SGMII)"),
135 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L,
136 "Intel(R) X553 L (1GbE SGMII)"),
137 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T,
138 "Intel(R) X553/X557-AT (10GBASE-T)"),
139 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T,
140 "Intel(R) X553 (1GbE)"),
141 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L,
142 "Intel(R) X553 L (1GbE)"),
143 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS,
144 "Intel(R) X540-T2 (Bypass)"),
145 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
146 "Intel(R) X520 82599 (Bypass)"),
147 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE,
148 "Intel(R) E610 (Backplane)"),
149 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP,
150 "Intel(R) E610 (SFP)"),
151 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T,
152 "Intel(R) E610 (2.5 GbE)"),
153 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T,
154 "Intel(R) E610 (10 GbE)"),
155 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII,
156 "Intel(R) E610 (SGMII)"),
157 /* required last entry */
158 PVID_END
159 };
160
161 static void *ixgbe_register(device_t);
162 static int ixgbe_if_attach_pre(if_ctx_t);
163 static int ixgbe_if_attach_post(if_ctx_t);
164 static int ixgbe_if_detach(if_ctx_t);
165 static int ixgbe_if_shutdown(if_ctx_t);
166 static int ixgbe_if_suspend(if_ctx_t);
167 static int ixgbe_if_resume(if_ctx_t);
168
169 static void ixgbe_if_stop(if_ctx_t);
170 void ixgbe_if_enable_intr(if_ctx_t);
171 static void ixgbe_if_disable_intr(if_ctx_t);
172 static void ixgbe_link_intr_enable(if_ctx_t);
173 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
174 static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
175 static int ixgbe_if_media_change(if_ctx_t);
176 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
177 static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
178 static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
179 static void ixgbe_if_multi_set(if_ctx_t);
180 static int ixgbe_if_promisc_set(if_ctx_t, int);
181 static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
182 int);
183 static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
184 int);
185 static void ixgbe_if_queues_free(if_ctx_t);
186 static void ixgbe_if_timer(if_ctx_t, uint16_t);
187 static void ixgbe_if_update_admin_status(if_ctx_t);
188 static void ixgbe_if_vlan_register(if_ctx_t, u16);
189 static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
190 static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
191 static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
192 int ixgbe_intr(void *);
193
194 /************************************************************************
195 * Function prototypes
196 ************************************************************************/
197 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
198
199 static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
200 static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
201 static void ixgbe_add_device_sysctls(if_ctx_t);
202 static int ixgbe_allocate_pci_resources(if_ctx_t);
203 static int ixgbe_setup_low_power_mode(if_ctx_t);
204
205 static void ixgbe_config_dmac(struct ixgbe_softc *);
206 static void ixgbe_configure_ivars(struct ixgbe_softc *);
207 static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
208 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
209 static bool ixgbe_sfp_probe(if_ctx_t);
210
211 static void ixgbe_free_pci_resources(if_ctx_t);
212
213 static int ixgbe_msix_link(void *);
214 static int ixgbe_msix_que(void *);
215 static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
216 static void ixgbe_initialize_receive_units(if_ctx_t);
217 static void ixgbe_initialize_transmit_units(if_ctx_t);
218
219 static int ixgbe_setup_interface(if_ctx_t);
220 static void ixgbe_init_device_features(struct ixgbe_softc *);
221 static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
222 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
223 static void ixgbe_print_fw_version(if_ctx_t);
224 static void ixgbe_add_media_types(if_ctx_t);
225 static void ixgbe_update_stats_counters(struct ixgbe_softc *);
226 static void ixgbe_config_link(if_ctx_t);
227 static void ixgbe_get_slot_info(struct ixgbe_softc *);
228 static void ixgbe_fw_mode_timer(void *);
229 static void ixgbe_check_wol_support(struct ixgbe_softc *);
230 static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
231 static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
232
233 static void ixgbe_add_hw_stats(struct ixgbe_softc *);
234 static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
235 static int ixgbe_set_advertise(struct ixgbe_softc *, int);
236 static int ixgbe_get_default_advertise(struct ixgbe_softc *);
237 static void ixgbe_setup_vlan_hw_support(if_ctx_t);
238 static void ixgbe_config_gpie(struct ixgbe_softc *);
239 static void ixgbe_config_delay_values(struct ixgbe_softc *);
240
241 /* Sysctl handlers */
242 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
243 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
244 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
245 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
246 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
247 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
248 static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
249 #ifdef IXGBE_DEBUG
250 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
251 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
252 #endif
253 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
254 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
255 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
256 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
257 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
258 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
259 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
260 static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
261
262 /* Deferred interrupt tasklets */
263 static void ixgbe_handle_msf(void *);
264 static void ixgbe_handle_mod(void *);
265 static void ixgbe_handle_phy(void *);
266 static void ixgbe_handle_fw_event(void *);
267
268 static int ixgbe_enable_lse(struct ixgbe_softc *sc);
269 static int ixgbe_disable_lse(struct ixgbe_softc *sc);
270
271 /************************************************************************
272 * FreeBSD Device Interface Entry Points
273 ************************************************************************/
274 static device_method_t ix_methods[] = {
275 /* Device interface */
276 DEVMETHOD(device_register, ixgbe_register),
277 DEVMETHOD(device_probe, iflib_device_probe),
278 DEVMETHOD(device_attach, iflib_device_attach),
279 DEVMETHOD(device_detach, iflib_device_detach),
280 DEVMETHOD(device_shutdown, iflib_device_shutdown),
281 DEVMETHOD(device_suspend, iflib_device_suspend),
282 DEVMETHOD(device_resume, iflib_device_resume),
283 #ifdef PCI_IOV
284 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
285 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
286 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
287 #endif /* PCI_IOV */
288 DEVMETHOD_END
289 };
290
291 static driver_t ix_driver = {
292 "ix", ix_methods, sizeof(struct ixgbe_softc),
293 };
294
295 DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
296 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
297 MODULE_DEPEND(ix, pci, 1, 1, 1);
298 MODULE_DEPEND(ix, ether, 1, 1, 1);
299 MODULE_DEPEND(ix, iflib, 1, 1, 1);
300
301 static device_method_t ixgbe_if_methods[] = {
302 DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
303 DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
304 DEVMETHOD(ifdi_detach, ixgbe_if_detach),
305 DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
306 DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
307 DEVMETHOD(ifdi_resume, ixgbe_if_resume),
308 DEVMETHOD(ifdi_init, ixgbe_if_init),
309 DEVMETHOD(ifdi_stop, ixgbe_if_stop),
310 DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
311 DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
312 DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
313 DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
314 DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
315 DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
316 DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
317 DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
318 DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
319 DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
320 DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
321 DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
322 DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
323 DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
324 DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
325 DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
326 DEVMETHOD(ifdi_timer, ixgbe_if_timer),
327 DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
328 DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
329 DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
330 DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
331 DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
332 #ifdef PCI_IOV
333 DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
334 DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
335 DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
336 #endif /* PCI_IOV */
337 DEVMETHOD_END
338 };
339
340 /*
341 * TUNEABLE PARAMETERS:
342 */
343
344 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
345 "IXGBE driver parameters");
346 static driver_t ixgbe_if_driver = {
347 "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
348 };
349
350 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
351 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
352 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
353
354 /* Flow control setting, default to full */
355 static int ixgbe_flow_control = ixgbe_fc_full;
356 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
357 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
358
359 /* Advertise Speed, default to 0 (auto) */
360 static int ixgbe_advertise_speed = 0;
361 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
362 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
363
364 /*
365  * Smart speed setting, default to on.
366  * This only works as a compile option
367  * right now, since it is set during attach;
368  * set this to 'ixgbe_smart_speed_off' to
369  * disable.
370  */
371 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
372
373 /*
374 * MSI-X should be the default for best performance,
375 * but this allows it to be forced off for testing.
376 */
377 static int ixgbe_enable_msix = 1;
378 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix,
379 0,
380 "Enable MSI-X interrupts");
381
382 /*
383  * Setting this on allows the use
384  * of unsupported SFP+ modules. Note that
385  * in doing so you are on your own :)
386 */
387 static int allow_unsupported_sfp = false;
388 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
389 &allow_unsupported_sfp, 0,
390 "Allow unsupported SFP modules...use at your own risk");
391
392 /*
393 * Not sure if Flow Director is fully baked,
394 * so we'll default to turning it off.
395 */
396 static int ixgbe_enable_fdir = 0;
397 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir,
398 0,
399 "Enable Flow Director");
400
401 /* Receive-Side Scaling */
402 static int ixgbe_enable_rss = 1;
403 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss,
404 0,
405 "Enable Receive-Side Scaling (RSS)");
406
407 /*
408  * AIM: Adaptive Interrupt Moderation,
409  * which means that the interrupt rate
410  * is varied over time based on the
411  * traffic for that interrupt vector.
412 */
413 static int ixgbe_enable_aim = false;
414 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim,
415 0,
416 "Enable adaptive interrupt moderation");
417
418 #if 0
419 /* Keep a running tab on them for sanity check */
420 static int ixgbe_total_ports;
421 #endif
422
423 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
424
425 /*
426  * For Flow Director: this is the rate at which we sample TX packets
427  * for the filter pool; a rate of 20 means every 20th packet is probed.
428 *
429 * This feature can be disabled by setting this to 0.
430 */
431 static int atr_sample_rate = 20;
432
433 extern struct if_txrx ixgbe_txrx;
434
435 static struct if_shared_ctx ixgbe_sctx_init = {
436 .isc_magic = IFLIB_MAGIC,
437 .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
438 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
439 .isc_tx_maxsegsize = PAGE_SIZE,
440 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
441 .isc_tso_maxsegsize = PAGE_SIZE,
442 .isc_rx_maxsize = PAGE_SIZE*4,
443 .isc_rx_nsegments = 1,
444 .isc_rx_maxsegsize = PAGE_SIZE*4,
445 .isc_nfl = 1,
446 .isc_ntxqs = 1,
447 .isc_nrxqs = 1,
448
449 .isc_admin_intrcnt = 1,
450 .isc_vendor_info = ixgbe_vendor_info_array,
451 .isc_driver_version = ixgbe_driver_version,
452 .isc_driver = &ixgbe_if_driver,
453 .isc_flags = IFLIB_TSO_INIT_IP,
454
455 .isc_nrxd_min = {MIN_RXD},
456 .isc_ntxd_min = {MIN_TXD},
457 .isc_nrxd_max = {MAX_RXD},
458 .isc_ntxd_max = {MAX_TXD},
459 .isc_nrxd_default = {DEFAULT_RXD},
460 .isc_ntxd_default = {DEFAULT_TXD},
461 };
462
463 /************************************************************************
464 * ixgbe_if_tx_queues_alloc
465 ************************************************************************/
466 static int
467 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
468 int ntxqs, int ntxqsets)
469 {
470 struct ixgbe_softc *sc = iflib_get_softc(ctx);
471 if_softc_ctx_t scctx = sc->shared;
472 struct ix_tx_queue *que;
473 int i, j, error;
474
475 MPASS(sc->num_tx_queues > 0);
476 MPASS(sc->num_tx_queues == ntxqsets);
477 MPASS(ntxqs == 1);
478
479 /* Allocate queue structure memory */
480 sc->tx_queues =
481 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
482 ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
483 if (!sc->tx_queues) {
484 device_printf(iflib_get_dev(ctx),
485 "Unable to allocate TX ring memory\n");
486 return (ENOMEM);
487 }
488
489 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
490 struct tx_ring *txr = &que->txr;
491
492 /* In case SR-IOV is enabled, align the index properly */
493 txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
494
495 txr->sc = que->sc = sc;
496
497 /* Allocate report status array */
498 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
499 scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
500 if (txr->tx_rsq == NULL) {
501 error = ENOMEM;
502 goto fail;
503 }
504 for (j = 0; j < scctx->isc_ntxd[0]; j++)
505 txr->tx_rsq[j] = QIDX_INVALID;
506 /* get virtual and physical address of the hardware queues */
507 txr->tail = IXGBE_TDT(txr->me);
508 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
509 txr->tx_paddr = paddrs[i];
510
511 txr->bytes = 0;
512 txr->total_packets = 0;
513
514 /* Set the rate at which we sample packets */
515 if (sc->feat_en & IXGBE_FEATURE_FDIR)
516 txr->atr_sample = atr_sample_rate;
517
518 }
519
520 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
521 sc->num_tx_queues);
522
523 return (0);
524
525 fail:
526 ixgbe_if_queues_free(ctx);
527
528 return (error);
529 } /* ixgbe_if_tx_queues_alloc */
530
531 /************************************************************************
532 * ixgbe_if_rx_queues_alloc
533 ************************************************************************/
534 static int
535 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
536 int nrxqs, int nrxqsets)
537 {
538 struct ixgbe_softc *sc = iflib_get_softc(ctx);
539 struct ix_rx_queue *que;
540 int i;
541
542 MPASS(sc->num_rx_queues > 0);
543 MPASS(sc->num_rx_queues == nrxqsets);
544 MPASS(nrxqs == 1);
545
546 /* Allocate queue structure memory */
547 sc->rx_queues =
548 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
549 M_IXGBE, M_NOWAIT | M_ZERO);
550 if (!sc->rx_queues) {
551 device_printf(iflib_get_dev(ctx),
552 "Unable to allocate TX ring memory\n");
553 return (ENOMEM);
554 }
555
556 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
557 struct rx_ring *rxr = &que->rxr;
558
559 /* In case SR-IOV is enabled, align the index properly */
560 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
561
562 rxr->sc = que->sc = sc;
563
564 /* get the virtual and physical address of the hw queues */
565 rxr->tail = IXGBE_RDT(rxr->me);
566 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
567 rxr->rx_paddr = paddrs[i];
568 rxr->bytes = 0;
569 rxr->que = que;
570 }
571
572 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
573 sc->num_rx_queues);
574
575 return (0);
576 } /* ixgbe_if_rx_queues_alloc */
577
578 /************************************************************************
579 * ixgbe_if_queues_free
580 ************************************************************************/
581 static void
582 ixgbe_if_queues_free(if_ctx_t ctx)
583 {
584 struct ixgbe_softc *sc = iflib_get_softc(ctx);
585 struct ix_tx_queue *tx_que = sc->tx_queues;
586 struct ix_rx_queue *rx_que = sc->rx_queues;
587 int i;
588
589 if (tx_que != NULL) {
590 for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
591 struct tx_ring *txr = &tx_que->txr;
592 if (txr->tx_rsq == NULL)
593 break;
594
595 free(txr->tx_rsq, M_IXGBE);
596 txr->tx_rsq = NULL;
597 }
598
599 free(sc->tx_queues, M_IXGBE);
600 sc->tx_queues = NULL;
601 }
602 if (rx_que != NULL) {
603 free(sc->rx_queues, M_IXGBE);
604 sc->rx_queues = NULL;
605 }
606 } /* ixgbe_if_queues_free */
607
608 /************************************************************************
609 * ixgbe_initialize_rss_mapping
610 ************************************************************************/
611 static void
612 ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
613 {
614 struct ixgbe_hw *hw = &sc->hw;
615 u32 reta = 0, mrqc, rss_key[10];
616 int queue_id, table_size, index_mult;
617 int i, j;
618 u32 rss_hash_config;
619
620 if (sc->feat_en & IXGBE_FEATURE_RSS) {
621 /* Fetch the configured RSS key */
622 rss_getkey((uint8_t *)&rss_key);
623 } else {
624 /* set up random bits */
625 arc4rand(&rss_key, sizeof(rss_key), 0);
626 }
627
628 /* Set multiplier for RETA setup and table size based on MAC */
629 index_mult = 0x1;
630 table_size = 128;
631 switch (sc->hw.mac.type) {
632 case ixgbe_mac_82598EB:
633 index_mult = 0x11;
634 break;
635 case ixgbe_mac_X550:
636 case ixgbe_mac_X550EM_x:
637 case ixgbe_mac_X550EM_a:
638 case ixgbe_mac_E610:
639 table_size = 512;
640 break;
641 default:
642 break;
643 }
644
645 /* Set up the redirection table */
646 for (i = 0, j = 0; i < table_size; i++, j++) {
647 if (j == sc->num_rx_queues)
648 j = 0;
649
650 if (sc->feat_en & IXGBE_FEATURE_RSS) {
651 /*
652 * Fetch the RSS bucket id for the given indirection
653 * entry. Cap it at the number of configured buckets
654 * (which is num_rx_queues.)
655 */
656 queue_id = rss_get_indirection_to_bucket(i);
657 queue_id = queue_id % sc->num_rx_queues;
658 } else
659 queue_id = (j * index_mult);
660
661 /*
662 * The low 8 bits are for hash value (n+0);
663 * The next 8 bits are for hash value (n+1), etc.
664 */
665 reta = reta >> 8;
666 reta = reta | (((uint32_t)queue_id) << 24);
667 if ((i & 3) == 3) {
668 if (i < 128)
669 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
670 else
671 IXGBE_WRITE_REG(hw,
672 IXGBE_ERETA((i >> 2) - 32), reta);
673 reta = 0;
674 }
675 }
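/*
 * Worked example of the packing above (a sketch assuming 4 RX queues,
 * the non-RSS path, and index_mult 0x1, so queue_id simply cycles
 * 0..3): each entry shifts in from the top, so after i == 3 the
 * register holds 0x03020100 (entry n+0 in the low byte) and is
 * written to RETA(0); entries 4..7 then produce the same value for
 * RETA(1), and so on through the table.
 */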
676
677 /* Now fill our hash function seeds */
678 for (i = 0; i < 10; i++)
679 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
680
681 /* Perform hash on these packet types */
682 if (sc->feat_en & IXGBE_FEATURE_RSS)
683 rss_hash_config = rss_gethashconfig();
684 else {
685 /*
686 * Disable UDP - IP fragments aren't currently being handled
687 * and so we end up with a mix of 2-tuple and 4-tuple
688 * traffic.
689 */
690 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 |
691 RSS_HASHTYPE_RSS_TCP_IPV4 |
692 RSS_HASHTYPE_RSS_IPV6 |
693 RSS_HASHTYPE_RSS_TCP_IPV6 |
694 RSS_HASHTYPE_RSS_IPV6_EX |
695 RSS_HASHTYPE_RSS_TCP_IPV6_EX;
696 }
697
698 mrqc = IXGBE_MRQC_RSSEN;
699 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
700 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
701 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
702 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
703 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
704 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
705 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
706 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
707 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
708 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
709 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
710 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
711 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
712 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
713 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
714 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
715 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
716 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
717 mrqc |= ixgbe_get_mrqc(sc->iov_mode);
718 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
719 } /* ixgbe_initialize_rss_mapping */
720
721 /************************************************************************
722 * ixgbe_initialize_receive_units - Setup receive registers and features.
723 ************************************************************************/
724 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
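/*
 * Rounding example for BSIZEPKT_ROUNDUP (a sketch assuming the usual
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10, i.e. 1 KB units): a 2 KB
 * rx_mbuf_sz gives bufsz = (2048 + 1023) >> 10 = 2, so SRRCTL is
 * programmed for 2 KB packet buffers; a 4 KB cluster gives 4.
 */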
725
726 static void
727 ixgbe_initialize_receive_units(if_ctx_t ctx)
728 {
729 struct ixgbe_softc *sc = iflib_get_softc(ctx);
730 if_softc_ctx_t scctx = sc->shared;
731 struct ixgbe_hw *hw = &sc->hw;
732 if_t ifp = iflib_get_ifp(ctx);
733 struct ix_rx_queue *que;
734 int i, j;
735 u32 bufsz, fctrl, srrctl, rxcsum;
736 u32 hlreg;
737
738 /*
739 * Make sure receives are disabled while
740 * setting up the descriptor ring
741 */
742 ixgbe_disable_rx(hw);
743
744 /* Enable broadcasts */
745 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
746 fctrl |= IXGBE_FCTRL_BAM;
747 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
748 fctrl |= IXGBE_FCTRL_DPF;
749 fctrl |= IXGBE_FCTRL_PMCF;
750 }
751 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
752
753 /* Set for Jumbo Frames? */
754 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
755 if (if_getmtu(ifp) > ETHERMTU)
756 hlreg |= IXGBE_HLREG0_JUMBOEN;
757 else
758 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
759 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
760
761 bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
762 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
763
764 /* Setup the Base and Length of the Rx Descriptor Ring */
765 for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
766 struct rx_ring *rxr = &que->rxr;
767 u64 rdba = rxr->rx_paddr;
768
769 j = rxr->me;
770
771 /* Setup the Base and Length of the Rx Descriptor Ring */
772 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
773 (rdba & 0x00000000ffffffffULL));
774 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
775 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
776 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
777
778 /* Set up the SRRCTL register */
779 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
780 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
781 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
782 srrctl |= bufsz;
783 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
784
785 /*
786 * Set DROP_EN iff we have no flow control and >1 queue.
787 * Note that srrctl was cleared shortly before during reset,
788 * so we do not need to clear the bit, but do it just in case
789 * this code is moved elsewhere.
790 */
791 if (sc->num_rx_queues > 1 &&
792 sc->hw.fc.requested_mode == ixgbe_fc_none) {
793 srrctl |= IXGBE_SRRCTL_DROP_EN;
794 } else {
795 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
796 }
797
798 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
799
800 /* Setup the HW Rx Head and Tail Descriptor Pointers */
801 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
802 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
803
804 /* Set the driver rx tail address */
805 rxr->tail = IXGBE_RDT(rxr->me);
806 }
807
808 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
809 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
810 IXGBE_PSRTYPE_UDPHDR |
811 IXGBE_PSRTYPE_IPV4HDR |
812 IXGBE_PSRTYPE_IPV6HDR;
813 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
814 }
815
816 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
817
818 ixgbe_initialize_rss_mapping(sc);
819
820 if (sc->feat_en & IXGBE_FEATURE_RSS) {
821 /* RSS and RX IPP Checksum are mutually exclusive */
822 rxcsum |= IXGBE_RXCSUM_PCSD;
823 }
824
825 if (if_getcapenable(ifp) & IFCAP_RXCSUM)
826 rxcsum |= IXGBE_RXCSUM_PCSD;
827
828 /* This is useful for calculating UDP/IP fragment checksums */
829 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
830 rxcsum |= IXGBE_RXCSUM_IPPCSE;
831
832 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
833
834 } /* ixgbe_initialize_receive_units */
835
836 /************************************************************************
837 * ixgbe_initialize_transmit_units - Enable transmit units.
838 ************************************************************************/
839 static void
840 ixgbe_initialize_transmit_units(if_ctx_t ctx)
841 {
842 struct ixgbe_softc *sc = iflib_get_softc(ctx);
843 struct ixgbe_hw *hw = &sc->hw;
844 if_softc_ctx_t scctx = sc->shared;
845 struct ix_tx_queue *que;
846 int i;
847
848 /* Setup the Base and Length of the Tx Descriptor Ring */
849 for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
850 i++, que++) {
851 struct tx_ring *txr = &que->txr;
852 u64 tdba = txr->tx_paddr;
853 u32 txctrl = 0;
854 int j = txr->me;
855
856 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
857 (tdba & 0x00000000ffffffffULL));
858 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
859 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
860 scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));
861
862 /* Setup the HW Tx Head and Tail descriptor pointers */
863 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
864 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
865
866 /* Cache the tail address */
867 txr->tail = IXGBE_TDT(txr->me);
868
869 txr->tx_rs_cidx = txr->tx_rs_pidx;
870 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
871 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
872 txr->tx_rsq[k] = QIDX_INVALID;
873
874 /* Disable Head Writeback */
875 /*
876 * Note: for X550 series devices, these registers are actually
877 * prefixed with TPH_ instead of DCA_, but the addresses and
878 * fields remain the same.
879 */
880 switch (hw->mac.type) {
881 case ixgbe_mac_82598EB:
882 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
883 break;
884 default:
885 txctrl =
886 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
887 break;
888 }
889 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
890 switch (hw->mac.type) {
891 case ixgbe_mac_82598EB:
892 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
893 break;
894 default:
895 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j),
896 txctrl);
897 break;
898 }
899
900 }
901
902 if (hw->mac.type != ixgbe_mac_82598EB) {
903 u32 dmatxctl, rttdcs;
904
905 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
906 dmatxctl |= IXGBE_DMATXCTL_TE;
907 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
908 /* Disable arbiter to set MTQC */
909 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
910 rttdcs |= IXGBE_RTTDCS_ARBDIS;
911 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
912 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
913 ixgbe_get_mtqc(sc->iov_mode));
914 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
915 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
916 }
917
918 } /* ixgbe_initialize_transmit_units */
919
920 static int
921 ixgbe_check_fw_api_version(struct ixgbe_softc *sc)
922 {
923 struct ixgbe_hw *hw = &sc->hw;
924 if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
925 device_printf(sc->dev,
926 "The driver for the device stopped because the NVM "
927 "image is newer than expected. You must install the "
928 "most recent version of the network driver.\n");
929 return (EOPNOTSUPP);
930 } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
931 hw->api_min_ver > (IXGBE_FW_API_VER_MINOR + 2)) {
932 device_printf(sc->dev,
933 "The driver for the device detected a newer version of "
934 "the NVM image than expected. Please install the most "
935 "recent version of the network driver.\n");
936 } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
937 hw->api_min_ver < IXGBE_FW_API_VER_MINOR - 2) {
938 device_printf(sc->dev,
939 "The driver for the device detected an older version "
940 "of the NVM image than expected. "
941 "Please update the NVM image.\n");
942 }
943 return (0);
944 }
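/*
 * Worked example of the policy above (hypothetical expected API
 * version 1.5): an NVM reporting 2.0 fails attach with EOPNOTSUPP;
 * 1.8 (minor more than 2 above) or 1.2 (minor more than 2 below)
 * only warns and returns 0; anything from 1.3 through 1.7 passes
 * silently.
 */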
945
946 /************************************************************************
947 * ixgbe_register
948 ************************************************************************/
949 static void *
950 ixgbe_register(device_t dev)
951 {
952 return (&ixgbe_sctx_init);
953 } /* ixgbe_register */
954
955 /************************************************************************
956 * ixgbe_if_attach_pre - Device initialization routine, part 1
957 *
958 * Called when the driver is being loaded.
959 * Identifies the type of hardware, initializes the hardware,
960 * and initializes iflib structures.
961 *
962 * return 0 on success, positive on failure
963 ************************************************************************/
964 static int
965 ixgbe_if_attach_pre(if_ctx_t ctx)
966 {
967 struct ixgbe_softc *sc;
968 device_t dev;
969 if_softc_ctx_t scctx;
970 struct ixgbe_hw *hw;
971 int error = 0;
972 u32 ctrl_ext;
973 size_t i;
974
975 INIT_DEBUGOUT("ixgbe_attach: begin");
976
977 /* Allocate, clear, and link in our adapter structure */
978 dev = iflib_get_dev(ctx);
979 sc = iflib_get_softc(ctx);
980 sc->hw.back = sc;
981 sc->ctx = ctx;
982 sc->dev = dev;
983 scctx = sc->shared = iflib_get_softc_ctx(ctx);
984 sc->media = iflib_get_media(ctx);
985 hw = &sc->hw;
986
987 /* Determine hardware revision */
988 hw->vendor_id = pci_get_vendor(dev);
989 hw->device_id = pci_get_device(dev);
990 hw->revision_id = pci_get_revid(dev);
991 hw->subsystem_vendor_id = pci_get_subvendor(dev);
992 hw->subsystem_device_id = pci_get_subdevice(dev);
993
994 /* Do base PCI setup - map BAR0 */
995 if (ixgbe_allocate_pci_resources(ctx)) {
996 device_printf(dev, "Allocation of PCI resources failed\n");
997 return (ENXIO);
998 }
999
1000 /* let hardware know driver is loaded */
1001 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1002 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1003 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1004
1005 /*
1006 * Initialize the shared code
1007 */
1008 if (ixgbe_init_shared_code(hw) != 0) {
1009 device_printf(dev, "Unable to initialize the shared code\n");
1010 error = ENXIO;
1011 goto err_pci;
1012 }
1013
1014 if (hw->mac.type == ixgbe_mac_E610)
1015 ixgbe_init_aci(hw);
1016
1017 if (hw->mac.ops.fw_recovery_mode &&
1018 hw->mac.ops.fw_recovery_mode(hw)) {
1019 device_printf(dev,
1020 "Firmware recovery mode detected. Limiting "
1021 "functionality.\nRefer to the Intel(R) Ethernet Adapters "
1022 "and Devices User Guide for details on firmware recovery "
1023 "mode.");
1024 error = ENOSYS;
1025 goto err_pci;
1026 }
1027
1028 /* 82598 Does not support SR-IOV, initialize everything else */
1029 if (hw->mac.type >= ixgbe_mac_82599_vf) {
1030 for (i = 0; i < sc->num_vfs; i++)
1031 hw->mbx.ops[i].init_params(hw);
1032 }
1033
1034 hw->allow_unsupported_sfp = allow_unsupported_sfp;
1035
1036 if (hw->mac.type != ixgbe_mac_82598EB)
1037 hw->phy.smart_speed = ixgbe_smart_speed;
1038
1039 ixgbe_init_device_features(sc);
1040
1041 /* Enable WoL (if supported) */
1042 ixgbe_check_wol_support(sc);
1043
1044 /* Verify adapter fan is still functional (if applicable) */
1045 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
1046 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
1047 ixgbe_check_fan_failure(sc, esdp, false);
1048 }
1049
1050 /* Ensure SW/FW semaphore is free */
1051 ixgbe_init_swfw_semaphore(hw);
1052
1053 /* Set an initial default flow control value */
1054 hw->fc.requested_mode = ixgbe_flow_control;
1055
1056 hw->phy.reset_if_overtemp = true;
1057 error = ixgbe_reset_hw(hw);
1058 hw->phy.reset_if_overtemp = false;
1059 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
1060 /*
1061 * No optics in this port, set up
1062 * so the timer routine will probe
1063 * for later insertion.
1064 */
1065 sc->sfp_probe = true;
1066 error = 0;
1067 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1068 device_printf(dev, "Unsupported SFP+ module detected!\n");
1069 error = EIO;
1070 goto err_pci;
1071 } else if (error) {
1072 device_printf(dev, "Hardware initialization failed\n");
1073 error = EIO;
1074 goto err_pci;
1075 }
1076
1077 /* Make sure we have a good EEPROM before we read from it */
1078 if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
1079 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
1080 error = EIO;
1081 goto err_pci;
1082 }
1083
1084 error = ixgbe_start_hw(hw);
1085 switch (error) {
1086 case IXGBE_ERR_EEPROM_VERSION:
1087 device_printf(dev,
1088 "This device is a pre-production adapter/LOM. Please be"
1089 " aware there may be issues associated with your"
1090 " hardware.\nIf you are experiencing problems please"
1091 " contact your Intel or hardware representative who"
1092 " provided you with this hardware.\n");
1093 break;
1094 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1095 device_printf(dev, "Unsupported SFP+ Module\n");
1096 error = EIO;
1097 goto err_pci;
1098 case IXGBE_ERR_SFP_NOT_PRESENT:
1099 device_printf(dev, "No SFP+ Module found\n");
1100 /* falls thru */
1101 default:
1102 break;
1103 }
1104
1105 /* Check the FW API version */
1106 if (hw->mac.type == ixgbe_mac_E610 && ixgbe_check_fw_api_version(sc)) {
1107 error = EIO;
1108 goto err_pci;
1109 }
1110
1111 /* Most of the iflib initialization... */
1112
1113 iflib_set_mac(ctx, hw->mac.addr);
1114 switch (sc->hw.mac.type) {
1115 case ixgbe_mac_X550:
1116 case ixgbe_mac_X550EM_x:
1117 case ixgbe_mac_X550EM_a:
1118 scctx->isc_rss_table_size = 512;
1119 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1120 break;
1121 default:
1122 scctx->isc_rss_table_size = 128;
1123 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1124 }
1125
1126 /* Allow legacy interrupts */
1127 ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1128
1129 scctx->isc_txqsizes[0] =
1130 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1131 sizeof(u32), DBA_ALIGN);
1132 scctx->isc_rxqsizes[0] =
1133 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1134 DBA_ALIGN);
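/*
 * Sizing sketch for the two statements above (illustrative numbers,
 * assuming 16-byte advanced descriptors, 2048-entry rings, and
 * DBA_ALIGN of 128): the TX ring needs 2048 * 16 + 4 = 32772 bytes
 * (the u32 is the head write-back word), which roundup2() pads to
 * 32896; the RX ring's 2048 * 16 = 32768 is already aligned.
 */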
1135
1136 /* XXX */
1137 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1138 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1139 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1140 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1141 } else {
1142 scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
1143 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1144 }
1145
1146 scctx->isc_msix_bar = pci_msix_table_bar(dev);
1147
1148 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1149 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1150 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1151
1152 scctx->isc_txrx = &ixgbe_txrx;
1153
1154 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1155
1156 return (0);
1157
1158 err_pci:
1159 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1160 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1161 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1162 ixgbe_free_pci_resources(ctx);
1163
1164 if (hw->mac.type == ixgbe_mac_E610)
1165 ixgbe_shutdown_aci(hw);
1166
1167 return (error);
1168 } /* ixgbe_if_attach_pre */
1169
1170 /*********************************************************************
1171 * ixgbe_if_attach_post - Device initialization routine, part 2
1172 *
1173 * Called during driver load, but after interrupts and
1174 * resources have been allocated and configured.
1175 * Sets up some data structures not relevant to iflib.
1176 *
1177 * return 0 on success, positive on failure
1178 *********************************************************************/
1179 static int
1180 ixgbe_if_attach_post(if_ctx_t ctx)
1181 {
1182 device_t dev;
1183 struct ixgbe_softc *sc;
1184 struct ixgbe_hw *hw;
1185 int error = 0;
1186
1187 dev = iflib_get_dev(ctx);
1188 sc = iflib_get_softc(ctx);
1189 hw = &sc->hw;
1190
1191 if (sc->intr_type == IFLIB_INTR_LEGACY &&
1192 (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
1193 device_printf(dev, "Device does not support legacy interrupts");
1194 error = ENXIO;
1195 goto err;
1196 }
1197
1198 /* Allocate multicast array memory. */
1199 sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
1200 M_IXGBE, M_NOWAIT);
1201 if (sc->mta == NULL) {
1202 device_printf(dev,
1203 "Can not allocate multicast setup array\n");
1204 error = ENOMEM;
1205 goto err;
1206 }
1207
1208 /* hw.ix defaults init */
1209 ixgbe_set_advertise(sc, ixgbe_advertise_speed);
1210
1211 /* Enable the optics for 82599 SFP+ fiber */
1212 ixgbe_enable_tx_laser(hw);
1213
1214 /* Enable power to the phy. */
1215 ixgbe_set_phy_power(hw, true);
1216
1217 ixgbe_initialize_iov(sc);
1218
1219 error = ixgbe_setup_interface(ctx);
1220 if (error) {
1221 device_printf(dev, "Interface setup failed: %d\n", error);
1222 goto err;
1223 }
1224
1225 ixgbe_if_update_admin_status(ctx);
1226
1227 /* Initialize statistics */
1228 ixgbe_update_stats_counters(sc);
1229 ixgbe_add_hw_stats(sc);
1230
1231 /* Check PCIE slot type/speed/width */
1232 ixgbe_get_slot_info(sc);
1233
1234 /*
1235 * Do time init and sysctl init here, but
1236 * only on the first port of a bypass sc.
1237 */
1238 ixgbe_bypass_init(sc);
1239
1240 /* Display NVM and Option ROM versions */
1241 ixgbe_print_fw_version(ctx);
1242
1243 /* Set an initial dmac value */
1244 sc->dmac = 0;
1245 /* Set initial advertised speeds (if applicable) */
1246 sc->advertise = ixgbe_get_default_advertise(sc);
1247
1248 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1249 ixgbe_define_iov_schemas(dev, &error);
1250
1251 /* Add sysctls */
1252 ixgbe_add_device_sysctls(ctx);
1253
1254 /* Init recovery mode timer and state variable */
1255 if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1256 sc->recovery_mode = 0;
1257
1258 /* Set up the timer callout */
1259 callout_init(&sc->fw_mode_timer, true);
1260
1261 /* Start the task */
1262 callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
1263 }
1264
1265 return (0);
1266 err:
1267 return (error);
1268 } /* ixgbe_if_attach_post */
1269
1270 /************************************************************************
1271 * ixgbe_check_wol_support
1272 *
1273 * Checks whether the adapter's ports are capable of
1274 * Wake On LAN by reading the adapter's NVM.
1275 *
1276 * Sets each port's hw->wol_enabled value depending
1277 * on the value read here.
1278 ************************************************************************/
1279 static void
1280 ixgbe_check_wol_support(struct ixgbe_softc *sc)
1281 {
1282 struct ixgbe_hw *hw = &sc->hw;
1283 u16 dev_caps = 0;
1284
1285 /* Find out WoL support for port */
1286 sc->wol_support = hw->wol_enabled = 0;
1287 ixgbe_get_device_caps(hw, &dev_caps);
1288 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1289 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1290 hw->bus.func == 0))
1291 sc->wol_support = hw->wol_enabled = 1;
1292
1293 /* Save initial wake up filter configuration */
1294 sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1295
1296 return;
1297 } /* ixgbe_check_wol_support */
1298
1299 /************************************************************************
1300 * ixgbe_setup_interface
1301 *
1302 * Setup networking device structure and register an interface.
1303 ************************************************************************/
1304 static int
1305 ixgbe_setup_interface(if_ctx_t ctx)
1306 {
1307 if_t ifp = iflib_get_ifp(ctx);
1308 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1309
1310 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1311
1312 if_setbaudrate(ifp, IF_Gbps(10));
1313
1314 sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
1315
1316 sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1317
1318 ixgbe_add_media_types(ctx);
1319
1320 /* Autoselect media by default */
1321 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1322
1323 return (0);
1324 } /* ixgbe_setup_interface */
1325
1326 /************************************************************************
1327 * ixgbe_if_get_counter
1328 ************************************************************************/
1329 static uint64_t
1330 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1331 {
1332 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1333 if_t ifp = iflib_get_ifp(ctx);
1334
1335 switch (cnt) {
1336 case IFCOUNTER_IPACKETS:
1337 return (sc->ipackets);
1338 case IFCOUNTER_OPACKETS:
1339 return (sc->opackets);
1340 case IFCOUNTER_IBYTES:
1341 return (sc->ibytes);
1342 case IFCOUNTER_OBYTES:
1343 return (sc->obytes);
1344 case IFCOUNTER_IMCASTS:
1345 return (sc->imcasts);
1346 case IFCOUNTER_OMCASTS:
1347 return (sc->omcasts);
1348 case IFCOUNTER_COLLISIONS:
1349 return (0);
1350 case IFCOUNTER_IQDROPS:
1351 return (sc->iqdrops);
1352 case IFCOUNTER_OQDROPS:
1353 return (0);
1354 case IFCOUNTER_IERRORS:
1355 return (sc->ierrors);
1356 default:
1357 return (if_get_counter_default(ifp, cnt));
1358 }
1359 } /* ixgbe_if_get_counter */
1360
1361 /************************************************************************
1362 * ixgbe_if_i2c_req
1363 ************************************************************************/
1364 static int
1365 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1366 {
1367 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1368 struct ixgbe_hw *hw = &sc->hw;
1369 int i;
1370
1371 if (hw->phy.ops.read_i2c_byte == NULL)
1372 return (ENXIO);
1373 for (i = 0; i < req->len; i++)
1374 hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1375 req->dev_addr, &req->data[i]);
1376 return (0);
1377 } /* ixgbe_if_i2c_req */
1378
1379 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
1380 * reinitialized
1381 * @ctx: iflib context
1382 * @event: event code to check
1383 *
1384 * Defaults to returning false for unknown events.
1385 *
1386 * @returns true if iflib needs to reinit the interface
1387 */
1388 static bool
1389 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1390 {
1391 switch (event) {
1392 case IFLIB_RESTART_VLAN_CONFIG:
1393 default:
1394 return (false);
1395 }
1396 }
1397
1398 /************************************************************************
1399 * ixgbe_add_media_types
1400 ************************************************************************/
1401 static void
1402 ixgbe_add_media_types(if_ctx_t ctx)
1403 {
1404 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1405 struct ixgbe_hw *hw = &sc->hw;
1406 device_t dev = iflib_get_dev(ctx);
1407 u64 layer;
1408
1409 layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
1410
1411 /* Media types with matching FreeBSD media defines */
1412 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1413 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1414 if (layer & IXGBE_PHYSICAL_LAYER_5000BASE_T)
1415 ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1416 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
1417 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1418 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1419 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1420 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1421 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1422 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1423 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1424
1425 if (hw->mac.type == ixgbe_mac_X550) {
1426 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1427 ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1428 }
1429
1430 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1431 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1432 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1433 NULL);
1434 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1435 }
1436
1437 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1438 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1439 if (hw->phy.multispeed_fiber)
1440 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
1441 NULL);
1442 }
1443 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1444 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1445 if (hw->phy.multispeed_fiber)
1446 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
1447 NULL);
1448 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1449 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1450 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1451 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1452
1453 #ifdef IFM_ETH_XTYPE
1454 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1455 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1456 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1457 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1458 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1459 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1460 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1461 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1462 #else
1463 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1464 device_printf(dev, "Media supported: 10GbaseKR\n");
1465 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1466 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1467 }
1468 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1469 device_printf(dev, "Media supported: 10GbaseKX4\n");
1470 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1471 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1472 }
1473 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1474 device_printf(dev, "Media supported: 1000baseKX\n");
1475 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1476 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1477 }
1478 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1479 device_printf(dev, "Media supported: 2500baseKX\n");
1480 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1481 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1482 }
1483 #endif
1484 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
1485 device_printf(dev, "Media supported: 1000baseBX\n");
1486 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_BX, 0, NULL);
1487 }
1488
1489 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1490 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1491 0, NULL);
1492 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1493 }
1494
1495 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1496 } /* ixgbe_add_media_types */
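/*
 * The media types registered above are what ifconfig(8) lists and
 * selects; a usage sketch (hypothetical port whose module supports
 * these modes):
 *
 *   ifconfig ix0 media 10Gbase-SR
 *   ifconfig ix0 media autoselect
 */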
1497
1498 /************************************************************************
1499 * ixgbe_is_sfp
1500 ************************************************************************/
1501 static inline bool
1502 ixgbe_is_sfp(struct ixgbe_hw *hw)
1503 {
1504 switch (hw->mac.type) {
1505 case ixgbe_mac_82598EB:
1506 if (hw->phy.type == ixgbe_phy_nl)
1507 return (true);
1508 return (false);
1509 case ixgbe_mac_82599EB:
1510 switch (hw->mac.ops.get_media_type(hw)) {
1511 case ixgbe_media_type_fiber:
1512 case ixgbe_media_type_fiber_qsfp:
1513 return (true);
1514 default:
1515 return (false);
1516 }
1517 case ixgbe_mac_X550EM_x:
1518 case ixgbe_mac_X550EM_a:
1519 case ixgbe_mac_E610:
1520 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1521 return (true);
1522 return (false);
1523 default:
1524 return (false);
1525 }
1526 } /* ixgbe_is_sfp */
1527
1528 /************************************************************************
1529 * ixgbe_config_link
1530 ************************************************************************/
1531 static void
1532 ixgbe_config_link(if_ctx_t ctx)
1533 {
1534 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1535 struct ixgbe_hw *hw = &sc->hw;
1536 u32 autoneg, err = 0;
1537 bool sfp, negotiate;
1538
1539 sfp = ixgbe_is_sfp(hw);
1540
1541 if (sfp) {
1542 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
1543 iflib_admin_intr_deferred(ctx);
1544 } else {
1545 if (hw->mac.ops.check_link)
1546 err = ixgbe_check_link(hw, &sc->link_speed,
1547 &sc->link_up, false);
1548 if (err)
1549 return;
1550 autoneg = hw->phy.autoneg_advertised;
1551 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1552 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1553 &negotiate);
1554 if (err)
1555 return;
1556
1557 if (hw->mac.type == ixgbe_mac_X550 &&
1558 hw->phy.autoneg_advertised == 0) {
1559 /*
1560 * 2.5G and 5G autonegotiation speeds on X550
1561 * are disabled by default due to reported
1562 * interoperability issues with some switches.
1563 *
1564 * The second condition checks if any operations
1565 * involving setting autonegotiation speeds have
1566 * been performed prior to this ixgbe_config_link()
1567 * call.
1568 *
1569 * If hw->phy.autoneg_advertised does not
1570 * equal 0, this means that the user might have
1571 * set autonegotiation speeds via the sysctl
1572 * before bringing the interface up. In this
1573 * case, we should not disable 2.5G and 5G
1574 * since those speeds might be selected by the
1575 * user.
1576 *
1577 * Otherwise (i.e. if hw->phy.autoneg_advertised
1578 * is set to 0), it is the first time we set
1579 * autonegotiation preferences and the default
1580 * set of speeds should exclude 2.5G and 5G.
1581 */
1582 autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
1583 IXGBE_LINK_SPEED_5GB_FULL);
1584 }
1585
1586 if (hw->mac.type == ixgbe_mac_E610) {
1587 hw->phy.ops.init(hw);
1588 err = ixgbe_enable_lse(sc);
1589 if (err)
1590 device_printf(sc->dev,
1591 "Failed to enable Link Status Event, "
1592 				    "error: %d\n", err);
1593 }
1594
1595 if (hw->mac.ops.setup_link)
1596 err = hw->mac.ops.setup_link(hw, autoneg,
1597 sc->link_up);
1598 }
1599 } /* ixgbe_config_link */
1600
1601 /************************************************************************
1602 * ixgbe_update_stats_counters - Update board statistics counters.
1603 ************************************************************************/
1604 static void
1605 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1606 {
1607 struct ixgbe_hw *hw = &sc->hw;
1608 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1609 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1610 u32 lxoffrxc;
1611 u64 total_missed_rx = 0;
1612
1613 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1614 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1615 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1616 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1617 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1618
1619 for (int i = 0; i < 16; i++) {
1620 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1621 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1622 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1623 }
1624 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1625 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1626 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1627
1628 /* Hardware workaround, gprc counts missed packets */
1629 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1630 stats->gprc -= missed_rx;
1631
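	/*
	 * On 82599 and later the octet and total-octet counters are
	 * split across a low/high register pair; the two halves are
	 * combined into the 64-bit running totals below.  The 82598
	 * exposes only a single register for these (read via the *H
	 * name), handled in the else branch.
	 */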
1632 if (hw->mac.type != ixgbe_mac_82598EB) {
1633 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1634 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1635 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1636 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1637 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1638 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1639 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1640 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1641 stats->lxoffrxc += lxoffrxc;
1642 } else {
1643 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1644 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1645 stats->lxoffrxc += lxoffrxc;
1646 /* 82598 only has a counter in the high register */
1647 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1648 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1649 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1650 }
1651
1652 /*
1653 * For watchdog management we need to know if we have been paused
1654 * during the last interval, so capture that here.
1655 */
1656 if (lxoffrxc)
1657 sc->shared->isc_pause_frames = 1;
1658
1659 /*
1660 * Workaround: mprc hardware is incorrectly counting
1661 * broadcasts, so for now we subtract those.
1662 */
1663 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1664 stats->bprc += bprc;
1665 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1666 if (hw->mac.type == ixgbe_mac_82598EB)
1667 stats->mprc -= bprc;
1668
1669 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1670 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1671 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1672 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1673 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1674 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1675
1676 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1677 stats->lxontxc += lxon;
1678 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1679 stats->lxofftxc += lxoff;
1680 total = lxon + lxoff;
1681
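	/*
	 * The hardware also counts transmitted flow-control frames in
	 * the good-packet counters, so they are backed out below: each
	 * pause frame is one packet of ETHER_MIN_LEN (64) octets,
	 * hence "total" packets and total * ETHER_MIN_LEN octets are
	 * subtracted.
	 */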
1682 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1683 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1684 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1685 stats->gptc -= total;
1686 stats->mptc -= total;
1687 stats->ptc64 -= total;
1688 stats->gotc -= total * ETHER_MIN_LEN;
1689
1690 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1691 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1692 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1693 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1694 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1695 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1696 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1697 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1698 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1699 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1700 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1701 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1702 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1703 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1704 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1705 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1706 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1707 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1708 /* Only read FCOE on 82599 */
1709 if (hw->mac.type != ixgbe_mac_82598EB) {
1710 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1711 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1712 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1713 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1714 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1715 }
1716
1717 /* Fill out the OS statistics structure */
1718 IXGBE_SET_IPACKETS(sc, stats->gprc);
1719 IXGBE_SET_OPACKETS(sc, stats->gptc);
1720 IXGBE_SET_IBYTES(sc, stats->gorc);
1721 IXGBE_SET_OBYTES(sc, stats->gotc);
1722 IXGBE_SET_IMCASTS(sc, stats->mprc);
1723 IXGBE_SET_OMCASTS(sc, stats->mptc);
1724 IXGBE_SET_COLLISIONS(sc, 0);
1725 IXGBE_SET_IQDROPS(sc, total_missed_rx);
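	/*
	 * Note that total_missed_rx is never accumulated in this
	 * version (only MPC(0) is read, into stats->mpc[0]), so
	 * IQDROPS reports 0 here; missed packets are still visible
	 * in IERRORS via mpc[0] below.
	 */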
1726
1727 /*
1728 * Aggregate following types of errors as RX errors:
1729 * - CRC error count,
1730 * - illegal byte error count,
1731 * - missed packets count,
1732 * - length error count,
1733 * - undersized packets count,
1734 * - fragmented packets count,
1735 * - oversized packets count,
1736 * - jabber count.
1737 */
1738 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1739 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
1740 stats->roc + stats->rjc);
1741 } /* ixgbe_update_stats_counters */
1742
1743 /************************************************************************
1744 * ixgbe_add_hw_stats
1745 *
1746 * Add sysctl variables, one per statistic, to the system.
1747 ************************************************************************/
1748 static void
1749 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1750 {
1751 device_t dev = iflib_get_dev(sc->ctx);
1752 struct ix_rx_queue *rx_que;
1753 struct ix_tx_queue *tx_que;
1754 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1755 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1756 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1757 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1758 struct sysctl_oid *stat_node, *queue_node;
1759 struct sysctl_oid_list *stat_list, *queue_list;
1760 int i;
1761
1762 #define QUEUE_NAME_LEN 32
1763 char namebuf[QUEUE_NAME_LEN];
1764
1765 /* Driver Statistics */
1766 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1767 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1768 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1769 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1770 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1771 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1772
1773 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
1774 i++, tx_que++) {
1775 struct tx_ring *txr = &tx_que->txr;
1776 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1777 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1778 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1779 queue_list = SYSCTL_CHILDREN(queue_node);
1780
1781 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1782 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1783 ixgbe_sysctl_tdh_handler, "IU",
1784 "Transmit Descriptor Head");
1785 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1786 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1787 ixgbe_sysctl_tdt_handler, "IU",
1788 "Transmit Descriptor Tail");
1789 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1790 CTLFLAG_RD, &txr->tso_tx, "TSO");
1791 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1792 CTLFLAG_RD, &txr->total_packets,
1793 "Queue Packets Transmitted");
1794 }
1795
1796 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
1797 i++, rx_que++) {
1798 struct rx_ring *rxr = &rx_que->rxr;
1799 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1800 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1801 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1802 queue_list = SYSCTL_CHILDREN(queue_node);
1803
1804 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1805 CTLTYPE_UINT | CTLFLAG_RW,
1806 &sc->rx_queues[i], 0,
1807 ixgbe_sysctl_interrupt_rate_handler, "IU",
1808 "Interrupt Rate");
1809 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1810 CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1811 "irqs on this queue");
1812 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1813 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1814 ixgbe_sysctl_rdh_handler, "IU",
1815 "Receive Descriptor Head");
1816 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1817 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1818 ixgbe_sysctl_rdt_handler, "IU",
1819 "Receive Descriptor Tail");
1820 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1821 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1822 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1823 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1824 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1825 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1826 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1827 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1828 }
1829
1830 /* MAC stats get their own sub node */
1831 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1832 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1833 stat_list = SYSCTL_CHILDREN(stat_node);
1834
1835 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1836 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1837 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1838 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1839 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1840 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1841 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1842 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1843 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1844 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1845 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1846 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1847 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1848 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1849 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1850 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1851 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1852 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1853
1854 /* Flow Control stats */
1855 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1856 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1857 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1858 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1859 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1860 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1861 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1862 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1863
1864 /* Packet Reception Stats */
1865 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1866 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1867 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1868 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1869 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1870 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1871 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1872 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1873 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1874 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1875 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1876 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1877 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1878 	    CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1879 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1880 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1881 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1882 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1883 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1884 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1885 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1886 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1887 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1888 	    CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1889 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1890 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1891 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1892 	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1893 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1894 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1895 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1896 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1897 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1898 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1899 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1900 	    CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1901 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1902 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1903
1904 /* Packet Transmission Stats */
1905 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1906 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1907 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1908 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1909 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1910 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1911 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1912 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1913 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1914 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1915 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1916 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1917 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1918 	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1919 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1920 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1921 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1922 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1923 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1924 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1925 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1926 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1927 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1928 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1929 } /* ixgbe_add_hw_stats */
1930
1931 /************************************************************************
1932 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1933 *
1934 * Retrieves the TDH value from the hardware
1935 ************************************************************************/
1936 static int
1937 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1938 {
1939 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1940 int error;
1941 unsigned int val;
1942
1943 if (!txr)
1944 return (0);
1945 
1947 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1948 return (EPERM);
1949
1950 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1951 error = sysctl_handle_int(oidp, &val, 0, req);
1952 if (error || !req->newptr)
1953 		return (error);
1954
1955 return (0);
1956 } /* ixgbe_sysctl_tdh_handler */
1957
1958 /************************************************************************
1959 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1960 *
1961 * Retrieves the TDT value from the hardware
1962 ************************************************************************/
1963 static int
1964 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1965 {
1966 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1967 int error;
1968 unsigned int val;
1969
1970 if (!txr)
1971 return (0);
1972
1973 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1974 return (EPERM);
1975
1976 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1977 error = sysctl_handle_int(oidp, &val, 0, req);
1978 if (error || !req->newptr)
1979 		return (error);
1980
1981 return (0);
1982 } /* ixgbe_sysctl_tdt_handler */
1983
1984 /************************************************************************
1985 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1986 *
1987 * Retrieves the RDH value from the hardware
1988 ************************************************************************/
1989 static int
1990 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1991 {
1992 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1993 int error;
1994 unsigned int val;
1995
1996 if (!rxr)
1997 return (0);
1998
1999 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
2000 return (EPERM);
2001
2002 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
2003 error = sysctl_handle_int(oidp, &val, 0, req);
2004 if (error || !req->newptr)
2005 		return (error);
2006
2007 return (0);
2008 } /* ixgbe_sysctl_rdh_handler */
2009
2010 /************************************************************************
2011 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2012 *
2013 * Retrieves the RDT value from the hardware
2014 ************************************************************************/
2015 static int
2016 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
2017 {
2018 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
2019 int error;
2020 unsigned int val;
2021
2022 if (!rxr)
2023 return (0);
2024
2025 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
2026 return (EPERM);
2027
2028 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
2029 error = sysctl_handle_int(oidp, &val, 0, req);
2030 if (error || !req->newptr)
2031 		return (error);
2032
2033 return (0);
2034 } /* ixgbe_sysctl_rdt_handler */
2035
2036 /************************************************************************
2037 * ixgbe_if_vlan_register
2038 *
2039  * Run via the vlan config EVENT; having the vlan id
2040  * lets us use the HW Filter table.  This just creates
2041  * the entry in the soft version of the VFTA; init will
2042  * repopulate the real table.
2043 ************************************************************************/
2044 static void
2045 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
2046 {
2047 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2048 u16 index, bit;
2049
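	/*
	 * The shadow VFTA mirrors the hardware's 128 x 32-bit VLAN
	 * filter table: bits [11:5] of the tag select the word and
	 * bits [4:0] the bit within it.  For example, VLAN 100
	 * (0x064) lands in shadow_vfta[3], bit 4.
	 */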
2050 index = (vtag >> 5) & 0x7F;
2051 bit = vtag & 0x1F;
2052 sc->shadow_vfta[index] |= (1 << bit);
2053 ++sc->num_vlans;
2054 ixgbe_setup_vlan_hw_support(ctx);
2055 } /* ixgbe_if_vlan_register */
2056
2057 /************************************************************************
2058 * ixgbe_if_vlan_unregister
2059 *
2060 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2061 ************************************************************************/
2062 static void
2063 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
2064 {
2065 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2066 u16 index, bit;
2067
2068 index = (vtag >> 5) & 0x7F;
2069 bit = vtag & 0x1F;
2070 sc->shadow_vfta[index] &= ~(1 << bit);
2071 --sc->num_vlans;
2072 /* Re-init to load the changes */
2073 ixgbe_setup_vlan_hw_support(ctx);
2074 } /* ixgbe_if_vlan_unregister */
2075
2076 /************************************************************************
2077 * ixgbe_setup_vlan_hw_support
2078 ************************************************************************/
2079 static void
2080 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
2081 {
2082 if_t ifp = iflib_get_ifp(ctx);
2083 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2084 struct ixgbe_hw *hw = &sc->hw;
2085 struct rx_ring *rxr;
2086 int i;
2087 u32 ctrl;
2088 
2090 /*
2091 	 * We get here through init_locked, meaning
2092 	 * a soft reset; that has already cleared
2093 	 * the VFTA and other state, so if no VLANs
2094 	 * have been registered there is nothing to do.
2095 */
2096 if (sc->num_vlans == 0 ||
2097 (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
2098 /* Clear the vlan hw flag */
2099 for (i = 0; i < sc->num_rx_queues; i++) {
2100 rxr = &sc->rx_queues[i].rxr;
2101 			/* On 82599 the VLAN enable is per-queue in RXDCTL */
2102 if (hw->mac.type != ixgbe_mac_82598EB) {
2103 ctrl = IXGBE_READ_REG(hw,
2104 IXGBE_RXDCTL(rxr->me));
2105 ctrl &= ~IXGBE_RXDCTL_VME;
2106 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2107 ctrl);
2108 }
2109 rxr->vtag_strip = false;
2110 }
2111 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2112 		/* Disable the VLAN filter table */
2113 ctrl |= IXGBE_VLNCTRL_CFIEN;
2114 ctrl &= ~IXGBE_VLNCTRL_VFE;
2115 if (hw->mac.type == ixgbe_mac_82598EB)
2116 ctrl &= ~IXGBE_VLNCTRL_VME;
2117 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2118 return;
2119 }
2120
2121 /* Setup the queues for vlans */
2122 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
2123 for (i = 0; i < sc->num_rx_queues; i++) {
2124 rxr = &sc->rx_queues[i].rxr;
2125 			/* On 82599 the VLAN enable is per-queue in RXDCTL */
2126 if (hw->mac.type != ixgbe_mac_82598EB) {
2127 ctrl = IXGBE_READ_REG(hw,
2128 IXGBE_RXDCTL(rxr->me));
2129 ctrl |= IXGBE_RXDCTL_VME;
2130 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2131 ctrl);
2132 }
2133 rxr->vtag_strip = true;
2134 }
2135 }
2136
2137 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
2138 return;
2139 /*
2140 	 * A soft reset zeroes out the VFTA, so
2141 * we need to repopulate it now.
2142 */
2143 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2144 if (sc->shadow_vfta[i] != 0)
2145 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2146 sc->shadow_vfta[i]);
2147
2148 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2149 /* Enable the Filter Table if enabled */
2150 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
2151 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2152 ctrl |= IXGBE_VLNCTRL_VFE;
2153 }
2154 if (hw->mac.type == ixgbe_mac_82598EB)
2155 ctrl |= IXGBE_VLNCTRL_VME;
2156 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2157 } /* ixgbe_setup_vlan_hw_support */
2158
2159 /************************************************************************
2160 * ixgbe_get_slot_info
2161 *
2162 * Get the width and transaction speed of
2163 * the slot this adapter is plugged into.
2164 ************************************************************************/
2165 static void
2166 ixgbe_get_slot_info(struct ixgbe_softc *sc)
2167 {
2168 device_t dev = iflib_get_dev(sc->ctx);
2169 struct ixgbe_hw *hw = &sc->hw;
2170 int bus_info_valid = true;
2171 u32 offset;
2172 u16 link;
2173
2174 /* Some devices are behind an internal bridge */
2175 switch (hw->device_id) {
2176 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2177 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2178 goto get_parent_info;
2179 default:
2180 break;
2181 }
2182
2183 ixgbe_get_bus_info(hw);
2184
2185 /*
2186 	 * Some devices don't use PCI-E; for those, skip the
2187 	 * display rather than report "Unknown" bus speed and width.
2188 */
2189 switch (hw->mac.type) {
2190 case ixgbe_mac_X550EM_x:
2191 case ixgbe_mac_X550EM_a:
2192 return;
2193 default:
2194 goto display;
2195 }
2196
2197 get_parent_info:
2198 /*
2199 * For the Quad port adapter we need to parse back
2200 * up the PCI tree to find the speed of the expansion
2201 * slot into which this adapter is plugged. A bit more work.
2202 */
2203 dev = device_get_parent(device_get_parent(dev));
2204 #ifdef IXGBE_DEBUG
2205 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2206 pci_get_slot(dev), pci_get_function(dev));
2207 #endif
2208 dev = device_get_parent(device_get_parent(dev));
2209 #ifdef IXGBE_DEBUG
2210 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2211 pci_get_slot(dev), pci_get_function(dev));
2212 #endif
2213 /* Now get the PCI Express Capabilities offset */
2214 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
2215 /*
2216 * Hmm...can't get PCI-Express capabilities.
2217 * Falling back to default method.
2218 */
2219 bus_info_valid = false;
2220 ixgbe_get_bus_info(hw);
2221 goto display;
2222 }
2223 /* ...and read the Link Status Register */
2224 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2225 ixgbe_set_pci_config_data_generic(hw, link);
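	/*
	 * The PCIe Link Status Register encodes the negotiated speed
	 * in bits [3:0] (1 = 2.5GT/s, 2 = 5GT/s, 3 = 8GT/s) and the
	 * width in bits [9:4]; the generic helper above translates
	 * those fields into hw->bus.speed and hw->bus.width.
	 */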
2226
2227 display:
2228 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2229 ((hw->bus.speed == ixgbe_bus_speed_16000) ? "16.0GT/s" :
2230 (hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2231 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2232 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2233 "Unknown"),
2234 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2235 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2236 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2237 "Unknown"));
2238
2239 if (bus_info_valid) {
2240 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2241 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2242 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2243 device_printf(dev,
2244 "PCI-Express bandwidth available for this card"
2245 " is not sufficient for optimal performance.\n");
2246 device_printf(dev,
2247 "For optimal performance a x8 PCIE, or x4 PCIE"
2248 " Gen2 slot is required.\n");
2249 }
2250 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2251 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2252 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2253 device_printf(dev,
2254 "PCI-Express bandwidth available for this card"
2255 " is not sufficient for optimal performance.\n");
2256 device_printf(dev,
2257 "For optimal performance a x8 PCIE Gen3 slot is"
2258 " required.\n");
2259 }
2260 } else
2261 device_printf(dev,
2262 "Unable to determine slot speed/width. The speed/width"
2263 " reported are that of the internal switch.\n");
2264
2265 return;
2266 } /* ixgbe_get_slot_info */
2267
2268 /************************************************************************
2269 * ixgbe_if_msix_intr_assign
2270 *
2271 * Setup MSI-X Interrupt resources and handlers
2272 ************************************************************************/
2273 static int
2274 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2275 {
2276 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2277 struct ix_rx_queue *rx_que = sc->rx_queues;
2278 struct ix_tx_queue *tx_que;
2279 int error, rid, vector = 0;
2280 char buf[16];
2281
2282 	/* The admin queue is vector 0 */
2283 rid = vector + 1;
2284 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2285 rid = vector + 1;
2286
2287 snprintf(buf, sizeof(buf), "rxq%d", i);
2288 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2289 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
2290 buf);
2291
2292 if (error) {
2293 device_printf(iflib_get_dev(ctx),
2294 			    "Failed to allocate queue interrupt %d, error: %d\n",
2295 			    i, error);
2296 sc->num_rx_queues = i + 1;
2297 goto fail;
2298 }
2299
2300 rx_que->msix = vector;
2301 }
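	/*
	 * TX queues do not get dedicated MSI-X vectors; each one is
	 * serviced as a softirq on an RX queue's vector, assigned
	 * round-robin below (txq i shares rxq i % num_rx_queues).
	 */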
2302 for (int i = 0; i < sc->num_tx_queues; i++) {
2303 snprintf(buf, sizeof(buf), "txq%d", i);
2304 tx_que = &sc->tx_queues[i];
2305 tx_que->msix = i % sc->num_rx_queues;
2306 iflib_softirq_alloc_generic(ctx,
2307 &sc->rx_queues[tx_que->msix].que_irq,
2308 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2309 }
2310 rid = vector + 1;
2311 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2312 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2313 if (error) {
2314 device_printf(iflib_get_dev(ctx),
2315 		    "Failed to register admin handler\n");
2316 return (error);
2317 }
2318
2319 sc->vector = vector;
2320
2321 return (0);
2322 fail:
2323 iflib_irq_free(ctx, &sc->irq);
2324 rx_que = sc->rx_queues;
2325 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2326 iflib_irq_free(ctx, &rx_que->que_irq);
2327
2328 return (error);
2329 } /* ixgbe_if_msix_intr_assign */
2330
2331 static inline void
2332 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2333 {
2334 uint32_t newitr = 0;
2335 struct rx_ring *rxr = &que->rxr;
2336 /* FIXME struct tx_ring *txr = ... ->txr; */
2337
2338 /*
2339 * Do Adaptive Interrupt Moderation:
2340 * - Write out last calculated setting
2341 * - Calculate based on average size over
2342 * the last interval.
2343 */
2344 if (que->eitr_setting) {
2345 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2346 que->eitr_setting);
2347 }
2348
2349 que->eitr_setting = 0;
2350 /* Idle, do nothing */
2351 if (rxr->bytes == 0) {
2352 /* FIXME && txr->bytes == 0 */
2353 return;
2354 }
2355
2356 if ((rxr->bytes) && (rxr->packets))
2357 newitr = rxr->bytes / rxr->packets;
2358 /* FIXME for transmit accounting
2359 * if ((txr->bytes) && (txr->packets))
2360 * newitr = txr->bytes/txr->packets;
2361 * if ((rxr->bytes) && (rxr->packets))
2362 * newitr = max(newitr, (rxr->bytes / rxr->packets));
2363 */
2364
2365 newitr += 24; /* account for hardware frame, crc */
2366 /* set an upper boundary */
2367 newitr = min(newitr, 3000);
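	/*
	 * Worked example: an average of 1500 bytes/packet gives
	 * newitr = 1524, halved below to 762; a small-packet workload
	 * of 64 bytes/packet gives 88, halved to 44, i.e. a much
	 * shorter moderation interval.  Mid-range averages are
	 * divided by three instead.
	 */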
2368
2369 /* Be nice to the mid range */
2370 if ((newitr > 300) && (newitr < 1200)) {
2371 newitr = (newitr / 3);
2372 } else {
2373 newitr = (newitr / 2);
2374 }
2375
2376 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2377 newitr |= newitr << 16;
2378 } else {
2379 newitr |= IXGBE_EITR_CNT_WDIS;
2380 }
2381
2382 /* save for next interrupt */
2383 que->eitr_setting = newitr;
2384
2385 /* Reset state */
2386 /* FIXME txr->bytes = 0; */
2387 /* FIXME txr->packets = 0; */
2388 rxr->bytes = 0;
2389 rxr->packets = 0;
2390
2391 return;
2392 }
2393
2394 /*********************************************************************
2395 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2396 **********************************************************************/
2397 static int
2398 ixgbe_msix_que(void *arg)
2399 {
2400 struct ix_rx_queue *que = arg;
2401 struct ixgbe_softc *sc = que->sc;
2402 if_t ifp = iflib_get_ifp(que->sc->ctx);
2403
2404 /* Protect against spurious interrupts */
2405 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2406 return (FILTER_HANDLED);
2407
2408 ixgbe_disable_queue(sc, que->msix);
2409 ++que->irqs;
2410
2411 /* Check for AIM */
2412 if (sc->enable_aim) {
2413 ixgbe_perform_aim(sc, que);
2414 }
2415
2416 return (FILTER_SCHEDULE_THREAD);
2417 } /* ixgbe_msix_que */
2418
2419 /************************************************************************
2420  * ixgbe_if_media_status - Media Ioctl callback
2421 *
2422 * Called whenever the user queries the status of
2423 * the interface using ifconfig.
2424 ************************************************************************/
2425 static void
2426 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
2427 {
2428 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2429 struct ixgbe_hw *hw = &sc->hw;
2430 int layer;
2431
2432 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2433
2434 ifmr->ifm_status = IFM_AVALID;
2435 ifmr->ifm_active = IFM_ETHER;
2436
2437 if (!sc->link_active)
2438 return;
2439
2440 ifmr->ifm_status |= IFM_ACTIVE;
2441 layer = sc->phy_layer;
2442
2443 if (layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)
2444 switch (sc->link_speed) {
2445 case IXGBE_LINK_SPEED_10GB_FULL:
2446 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2447 break;
2448 case IXGBE_LINK_SPEED_5GB_FULL:
2449 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2450 break;
2451 case IXGBE_LINK_SPEED_2_5GB_FULL:
2452 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2453 break;
2454 case IXGBE_LINK_SPEED_1GB_FULL:
2455 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2456 break;
2457 case IXGBE_LINK_SPEED_100_FULL:
2458 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2459 break;
2460 case IXGBE_LINK_SPEED_10_FULL:
2461 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2462 break;
2463 }
2464 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2465 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2466 switch (sc->link_speed) {
2467 case IXGBE_LINK_SPEED_10GB_FULL:
2468 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2469 break;
2470 case IXGBE_LINK_SPEED_1GB_FULL:
2471 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2472 break;
2473 }
2474 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2475 switch (sc->link_speed) {
2476 case IXGBE_LINK_SPEED_10GB_FULL:
2477 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2478 break;
2479 case IXGBE_LINK_SPEED_1GB_FULL:
2480 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2481 break;
2482 }
2483 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2484 switch (sc->link_speed) {
2485 case IXGBE_LINK_SPEED_10GB_FULL:
2486 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2487 break;
2488 case IXGBE_LINK_SPEED_1GB_FULL:
2489 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2490 break;
2491 }
2492 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2493 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2494 switch (sc->link_speed) {
2495 case IXGBE_LINK_SPEED_10GB_FULL:
2496 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2497 break;
2498 case IXGBE_LINK_SPEED_1GB_FULL:
2499 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2500 break;
2501 }
2502 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2503 switch (sc->link_speed) {
2504 case IXGBE_LINK_SPEED_10GB_FULL:
2505 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2506 break;
2507 }
2508 /*
2509 * XXX: These need to use the proper media types once
2510 * they're added.
2511 */
2512 #ifndef IFM_ETH_XTYPE
2513 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2514 switch (sc->link_speed) {
2515 case IXGBE_LINK_SPEED_10GB_FULL:
2516 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2517 break;
2518 case IXGBE_LINK_SPEED_2_5GB_FULL:
2519 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2520 break;
2521 case IXGBE_LINK_SPEED_1GB_FULL:
2522 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2523 break;
2524 }
2525 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2526 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2527 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2528 switch (sc->link_speed) {
2529 case IXGBE_LINK_SPEED_10GB_FULL:
2530 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2531 break;
2532 case IXGBE_LINK_SPEED_2_5GB_FULL:
2533 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2534 break;
2535 case IXGBE_LINK_SPEED_1GB_FULL:
2536 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2537 break;
2538 }
2539 #else
2540 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2541 switch (sc->link_speed) {
2542 case IXGBE_LINK_SPEED_10GB_FULL:
2543 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2544 break;
2545 case IXGBE_LINK_SPEED_2_5GB_FULL:
2546 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2547 break;
2548 case IXGBE_LINK_SPEED_1GB_FULL:
2549 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2550 break;
2551 }
2552 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2553 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2554 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2555 switch (sc->link_speed) {
2556 case IXGBE_LINK_SPEED_10GB_FULL:
2557 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2558 break;
2559 case IXGBE_LINK_SPEED_2_5GB_FULL:
2560 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2561 break;
2562 case IXGBE_LINK_SPEED_1GB_FULL:
2563 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2564 break;
2565 }
2566 #endif
2567
2568 /* If nothing is recognized... */
2569 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2570 ifmr->ifm_active |= IFM_UNKNOWN;
2571
2572 /* Display current flow control setting used on link */
2573 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2574 hw->fc.current_mode == ixgbe_fc_full)
2575 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2576 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2577 hw->fc.current_mode == ixgbe_fc_full)
2578 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2579 } /* ixgbe_if_media_status */
2580
2581 /************************************************************************
2582  * ixgbe_if_media_change - Media Ioctl callback
2583 *
2584 * Called when the user changes speed/duplex using
2585  *   media/mediaopt options with ifconfig.
2586 ************************************************************************/
2587 static int
2588 ixgbe_if_media_change(if_ctx_t ctx)
2589 {
2590 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2591 struct ifmedia *ifm = iflib_get_media(ctx);
2592 struct ixgbe_hw *hw = &sc->hw;
2593 ixgbe_link_speed speed = 0;
2594
2595 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2596
2597 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2598 return (EINVAL);
2599
2600 if (hw->phy.media_type == ixgbe_media_type_backplane)
2601 return (EPERM);
2602
2603 /*
2604 * We don't actually need to check against the supported
2605 * media types of the adapter; ifmedia will take care of
2606 * that for us.
2607 */
2608 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2609 case IFM_AUTO:
2610 case IFM_10G_T:
2611 speed |= IXGBE_LINK_SPEED_100_FULL;
2612 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2613 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2614 break;
2615 case IFM_10G_LRM:
2616 case IFM_10G_LR:
2617 #ifndef IFM_ETH_XTYPE
2618 case IFM_10G_SR: /* KR, too */
2619 case IFM_10G_CX4: /* KX4 */
2620 #else
2621 case IFM_10G_KR:
2622 case IFM_10G_KX4:
2623 #endif
2624 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2625 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2626 break;
2627 #ifndef IFM_ETH_XTYPE
2628 case IFM_1000_CX: /* KX */
2629 #else
2630 case IFM_1000_KX:
2631 #endif
2632 case IFM_1000_LX:
2633 case IFM_1000_SX:
2634 case IFM_1000_BX:
2635 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2636 break;
2637 case IFM_1000_T:
2638 speed |= IXGBE_LINK_SPEED_100_FULL;
2639 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2640 break;
2641 case IFM_10G_TWINAX:
2642 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2643 break;
2644 case IFM_5000_T:
2645 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2646 break;
2647 case IFM_2500_T:
2648 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2649 break;
2650 case IFM_100_TX:
2651 speed |= IXGBE_LINK_SPEED_100_FULL;
2652 break;
2653 case IFM_10_T:
2654 speed |= IXGBE_LINK_SPEED_10_FULL;
2655 break;
2656 default:
2657 goto invalid;
2658 }
2659
2660 hw->mac.autotry_restart = true;
2661 hw->mac.ops.setup_link(hw, speed, true);
2662 sc->advertise =
2663 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
2664 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
2665 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
2666 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
2667 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
2668 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
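	/*
	 * sc->advertise uses the same bit encoding as this driver's
	 * advertise_speed sysctl: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G,
	 * 0x8 = 10M, 0x10 = 2.5G, 0x20 = 5G.  Selecting 10GBase-T
	 * above, for example, yields 0x4 | 0x2 | 0x1 = 0x7.
	 */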
2669
2670 return (0);
2671
2672 invalid:
2673 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2674
2675 return (EINVAL);
2676 } /* ixgbe_if_media_change */
2677
2678 /************************************************************************
2679  * ixgbe_if_promisc_set
2680 ************************************************************************/
2681 static int
2682 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2683 {
2684 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2685 if_t ifp = iflib_get_ifp(ctx);
2686 u32 rctl;
2687 int mcnt = 0;
2688
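	/*
	 * FCTRL.UPE enables unicast promiscuous mode and FCTRL.MPE
	 * multicast promiscuous mode; MPE is left set whenever the
	 * multicast list would overflow the
	 * MAX_NUM_MULTICAST_ADDRESSES hardware filter entries.
	 */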
2689 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2690 rctl &= (~IXGBE_FCTRL_UPE);
2691 if (if_getflags(ifp) & IFF_ALLMULTI)
2692 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2693 else {
2694 mcnt = min(if_llmaddr_count(ifp),
2695 MAX_NUM_MULTICAST_ADDRESSES);
2696 }
2697 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2698 rctl &= (~IXGBE_FCTRL_MPE);
2699 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2700
2701 if (if_getflags(ifp) & IFF_PROMISC) {
2702 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2703 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2704 } else if (if_getflags(ifp) & IFF_ALLMULTI) {
2705 rctl |= IXGBE_FCTRL_MPE;
2706 rctl &= ~IXGBE_FCTRL_UPE;
2707 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2708 }
2709 return (0);
2710 } /* ixgbe_if_promisc_set */
2711
2712 /************************************************************************
2713 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2714 ************************************************************************/
2715 static int
2716 ixgbe_msix_link(void *arg)
2717 {
2718 struct ixgbe_softc *sc = arg;
2719 struct ixgbe_hw *hw = &sc->hw;
2720 u32 eicr, eicr_mask;
2721 s32 retval;
2722
2723 ++sc->link_irq;
2724
2725 /* Pause other interrupts */
2726 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2727
2728 /* First get the cause */
2729 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2730 /* Be sure the queue bits are not cleared */
2731 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2732 /* Clear interrupt with write */
2733 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
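	/*
	 * The causes are read from EICS rather than EICR: reading
	 * EICS returns the same cause bits without EICR's
	 * clear-on-read side effect, so the queue bits can be
	 * preserved and everything else acknowledged by the explicit
	 * EICR write above.
	 */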
2734
2735 /* Link status change */
2736 if (eicr & IXGBE_EICR_LSC) {
2737 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2738 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
2739 }
2740
2741 if (eicr & IXGBE_EICR_FW_EVENT) {
2742 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FW_EVENT);
2743 sc->task_requests |= IXGBE_REQUEST_TASK_FWEVENT;
2744 }
2745
2746 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2747 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2748 (eicr & IXGBE_EICR_FLOW_DIR)) {
2749 /* This is probably overkill :) */
2750 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2751 return (FILTER_HANDLED);
2752 /* Disable the interrupt */
2753 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2754 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2755 } else
2756 if (eicr & IXGBE_EICR_ECC) {
2757 device_printf(iflib_get_dev(sc->ctx),
2758 "Received ECC Err, initiating reset\n");
2759 			hw->mac.flags &=
2760 ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2761 ixgbe_reset_hw(hw);
2762 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2763 IXGBE_EICR_ECC);
2764 }
2765
2766 /* Check for over temp condition */
2767 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2768 switch (sc->hw.mac.type) {
2769 case ixgbe_mac_X550EM_a:
2770 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2771 break;
2772 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2773 IXGBE_EICR_GPI_SDP0_X550EM_a);
2774 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2775 IXGBE_EICR_GPI_SDP0_X550EM_a);
2776 retval = hw->phy.ops.check_overtemp(hw);
2777 if (retval != IXGBE_ERR_OVERTEMP)
2778 break;
2779 device_printf(iflib_get_dev(sc->ctx),
2780 "\nCRITICAL: OVER TEMP!!"
2781 " PHY IS SHUT DOWN!!\n");
2782 device_printf(iflib_get_dev(sc->ctx),
2783 "System shutdown required!\n");
2784 break;
2785 default:
2786 if (!(eicr & IXGBE_EICR_TS))
2787 break;
2788 retval = hw->phy.ops.check_overtemp(hw);
2789 if (retval != IXGBE_ERR_OVERTEMP)
2790 break;
2791 device_printf(iflib_get_dev(sc->ctx),
2792 "\nCRITICAL: OVER TEMP!!"
2793 " PHY IS SHUT DOWN!!\n");
2794 device_printf(iflib_get_dev(sc->ctx),
2795 "System shutdown required!\n");
2796 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2797 IXGBE_EICR_TS);
2798 break;
2799 }
2800 }
2801
2802 /* Check for VF message */
2803 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2804 (eicr & IXGBE_EICR_MAILBOX)) {
2805 sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
2806 }
2807 }
2808
2809 /*
2810 * On E610, the firmware handles PHY configuration, so
2811 * there is no need to perform any SFP-specific tasks.
2812 */
2813 if (hw->mac.type != ixgbe_mac_E610 && ixgbe_is_sfp(hw)) {
2814 /* Pluggable optics-related interrupt */
2815 if (hw->mac.type >= ixgbe_mac_X540)
2816 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2817 else
2818 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2819
2820 if (eicr & eicr_mask) {
2821 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2822 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
2823 }
2824
2825 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2826 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2827 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2828 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2829 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2830 }
2831 }
2832
2833 /* Check for fan failure */
2834 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2835 ixgbe_check_fan_failure(sc, eicr, true);
2836 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2837 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2838 }
2839
2840 /* External PHY interrupt */
2841 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2842 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2843 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2844 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
2845 }
2846
2847 return (sc->task_requests != 0) ?
2848 FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2849 } /* ixgbe_msix_link */
2850
2851 /************************************************************************
2852 * ixgbe_sysctl_interrupt_rate_handler
2853 ************************************************************************/
2854 static int
2855 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2856 {
2857 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2858 int error;
2859 unsigned int reg, usec, rate;
2860
2861 if (atomic_load_acq_int(&que->sc->recovery_mode))
2862 return (EPERM);
2863
2864 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2865 usec = ((reg & 0x0FF8) >> 3);
2866 if (usec > 0)
2867 rate = 500000 / usec;
2868 else
2869 rate = 0;
2870 error = sysctl_handle_int(oidp, &rate, 0, req);
2871 if (error || !req->newptr)
2872 		return (error);
2873 reg &= ~0xfff; /* default, no limitation */
2874 ixgbe_max_interrupt_rate = 0;
2875 if (rate > 0 && rate < 500000) {
2876 if (rate < 1000)
2877 rate = 1000;
2878 ixgbe_max_interrupt_rate = rate;
2879 reg |= ((4000000/rate) & 0xff8);
2880 }
2881 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
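	/*
	 * Worked example: requesting 8000 interrupts/s stores
	 * 4000000 / 8000 = 500 (0x1F4), masked to 0x1F0, i.e. an
	 * interval field of 62 in EITR bits [11:3]; reading back
	 * reports 500000 / 62 ~= 8065 interrupts/s because of the
	 * ~2 us granularity implied by the conversion above.
	 */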
2882
2883 return (0);
2884 } /* ixgbe_sysctl_interrupt_rate_handler */
2885
2886 /************************************************************************
2887 * ixgbe_add_device_sysctls
2888 ************************************************************************/
2889 static void
2890 ixgbe_add_device_sysctls(if_ctx_t ctx)
2891 {
2892 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2893 device_t dev = iflib_get_dev(ctx);
2894 struct ixgbe_hw *hw = &sc->hw;
2895 struct sysctl_oid_list *child;
2896 struct sysctl_ctx_list *ctx_list;
2897
2898 ctx_list = device_get_sysctl_ctx(dev);
2899 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2900
2901 /* Sysctls for all devices */
2902 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2903 CTLTYPE_INT | CTLFLAG_RW,
2904 sc, 0, ixgbe_sysctl_flowcntl, "I",
2905 IXGBE_SYSCTL_DESC_SET_FC);
2906
2907 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2908 CTLTYPE_INT | CTLFLAG_RW,
2909 sc, 0, ixgbe_sysctl_advertise, "I",
2910 IXGBE_SYSCTL_DESC_ADV_SPEED);
2911
2912 sc->enable_aim = ixgbe_enable_aim;
2913 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2914 &sc->enable_aim, 0, "Interrupt Moderation");
2915
2916 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2917 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2918 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2919
2920 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2921 "tso_tcp_flags_mask_first_segment",
2922 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2923 sc, 0, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2924 "TSO TCP flags mask for first segment");
2925
2926 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2927 "tso_tcp_flags_mask_middle_segment",
2928 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2929 sc, 1, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2930 "TSO TCP flags mask for middle segment");
2931
2932 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2933 "tso_tcp_flags_mask_last_segment",
2934 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2935 sc, 2, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2936 "TSO TCP flags mask for last segment");
2937
2938 #ifdef IXGBE_DEBUG
2939 /* testing sysctls (for all devices) */
2940 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2941 CTLTYPE_INT | CTLFLAG_RW,
2942 sc, 0, ixgbe_sysctl_power_state,
2943 "I", "PCI Power State");
2944
2945 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2946 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2947 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2948 #endif
2949 /* for X550 series devices */
2950 if (hw->mac.type >= ixgbe_mac_X550)
2951 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2952 CTLTYPE_U16 | CTLFLAG_RW,
2953 sc, 0, ixgbe_sysctl_dmac,
2954 "I", "DMA Coalesce");
2955
2956 /* for WoL-capable devices */
2957 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2958 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2959 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2960 ixgbe_sysctl_wol_enable, "I",
2961 "Enable/Disable Wake on LAN");
2962
2963 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2964 CTLTYPE_U32 | CTLFLAG_RW,
2965 sc, 0, ixgbe_sysctl_wufc,
2966 "I", "Enable/Disable Wake Up Filters");
2967 }
2968
2969 /* for X552/X557-AT devices */
2970 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2971 struct sysctl_oid *phy_node;
2972 struct sysctl_oid_list *phy_list;
2973
2974 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2975 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2976 "External PHY sysctls");
2977 phy_list = SYSCTL_CHILDREN(phy_node);
2978
2979 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2980 CTLTYPE_U16 | CTLFLAG_RD,
2981 sc, 0, ixgbe_sysctl_phy_temp,
2982 "I", "Current External PHY Temperature (Celsius)");
2983
2984 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2985 "overtemp_occurred",
2986 CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
2987 ixgbe_sysctl_phy_overtemp_occurred, "I",
2988 "External PHY High Temperature Event Occurred");
2989 }
2990
2991 if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2992 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2993 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2994 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2995 }
2996 } /* ixgbe_add_device_sysctls */
2997
2998 /************************************************************************
2999 * ixgbe_allocate_pci_resources
3000 ************************************************************************/
3001 static int
3002 ixgbe_allocate_pci_resources(if_ctx_t ctx)
3003 {
3004 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3005 device_t dev = iflib_get_dev(ctx);
3006 int rid;
3007
3008 rid = PCIR_BAR(0);
3009 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3010 RF_ACTIVE);
3011
3012 if (!(sc->pci_mem)) {
3013 device_printf(dev,
3014 "Unable to allocate bus resource: memory\n");
3015 return (ENXIO);
3016 }
3017
3018 /* Save bus_space values for READ/WRITE_REG macros */
3019 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
3020 sc->osdep.mem_bus_space_handle =
3021 rman_get_bushandle(sc->pci_mem);
3022 /* Set hw values for shared code */
3023 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
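	/*
	 * Note: hw_addr points at the saved bus-space handle, not at
	 * a direct CPU mapping of the BAR; the IXGBE_READ/WRITE_REG
	 * macros go through the bus_space tag and handle saved above.
	 */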
3024
3025 return (0);
3026 } /* ixgbe_allocate_pci_resources */
3027
3028 /************************************************************************
3029 * ixgbe_detach - Device removal routine
3030 *
3031 * Called when the driver is being removed.
3032 * Stops the adapter and deallocates all the resources
3033 * that were allocated for driver operation.
3034 *
3035 * return 0 on success, positive on failure
3036 ************************************************************************/
3037 static int
3038 ixgbe_if_detach(if_ctx_t ctx)
3039 {
3040 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3041 device_t dev = iflib_get_dev(ctx);
3042 u32 ctrl_ext;
3043
3044 INIT_DEBUGOUT("ixgbe_detach: begin");
3045
3046 if (ixgbe_pci_iov_detach(dev) != 0) {
3047 device_printf(dev, "SR-IOV in use; detach first.\n");
3048 return (EBUSY);
3049 }
3050
3051 ixgbe_setup_low_power_mode(ctx);
3052
3053 /* let hardware know driver is unloading */
3054 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
3055 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3056 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
3057
3058 callout_drain(&sc->fw_mode_timer);
3059
3060 if (sc->hw.mac.type == ixgbe_mac_E610) {
3061 ixgbe_disable_lse(sc);
3062 ixgbe_shutdown_aci(&sc->hw);
3063 }
3064
3065 ixgbe_free_pci_resources(ctx);
3066
3067 free(sc->mta, M_IXGBE);
3068
3069 return (0);
3070 } /* ixgbe_if_detach */
3071
3072 /************************************************************************
3073 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3074 *
3075 * Prepare the adapter/port for LPLU and/or WoL
3076 ************************************************************************/
3077 static int
3078 ixgbe_setup_low_power_mode(if_ctx_t ctx)
3079 {
3080 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3081 struct ixgbe_hw *hw = &sc->hw;
3082 device_t dev = iflib_get_dev(ctx);
3083 s32 error = 0;
3084
3085 if (!hw->wol_enabled)
3086 ixgbe_set_phy_power(hw, false);
3087
3088 /* Limit power management flow to X550EM baseT */
3089 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3090 hw->phy.ops.enter_lplu) {
3091 /* Turn off support for APM wakeup. (Using ACPI instead) */
3092 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3093 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3094
3095 /*
3096 * Clear Wake Up Status register to prevent any previous
3097 * wakeup events from waking us up immediately after we
3098 * suspend.
3099 */
3100 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3101
3102 /*
3103 * Program the Wakeup Filter Control register with user filter
3104 * settings
3105 */
3106 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
3107
3108 /* Enable wakeups and power management in Wakeup Control */
3109 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3110 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3111
3112 /* X550EM baseT adapters need a special LPLU flow */
3113 hw->phy.reset_disable = true;
3114 ixgbe_if_stop(ctx);
3115 error = hw->phy.ops.enter_lplu(hw);
3116 if (error)
3117 device_printf(dev, "Error entering LPLU: %d\n",
3118 error);
3119 hw->phy.reset_disable = false;
3120 } else {
3121 /* Just stop for other adapters */
3122 ixgbe_if_stop(ctx);
3123 }
3124
3125 	return (error);
3126 } /* ixgbe_setup_low_power_mode */
3127
3128 /************************************************************************
3129 * ixgbe_shutdown - Shutdown entry point
3130 ************************************************************************/
3131 static int
3132 ixgbe_if_shutdown(if_ctx_t ctx)
3133 {
3134 int error = 0;
3135
3136 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3137
3138 error = ixgbe_setup_low_power_mode(ctx);
3139
3140 return (error);
3141 } /* ixgbe_if_shutdown */
3142
3143 /************************************************************************
3144 * ixgbe_suspend
3145 *
3146 * From D0 to D3
3147 ************************************************************************/
3148 static int
3149 ixgbe_if_suspend(if_ctx_t ctx)
3150 {
3151 int error = 0;
3152
3153 INIT_DEBUGOUT("ixgbe_suspend: begin");
3154
3155 error = ixgbe_setup_low_power_mode(ctx);
3156
3157 return (error);
3158 } /* ixgbe_if_suspend */
3159
3160 /************************************************************************
3161 * ixgbe_resume
3162 *
3163 * From D3 to D0
3164 ************************************************************************/
3165 static int
3166 ixgbe_if_resume(if_ctx_t ctx)
3167 {
3168 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3169 device_t dev = iflib_get_dev(ctx);
3170 if_t ifp = iflib_get_ifp(ctx);
3171 struct ixgbe_hw *hw = &sc->hw;
3172 u32 wus;
3173
3174 INIT_DEBUGOUT("ixgbe_resume: begin");
3175
3176 /* Read & clear WUS register */
3177 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3178 if (wus)
3179 device_printf(dev, "Woken up by (WUS): %#010x\n",
3180 IXGBE_READ_REG(hw, IXGBE_WUS));
3181 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3182 /* And clear WUFC until next low-power transition */
3183 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3184
3185 /*
3186 * Required after D3->D0 transition;
3187 * will re-advertise all previous advertised speeds
3188 */
3189 if (if_getflags(ifp) & IFF_UP)
3190 ixgbe_if_init(ctx);
3191
3192 return (0);
3193 } /* ixgbe_if_resume */
3194
3195 /************************************************************************
3196 * ixgbe_if_mtu_set - Ioctl mtu entry point
3197 *
3198 * Return 0 on success, EINVAL on failure
3199 ************************************************************************/
3200 static int
3201 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
3202 {
3203 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3204 int error = 0;
3205
3206 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
3207
3208 if (mtu > IXGBE_MAX_MTU) {
3209 error = EINVAL;
3210 } else {
3211 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
3212 }
3213
3214 return error;
3215 } /* ixgbe_if_mtu_set */
3216
3217 /************************************************************************
3218 * ixgbe_if_crcstrip_set
3219 ************************************************************************/
3220 static void
3221 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
3222 {
3223 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3224 struct ixgbe_hw *hw = &sc->hw;
3225 /* CRC stripping is set in two places:
3226 * IXGBE_HLREG0 (modified on init_locked and hw reset) and
3227 * IXGBE_RDRXCTL (set by the original driver in
3228 * ixgbe_setup_hw_rsc(), called in init_locked; we disable
3229 * that setting when netmap is compiled in).
3230 * We update the values here, but also in ixgbe.c, because
3231 * init_locked is sometimes called outside our control.
3232 */
3233 uint32_t hl, rxc;
3234
3235 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3236 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3237 #ifdef NETMAP
3238 if (netmap_verbose)
3239 D("%s read HLREG 0x%x rxc 0x%x",
3240 onoff ? "enter" : "exit", hl, rxc);
3241 #endif
3242 /* hw requirements ... */
3243 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3244 rxc |= IXGBE_RDRXCTL_RSCACKC;
3245 if (onoff && !crcstrip) {
3246 /* keep the crc. Fast rx */
3247 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
3248 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
3249 } else {
3250 /* reset default mode */
3251 hl |= IXGBE_HLREG0_RXCRCSTRP;
3252 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
3253 }
3254 #ifdef NETMAP
3255 if (netmap_verbose)
3256 D("%s write HLREG 0x%x rxc 0x%x",
3257 onoff ? "enter" : "exit", hl, rxc);
3258 #endif
3259 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
3260 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
3261 } /* ixgbe_if_crcstrip_set */
3262
3263 /*********************************************************************
3264 * ixgbe_if_init - Init entry point
3265 *
3266 * Used in two ways: It is used by the stack as an init
3267 * entry point in network interface structure. It is also
3268 * used by the driver as a hw/sw initialization routine to
3269 * get to a consistent state.
3270 *
3271 * Void; failures are reported via device_printf().
3272 **********************************************************************/
3273 void
3274 ixgbe_if_init(if_ctx_t ctx)
3275 {
3276 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3277 if_t ifp = iflib_get_ifp(ctx);
3278 device_t dev = iflib_get_dev(ctx);
3279 struct ixgbe_hw *hw = &sc->hw;
3280 struct ix_rx_queue *rx_que;
3281 struct ix_tx_queue *tx_que;
3282 u32 txdctl, mhadd;
3283 u32 rxdctl, rxctrl;
3284 u32 ctrl_ext;
3285
3286 int i, j, err;
3287
3288 INIT_DEBUGOUT("ixgbe_if_init: begin");
3289
3290 /* Queue indices may change with IOV mode */
3291 ixgbe_align_all_queue_indices(sc);
3292
3293 /* reprogram the RAR[0] in case user changed it. */
3294 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3295
3296 /* Get the latest mac address, User can use a LAA */
3297 bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3298 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3299 hw->addr_ctrl.rar_used_count = 1;
3300
3301 ixgbe_init_hw(hw);
3302
3303 ixgbe_initialize_iov(sc);
3304
3305 ixgbe_initialize_transmit_units(ctx);
3306
3307 /* Setup Multicast table */
3308 ixgbe_if_multi_set(ctx);
3309
3310 /* Determine the correct mbuf pool, based on frame size */
3311 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3312
3313 /* Configure RX settings */
3314 ixgbe_initialize_receive_units(ctx);
3315
3316 /*
3317 * Initialize variable holding task enqueue requests
3318 * from MSI-X interrupts
3319 */
3320 sc->task_requests = 0;
3321
3322 /* Enable SDP & MSI-X interrupts based on adapter */
3323 ixgbe_config_gpie(sc);
3324
3325 /* Set MTU size */
3326 if (if_getmtu(ifp) > ETHERMTU) {
3327 /* aka IXGBE_MAXFRS on 82599 and newer */
3328 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3329 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3330 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3331 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3332 }
3333
3334 /* Now enable all the queues */
3335 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
3336 i++, tx_que++) {
3337 struct tx_ring *txr = &tx_que->txr;
3338
3339 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3340 txdctl |= IXGBE_TXDCTL_ENABLE;
3341 /* Set WTHRESH to 8, burst writeback */
3342 txdctl |= (8 << 16);
3343 /*
3344 * When the internal queue falls below PTHRESH (32),
3345 * start prefetching as long as there are at least
3346 * HTHRESH (1) buffers ready. The values are taken
3347 * from the Intel linux driver 3.8.21.
3348 * Prefetching enables tx line rate even with 1 queue.
3349 */
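/* TXDCTL thresholds: PTHRESH in bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits 22:16 */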
3350 txdctl |= (32 << 0) | (1 << 8);
3351 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3352 }
3353
3354 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
3355 i++, rx_que++) {
3356 struct rx_ring *rxr = &rx_que->rxr;
3357
3358 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3359 if (hw->mac.type == ixgbe_mac_82598EB) {
3360 /*
3361 * PTHRESH = 21
3362 * HTHRESH = 4
3363 * WTHRESH = 8
3364 */
3365 rxdctl &= ~0x3FFFFF;
3366 rxdctl |= 0x080420;
3367 }
3368 rxdctl |= IXGBE_RXDCTL_ENABLE;
3369 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3370 for (j = 0; j < 10; j++) {
3371 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3372 IXGBE_RXDCTL_ENABLE)
3373 break;
3374 else
3375 msec_delay(1);
3376 }
3377 wmb();
3378 }
3379
3380 /* Enable Receive engine */
3381 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3382 if (hw->mac.type == ixgbe_mac_82598EB)
3383 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3384 rxctrl |= IXGBE_RXCTRL_RXEN;
3385 ixgbe_enable_rx_dma(hw, rxctrl);
3386
3387 /* Set up MSI/MSI-X routing */
3388 if (ixgbe_enable_msix) {
3389 ixgbe_configure_ivars(sc);
3390 /* Set up auto-mask */
3391 if (hw->mac.type == ixgbe_mac_82598EB)
3392 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3393 else {
3394 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3395 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3396 }
3397 } else { /* Simple settings for Legacy/MSI */
3398 ixgbe_set_ivar(sc, 0, 0, 0);
3399 ixgbe_set_ivar(sc, 0, 0, 1);
3400 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3401 }
3402
3403 ixgbe_init_fdir(sc);
3404
3405 /*
3406 * Check on any SFP devices that
3407 * need to be kick-started
3408 */
3409 if (hw->phy.type == ixgbe_phy_none) {
3410 err = hw->phy.ops.identify(hw);
3411 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3412 device_printf(dev,
3413 "Unsupported SFP+ module type was detected.\n");
3414 return;
3415 }
3416 }
3417
3418 /* Set moderation on the Link interrupt */
3419 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3420
3421 /* Enable power to the phy. */
3422 ixgbe_set_phy_power(hw, true);
3423
3424 /* Config/Enable Link */
3425 ixgbe_config_link(ctx);
3426
3427 /* Hardware Packet Buffer & Flow Control setup */
3428 ixgbe_config_delay_values(sc);
3429
3430 /* Initialize the FC settings */
3431 ixgbe_start_hw(hw);
3432
3433 /* Set up VLAN support and filter */
3434 ixgbe_setup_vlan_hw_support(ctx);
3435
3436 /* Setup DMA Coalescing */
3437 ixgbe_config_dmac(sc);
3438
3439 /* And now turn on interrupts */
3440 ixgbe_if_enable_intr(ctx);
3441
3442 /* Enable the use of the MBX by the VF's */
3443 if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3444 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3445 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3446 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3447 }
3448
3449 } /* ixgbe_if_init */
3450
3451 /************************************************************************
3452 * ixgbe_set_ivar
3453 *
3454 * Setup the correct IVAR register for a particular MSI-X interrupt
3455 * (yes this is all very magic and confusing :)
3456 * - entry is the register array entry
3457 * - vector is the MSI-X vector for this queue
3458 * - type is RX/TX/MISC
3459 ************************************************************************/
3460 static void
3461 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3462 {
3463 struct ixgbe_hw *hw = &sc->hw;
3464 u32 ivar, index;
3465
3466 vector |= IXGBE_IVAR_ALLOC_VAL;
3467
3468 switch (hw->mac.type) {
3469 case ixgbe_mac_82598EB:
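/*
 * 82598 packs four 8-bit IVAR entries per 32-bit register;
 * RX entries occupy indices 0-63, TX entries 64-127 (hence type * 64).
 */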
3470 if (type == -1)
3471 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3472 else
3473 entry += (type * 64);
3474 index = (entry >> 2) & 0x1F;
3475 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3476 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3477 ivar |= (vector << (8 * (entry & 0x3)));
3478 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3479 break;
3480 case ixgbe_mac_82599EB:
3481 case ixgbe_mac_X540:
3482 case ixgbe_mac_X550:
3483 case ixgbe_mac_X550EM_x:
3484 case ixgbe_mac_X550EM_a:
3485 case ixgbe_mac_E610:
3486 if (type == -1) { /* MISC IVAR */
3487 index = (entry & 1) * 8;
3488 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3489 ivar &= ~(0xFF << index);
3490 ivar |= (vector << index);
3491 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3492 } else { /* RX/TX IVARS */
3493 index = (16 * (entry & 1)) + (8 * type);
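/* Two queues per IVAR register: RX in bits 7:0/23:16, TX in bits 15:8/31:24 */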
3494 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3495 ivar &= ~(0xFF << index);
3496 ivar |= (vector << index);
3497 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3498 }
3499 default:
3500 break;
3501 }
3502 } /* ixgbe_set_ivar */
3503
3504 /************************************************************************
3505 * ixgbe_configure_ivars
3506 ************************************************************************/
3507 static void
3508 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3509 {
3510 struct ix_rx_queue *rx_que = sc->rx_queues;
3511 struct ix_tx_queue *tx_que = sc->tx_queues;
3512 u32 newitr;
3513
3514 if (ixgbe_max_interrupt_rate > 0)
3515 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
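/* The mask keeps only the EITR interval field (bits 11:3) */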
3516 else {
3517 /*
3518 * Disable DMA coalescing if interrupt moderation is
3519 * disabled.
3520 */
3521 sc->dmac = 0;
3522 newitr = 0;
3523 }
3524
3525 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3526 struct rx_ring *rxr = &rx_que->rxr;
3527
3528 /* First the RX queue entry */
3529 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3530
3531 /* Set an Initial EITR value */
3532 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3533 }
3534 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3535 struct tx_ring *txr = &tx_que->txr;
3536
3537 /* ... and the TX */
3538 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3539 }
3540 /* For the Link interrupt */
3541 ixgbe_set_ivar(sc, 1, sc->vector, -1);
3542 } /* ixgbe_configure_ivars */
3543
3544 /************************************************************************
3545 * ixgbe_config_gpie
3546 ************************************************************************/
3547 static void
3548 ixgbe_config_gpie(struct ixgbe_softc *sc)
3549 {
3550 struct ixgbe_hw *hw = &sc->hw;
3551 u32 gpie;
3552
3553 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3554
3555 if (sc->intr_type == IFLIB_INTR_MSIX) {
3556 /* Enable Enhanced MSI-X mode */
3557 gpie |= IXGBE_GPIE_MSIX_MODE |
3558 IXGBE_GPIE_EIAME |
3559 IXGBE_GPIE_PBA_SUPPORT |
3560 IXGBE_GPIE_OCD;
3561 }
3562
3563 /* Fan Failure Interrupt */
3564 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3565 gpie |= IXGBE_SDP1_GPIEN;
3566
3567 /* Thermal Sensor Interrupt */
3568 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3569 gpie |= IXGBE_SDP0_GPIEN_X540;
3570
3571 /* Link detection */
3572 switch (hw->mac.type) {
3573 case ixgbe_mac_82599EB:
3574 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3575 break;
3576 case ixgbe_mac_X550EM_x:
3577 case ixgbe_mac_X550EM_a:
3578 gpie |= IXGBE_SDP0_GPIEN_X540;
3579 break;
3580 default:
3581 break;
3582 }
3583
3584 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3585
3586 } /* ixgbe_config_gpie */
3587
3588 /************************************************************************
3589 * ixgbe_config_delay_values
3590 *
3591 * Requires sc->max_frame_size to be set.
3592 ************************************************************************/
3593 static void
3594 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3595 {
3596 struct ixgbe_hw *hw = &sc->hw;
3597 u32 rxpb, frame, size, tmp;
3598
3599 frame = sc->max_frame_size;
3600
3601 /* Calculate High Water */
3602 switch (hw->mac.type) {
3603 case ixgbe_mac_X540:
3604 case ixgbe_mac_X550:
3605 case ixgbe_mac_X550EM_x:
3606 case ixgbe_mac_X550EM_a:
3607 tmp = IXGBE_DV_X540(frame, frame);
3608 break;
3609 default:
3610 tmp = IXGBE_DV(frame, frame);
3611 break;
3612 }
3613 size = IXGBE_BT2KB(tmp);
3614 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
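/* RXPBSIZE is in bytes; >> 10 converts to KB so both terms of the subtraction match */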
3615 hw->fc.high_water[0] = rxpb - size;
3616
3617 /* Now calculate Low Water */
3618 switch (hw->mac.type) {
3619 case ixgbe_mac_X540:
3620 case ixgbe_mac_X550:
3621 case ixgbe_mac_X550EM_x:
3622 case ixgbe_mac_X550EM_a:
3623 tmp = IXGBE_LOW_DV_X540(frame);
3624 break;
3625 default:
3626 tmp = IXGBE_LOW_DV(frame);
3627 break;
3628 }
3629 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3630
3631 hw->fc.pause_time = IXGBE_FC_PAUSE;
3632 hw->fc.send_xon = true;
3633 } /* ixgbe_config_delay_values */
3634
3635 /************************************************************************
3636 * ixgbe_set_multi - Multicast Update
3637 *
3638 * Called whenever multicast address list is updated.
3639 ************************************************************************/
3640 static u_int
3641 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3642 {
3643 struct ixgbe_softc *sc = arg;
3644 struct ixgbe_mc_addr *mta = sc->mta;
3645
3646 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3647 return (0);
3648 bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3649 mta[idx].vmdq = sc->pool;
3650
3651 return (1);
3652 } /* ixgbe_mc_filter_apply */
3653
3654 static void
3655 ixgbe_if_multi_set(if_ctx_t ctx)
3656 {
3657 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3658 struct ixgbe_mc_addr *mta;
3659 if_t ifp = iflib_get_ifp(ctx);
3660 u8 *update_ptr;
3661 u32 fctrl;
3662 u_int mcnt;
3663
3664 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3665
3666 mta = sc->mta;
3667 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3668
3669 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
3670 sc);
3671
3672 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3673 update_ptr = (u8 *)mta;
3674 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3675 ixgbe_mc_array_itr, true);
3676 }
3677
3678 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3679
3680 if (if_getflags(ifp) & IFF_PROMISC)
3681 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3682 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3683 if_getflags(ifp) & IFF_ALLMULTI) {
3684 fctrl |= IXGBE_FCTRL_MPE;
3685 fctrl &= ~IXGBE_FCTRL_UPE;
3686 } else
3687 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3688
3689 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3690 } /* ixgbe_if_multi_set */
3691
3692 /************************************************************************
3693 * ixgbe_mc_array_itr
3694 *
3695 * An iterator function needed by the multicast shared code.
3696 * It feeds the shared code routine the addresses in the
3697 * array built by ixgbe_if_multi_set(), one by one.
3698 ************************************************************************/
3699 static u8 *
3700 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3701 {
3702 struct ixgbe_mc_addr *mta;
3703
3704 mta = (struct ixgbe_mc_addr *)*update_ptr;
3705 *vmdq = mta->vmdq;
3706
3707 *update_ptr = (u8*)(mta + 1);
3708
3709 return (mta->addr);
3710 } /* ixgbe_mc_array_itr */
3711
3712 /************************************************************************
3713 * ixgbe_if_timer - Timer routine
3714 *
3715 * Checks for link status, updates statistics,
3716 * and runs the watchdog check.
3717 ************************************************************************/
3718 static void
3719 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3720 {
3721 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3722
3723 if (qid != 0)
3724 return;
3725
3726 /* Check for pluggable optics */
3727 if (sc->sfp_probe)
3728 if (!ixgbe_sfp_probe(ctx))
3729 return; /* Nothing to do */
3730
3731 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3732
3733 /* Fire off the adminq task */
3734 iflib_admin_intr_deferred(ctx);
3735
3736 } /* ixgbe_if_timer */
3737
3738 /************************************************************************
3739 * ixgbe_fw_mode_timer - FW mode timer routine
3740 ************************************************************************/
3741 static void
3742 ixgbe_fw_mode_timer(void *arg)
3743 {
3744 struct ixgbe_softc *sc = arg;
3745 struct ixgbe_hw *hw = &sc->hw;
3746
3747 if (ixgbe_fw_recovery_mode(hw)) {
3748 if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
3749 /* Firmware error detected, entering recovery mode */
3750 device_printf(sc->dev,
3751 "Firmware recovery mode detected. Limiting"
3752 " functionality. Refer to the Intel(R) Ethernet"
3753 " Adapters and Devices User Guide for details on"
3754 " firmware recovery mode.\n");
3755
3756 if (hw->adapter_stopped == FALSE)
3757 ixgbe_if_stop(sc->ctx);
3758 }
3759 } else
3760 atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
3761
3762
3763 callout_reset(&sc->fw_mode_timer, hz,
3764 ixgbe_fw_mode_timer, sc);
3765 } /* ixgbe_fw_mode_timer */
3766
3767 /************************************************************************
3768 * ixgbe_sfp_probe
3769 *
3770 * Determine if a port has optics inserted.
3771 ************************************************************************/
3772 static bool
3773 ixgbe_sfp_probe(if_ctx_t ctx)
3774 {
3775 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3776 struct ixgbe_hw *hw = &sc->hw;
3777 device_t dev = iflib_get_dev(ctx);
3778 bool result = false;
3779
3780 if ((hw->phy.type == ixgbe_phy_nl) &&
3781 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3782 s32 ret = hw->phy.ops.identify_sfp(hw);
3783 if (ret)
3784 goto out;
3785 ret = hw->phy.ops.reset(hw);
3786 sc->sfp_probe = false;
3787 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3788 device_printf(dev,
3789 "Unsupported SFP+ module detected!");
3790 device_printf(dev,
3791 "Reload driver with supported module.\n");
3792 goto out;
3793 } else
3794 device_printf(dev, "SFP+ module detected!\n");
3795 /* We now have supported optics */
3796 result = true;
3797 }
3798 out:
3799
3800 return (result);
3801 } /* ixgbe_sfp_probe */
3802
3803 /************************************************************************
3804 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3805 ************************************************************************/
3806 static void
3807 ixgbe_handle_mod(void *context)
3808 {
3809 if_ctx_t ctx = context;
3810 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3811 struct ixgbe_hw *hw = &sc->hw;
3812 device_t dev = iflib_get_dev(ctx);
3813 u32 err, cage_full = 0;
3814
3815 if (sc->hw.need_crosstalk_fix) {
3816 switch (hw->mac.type) {
3817 case ixgbe_mac_82599EB:
3818 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3819 IXGBE_ESDP_SDP2;
3820 break;
3821 case ixgbe_mac_X550EM_x:
3822 case ixgbe_mac_X550EM_a:
3823 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3824 IXGBE_ESDP_SDP0;
3825 break;
3826 default:
3827 break;
3828 }
3829
3830 if (!cage_full)
3831 goto handle_mod_out;
3832 }
3833
3834 err = hw->phy.ops.identify_sfp(hw);
3835 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3836 device_printf(dev,
3837 "Unsupported SFP+ module type was detected.\n");
3838 goto handle_mod_out;
3839 }
3840
3841 if (hw->mac.type == ixgbe_mac_82598EB)
3842 err = hw->phy.ops.reset(hw);
3843 else
3844 err = hw->mac.ops.setup_sfp(hw);
3845
3846 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3847 device_printf(dev,
3848 "Setup failure - unsupported SFP+ module type.\n");
3849 goto handle_mod_out;
3850 }
3851 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3852 return;
3853
3854 handle_mod_out:
3855 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3856 } /* ixgbe_handle_mod */
3857
3858
3859 /************************************************************************
3860 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3861 ************************************************************************/
3862 static void
3863 ixgbe_handle_msf(void *context)
3864 {
3865 if_ctx_t ctx = context;
3866 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3867 struct ixgbe_hw *hw = &sc->hw;
3868 u32 autoneg;
3869 bool negotiate;
3870
3871 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3872 sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3873
3874 autoneg = hw->phy.autoneg_advertised;
3875 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3876 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3877 if (hw->mac.ops.setup_link)
3878 hw->mac.ops.setup_link(hw, autoneg, true);
3879
3880 /* Adjust media types shown in ifconfig */
3881 ifmedia_removeall(sc->media);
3882 ixgbe_add_media_types(sc->ctx);
3883 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3884 } /* ixgbe_handle_msf */
3885
3886 /************************************************************************
3887 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3888 ************************************************************************/
3889 static void
3890 ixgbe_handle_phy(void *context)
3891 {
3892 if_ctx_t ctx = context;
3893 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3894 struct ixgbe_hw *hw = &sc->hw;
3895 int error;
3896
3897 error = hw->phy.ops.handle_lasi(hw);
3898 if (error == IXGBE_ERR_OVERTEMP)
3899 device_printf(sc->dev,
3900 "CRITICAL: EXTERNAL PHY OVER TEMP!!"
3901 " PHY will downshift to lower power state!\n");
3902 else if (error)
3903 device_printf(sc->dev,
3904 "Error handling LASI interrupt: %d\n", error);
3905 } /* ixgbe_handle_phy */
3906
3907 /************************************************************************
3908 * ixgbe_enable_lse - enable link status events
3909 *
3910 * Sets mask and enables link status events
3911 ************************************************************************/
3912 s32 ixgbe_enable_lse(struct ixgbe_softc *sc)
3913 {
3914 s32 error;
3915
3916 u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
3917 IXGBE_ACI_LINK_EVENT_MEDIA_NA |
3918 IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
3919 IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
3920
3921 error = ixgbe_configure_lse(&sc->hw, TRUE, mask);
3922 if (error)
3923 return (error);
3924
3925 sc->lse_mask = mask;
3926 return (IXGBE_SUCCESS);
3927 } /* ixgbe_enable_lse */
3928
3929 /************************************************************************
3930 * ixgbe_disable_lse - disable link status events
3931 ************************************************************************/
3932 s32 ixgbe_disable_lse(struct ixgbe_softc *sc)
3933 {
3934 s32 error;
3935
3936 error = ixgbe_configure_lse(&sc->hw, false, sc->lse_mask);
3937 if (error)
3938 return (error);
3939
3940 sc->lse_mask = 0;
3941 return (IXGBE_SUCCESS);
3942 } /* ixgbe_disable_lse */
3943
3944 /************************************************************************
3945 * ixgbe_handle_fw_event - Tasklet for MSI-X Link Status Event interrupts
3946 ************************************************************************/
3947 static void
3948 ixgbe_handle_fw_event(void *context)
3949 {
3950 if_ctx_t ctx = context;
3951 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3952 struct ixgbe_hw *hw = &sc->hw;
3953 struct ixgbe_aci_event event;
3954 bool pending = false;
3955 s32 error;
3956
3957 event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
3958 event.msg_buf = malloc(event.buf_len, M_IXGBE, M_ZERO | M_NOWAIT);
3959 if (!event.msg_buf) {
3960 device_printf(sc->dev, "Can not allocate buffer for "
3961 "event message\n");
3962 return;
3963 }
3964
3965 do {
3966 error = ixgbe_aci_get_event(hw, &event, &pending);
3967 if (error) {
3968 device_printf(sc->dev, "Error getting event from "
3969 "FW:%d\n", error);
3970 break;
3971 }
3972
3973 switch (le16toh(event.desc.opcode)) {
3974 case ixgbe_aci_opc_get_link_status:
3975 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
3976 break;
3977
3978 case ixgbe_aci_opc_temp_tca_event:
3979 if (hw->adapter_stopped == FALSE)
3980 ixgbe_if_stop(ctx);
3981 device_printf(sc->dev,
3982 "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3983 device_printf(sc->dev, "System shutdown required!\n");
3984 break;
3985
3986 default:
3987 device_printf(sc->dev,
3988 "Unknown FW event captured, opcode=0x%04X\n",
3989 le16toh(event.desc.opcode));
3990 break;
3991 }
3992 } while (pending);
3993
3994 free(event.msg_buf, M_IXGBE);
3995 } /* ixgbe_handle_fw_event */
3996
3997 /************************************************************************
3998 * ixgbe_if_stop - Stop the hardware
3999 *
4000 * Disables all traffic on the adapter by issuing a
4001 * global reset on the MAC and deallocates TX/RX buffers.
4002 ************************************************************************/
4003 static void
4004 ixgbe_if_stop(if_ctx_t ctx)
4005 {
4006 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4007 struct ixgbe_hw *hw = &sc->hw;
4008
4009 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
4010
4011 ixgbe_reset_hw(hw);
4012 hw->adapter_stopped = false;
4013 ixgbe_stop_adapter(hw);
4014 if (hw->mac.type == ixgbe_mac_82599EB)
4015 ixgbe_stop_mac_link_on_d3_82599(hw);
4016 /* Turn off the laser - noop with no optics */
4017 ixgbe_disable_tx_laser(hw);
4018
4019 /* Update the stack */
4020 sc->link_up = false;
4021 ixgbe_if_update_admin_status(ctx);
4022
4023 /* reprogram the RAR[0] in case user changed it. */
4024 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
4025
4026 return;
4027 } /* ixgbe_if_stop */
4028
4029 /************************************************************************
4030 * ixgbe_if_update_admin_status - Update OS on link state
4031 *
4032 * Note: Only updates the OS on the cached link state.
4033 * The real check of the hardware only happens with
4034 * a link interrupt.
4035 ************************************************************************/
4036 static void
4037 ixgbe_if_update_admin_status(if_ctx_t ctx)
4038 {
4039 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4040 device_t dev = iflib_get_dev(ctx);
4041
4042 if (sc->link_up) {
4043 if (sc->link_active == false) {
4044 if (bootverbose)
4045 device_printf(dev, "Link is up %d Gbps %s \n",
4046 ((sc->link_speed == 128) ? 10 : 1),
4047 "Full Duplex");
4048 sc->link_active = true;
4049 /* Update any Flow Control changes */
4050 ixgbe_fc_enable(&sc->hw);
4051 /* Update DMA coalescing config */
4052 ixgbe_config_dmac(sc);
4053 iflib_link_state_change(ctx, LINK_STATE_UP,
4054 ixgbe_link_speed_to_baudrate(sc->link_speed));
4055
4056 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
4057 ixgbe_ping_all_vfs(sc);
4058 }
4059 } else { /* Link down */
4060 if (sc->link_active == true) {
4061 if (bootverbose)
4062 device_printf(dev, "Link is Down\n");
4063 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
4064 sc->link_active = false;
4065 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
4066 ixgbe_ping_all_vfs(sc);
4067 }
4068 }
4069
4070 /* Handle task requests from msix_link() */
4071 if (sc->task_requests & IXGBE_REQUEST_TASK_FWEVENT)
4072 ixgbe_handle_fw_event(ctx);
4073 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
4074 ixgbe_handle_mod(ctx);
4075 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
4076 ixgbe_handle_msf(ctx);
4077 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
4078 ixgbe_handle_mbx(ctx);
4079 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
4080 ixgbe_reinit_fdir(ctx);
4081 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
4082 ixgbe_handle_phy(ctx);
4083 sc->task_requests = 0;
4084
4085 ixgbe_update_stats_counters(sc);
4086 } /* ixgbe_if_update_admin_status */
4087
4088 /************************************************************************
4089 * ixgbe_config_dmac - Configure DMA Coalescing
4090 ************************************************************************/
4091 static void
4092 ixgbe_config_dmac(struct ixgbe_softc *sc)
4093 {
4094 struct ixgbe_hw *hw = &sc->hw;
4095 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4096
4097 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4098 return;
4099
4100 if (dcfg->watchdog_timer ^ sc->dmac ||
4101 dcfg->link_speed ^ sc->link_speed) {
4102 dcfg->watchdog_timer = sc->dmac;
4103 dcfg->fcoe_en = false;
4104 dcfg->link_speed = sc->link_speed;
4105 dcfg->num_tcs = 1;
4106
4107 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4108 dcfg->watchdog_timer, dcfg->link_speed);
4109
4110 hw->mac.ops.dmac_config(hw);
4111 }
4112 } /* ixgbe_config_dmac */
4113
4114 /************************************************************************
4115 * ixgbe_if_enable_intr
4116 ************************************************************************/
4117 void
4118 ixgbe_if_enable_intr(if_ctx_t ctx)
4119 {
4120 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4121 struct ixgbe_hw *hw = &sc->hw;
4122 struct ix_rx_queue *que = sc->rx_queues;
4123 u32 mask, fwsm;
4124
4125 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
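/* Queue vectors are excluded here; they are enabled one by one below */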
4126
4127 switch (sc->hw.mac.type) {
4128 case ixgbe_mac_82599EB:
4129 mask |= IXGBE_EIMS_ECC;
4130 /* Temperature sensor on some adapters */
4131 mask |= IXGBE_EIMS_GPI_SDP0;
4132 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
4133 mask |= IXGBE_EIMS_GPI_SDP1;
4134 mask |= IXGBE_EIMS_GPI_SDP2;
4135 break;
4136 case ixgbe_mac_X540:
4137 /* Detect if Thermal Sensor is enabled */
4138 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4139 if (fwsm & IXGBE_FWSM_TS_ENABLED)
4140 mask |= IXGBE_EIMS_TS;
4141 mask |= IXGBE_EIMS_ECC;
4142 break;
4143 case ixgbe_mac_X550:
4144 /* MAC thermal sensor is automatically enabled */
4145 mask |= IXGBE_EIMS_TS;
4146 mask |= IXGBE_EIMS_ECC;
4147 break;
4148 case ixgbe_mac_X550EM_x:
4149 case ixgbe_mac_X550EM_a:
4150 /* Some devices use SDP0 for important information */
4151 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
4152 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
4153 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
4154 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
4155 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
4156 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
4157 mask |= IXGBE_EICR_GPI_SDP0_X540;
4158 mask |= IXGBE_EIMS_ECC;
4159 break;
4160 case ixgbe_mac_E610:
4161 mask |= IXGBE_EIMS_FW_EVENT;
4162 break;
4163 default:
4164 break;
4165 }
4166
4167 /* Enable Fan Failure detection */
4168 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
4169 mask |= IXGBE_EIMS_GPI_SDP1;
4170 /* Enable SR-IOV */
4171 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
4172 mask |= IXGBE_EIMS_MAILBOX;
4173 /* Enable Flow Director */
4174 if (sc->feat_en & IXGBE_FEATURE_FDIR)
4175 mask |= IXGBE_EIMS_FLOW_DIR;
4176
4177 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4178
4179 /* With MSI-X we use auto clear */
4180 if (sc->intr_type == IFLIB_INTR_MSIX) {
4181 mask = IXGBE_EIMS_ENABLE_MASK;
4182 /* Don't autoclear Link */
4183 mask &= ~IXGBE_EIMS_OTHER;
4184 mask &= ~IXGBE_EIMS_LSC;
4185 mask &= ~IXGBE_EIMS_FW_EVENT;
4186 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
4187 mask &= ~IXGBE_EIMS_MAILBOX;
4188 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4189 }
4190
4191 /*
4192 * Now enable all queues, this is done separately to
4193 * allow for handling the extended (beyond 32) MSI-X
4194 * vectors that can be used by 82599
4195 */
4196 for (int i = 0; i < sc->num_rx_queues; i++, que++)
4197 ixgbe_enable_queue(sc, que->msix);
4198
4199 IXGBE_WRITE_FLUSH(hw);
4200
4201 } /* ixgbe_if_enable_intr */
4202
4203 /************************************************************************
4204 * ixgbe_if_disable_intr
4205 ************************************************************************/
4206 static void
4207 ixgbe_if_disable_intr(if_ctx_t ctx)
4208 {
4209 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4210
4211 if (sc->intr_type == IFLIB_INTR_MSIX)
4212 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
4213 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
4214 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
4215 } else {
4216 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
4217 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
4218 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
4219 }
4220 IXGBE_WRITE_FLUSH(&sc->hw);
4221
4222 } /* ixgbe_if_disable_intr */
4223
4224 /************************************************************************
4225 * ixgbe_link_intr_enable
4226 ************************************************************************/
4227 static void
4228 ixgbe_link_intr_enable(if_ctx_t ctx)
4229 {
4230 struct ixgbe_hw *hw =
4231 &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
4232
4233 /* Re-enable other interrupts */
4234 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
4235 } /* ixgbe_link_intr_enable */
4236
4237 /************************************************************************
4238 * ixgbe_if_rx_queue_intr_enable
4239 ************************************************************************/
4240 static int
4241 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
4242 {
4243 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4244 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
4245
4246 ixgbe_enable_queue(sc, que->msix);
4247
4248 return (0);
4249 } /* ixgbe_if_rx_queue_intr_enable */
4250
4251 /************************************************************************
4252 * ixgbe_enable_queue
4253 ************************************************************************/
4254 static void
4255 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
4256 {
4257 struct ixgbe_hw *hw = &sc->hw;
4258 u64 queue = 1ULL << vector;
4259 u32 mask;
4260
4261 if (hw->mac.type == ixgbe_mac_82598EB) {
4262 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4263 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4264 } else {
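/* EIMS_EX(0) covers vectors 0-31, EIMS_EX(1) covers vectors 32-63 */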
4265 mask = (queue & 0xFFFFFFFF);
4266 if (mask)
4267 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4268 mask = (queue >> 32);
4269 if (mask)
4270 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4271 }
4272 } /* ixgbe_enable_queue */
4273
4274 /************************************************************************
4275 * ixgbe_disable_queue
4276 ************************************************************************/
4277 static void
4278 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
4279 {
4280 struct ixgbe_hw *hw = &sc->hw;
4281 u64 queue = 1ULL << vector;
4282 u32 mask;
4283
4284 if (hw->mac.type == ixgbe_mac_82598EB) {
4285 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4286 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
4287 } else {
4288 mask = (queue & 0xFFFFFFFF);
4289 if (mask)
4290 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
4291 mask = (queue >> 32);
4292 if (mask)
4293 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
4294 }
4295 } /* ixgbe_disable_queue */
4296
4297 /************************************************************************
4298 * ixgbe_intr - Legacy Interrupt Service Routine
4299 ************************************************************************/
4300 int
4301 ixgbe_intr(void *arg)
4302 {
4303 struct ixgbe_softc *sc = arg;
4304 struct ix_rx_queue *que = sc->rx_queues;
4305 struct ixgbe_hw *hw = &sc->hw;
4306 if_ctx_t ctx = sc->ctx;
4307 u32 eicr, eicr_mask;
4308
4309 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4310
4311 ++que->irqs;
4312 if (eicr == 0) {
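/* A zero EICR means the interrupt was not ours (shared line); re-arm and claim it */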
4313 ixgbe_if_enable_intr(ctx);
4314 return (FILTER_HANDLED);
4315 }
4316
4317 /* Check for fan failure */
4318 if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
4319 (eicr & IXGBE_EICR_GPI_SDP1)) {
4320 device_printf(sc->dev,
4321 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4322 IXGBE_WRITE_REG(hw, IXGBE_EIMS,
4323 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4324 }
4325
4326 /* Link status change */
4327 if (eicr & IXGBE_EICR_LSC) {
4328 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
4329 iflib_admin_intr_deferred(ctx);
4330 }
4331
4332 if (ixgbe_is_sfp(hw)) {
4333 /* Pluggable optics-related interrupt */
4334 if (hw->mac.type >= ixgbe_mac_X540)
4335 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
4336 else
4337 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4338
4339 if (eicr & eicr_mask) {
4340 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
4341 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
4342 }
4343
4344 if ((hw->mac.type == ixgbe_mac_82599EB) &&
4345 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4346 IXGBE_WRITE_REG(hw, IXGBE_EICR,
4347 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4348 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4349 }
4350 }
4351
4352 /* External PHY interrupt */
4353 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
4354 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
4355 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
4356 }
4357
4358 return (FILTER_SCHEDULE_THREAD);
4359 } /* ixgbe_intr */
4360
4361 /************************************************************************
4362 * ixgbe_free_pci_resources
4363 ************************************************************************/
4364 static void
4365 ixgbe_free_pci_resources(if_ctx_t ctx)
4366 {
4367 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4368 struct ix_rx_queue *que = sc->rx_queues;
4369 device_t dev = iflib_get_dev(ctx);
4370
4371 /* Release all MSI-X queue resources */
4372 if (sc->intr_type == IFLIB_INTR_MSIX)
4373 iflib_irq_free(ctx, &sc->irq);
4374
4375 if (que != NULL) {
4376 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
4377 iflib_irq_free(ctx, &que->que_irq);
4378 }
4379 }
4380
4381 if (sc->pci_mem != NULL)
4382 bus_release_resource(dev, SYS_RES_MEMORY,
4383 rman_get_rid(sc->pci_mem), sc->pci_mem);
4384 } /* ixgbe_free_pci_resources */
4385
4386 /************************************************************************
4387 * ixgbe_sysctl_flowcntl
4388 *
4389 * SYSCTL wrapper around setting Flow Control
4390 ************************************************************************/
4391 static int
4392 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4393 {
4394 struct ixgbe_softc *sc;
4395 int error, fc;
4396
4397 sc = (struct ixgbe_softc *)arg1;
4398 fc = sc->hw.fc.requested_mode;
4399
4400 error = sysctl_handle_int(oidp, &fc, 0, req);
4401 if ((error) || (req->newptr == NULL))
4402 return (error);
4403
4404 /* Don't bother if it's not changed */
4405 if (fc == sc->hw.fc.current_mode)
4406 return (0);
4407
4408 return ixgbe_set_flowcntl(sc, fc);
4409 } /* ixgbe_sysctl_flowcntl */
4410
4411 /************************************************************************
4412 * ixgbe_set_flowcntl - Set flow control
4413 *
4414 * Flow control values:
4415 * 0 - off
4416 * 1 - rx pause
4417 * 2 - tx pause
4418 * 3 - full
4419 ************************************************************************/
4420 static int
4421 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4422 {
4423 switch (fc) {
4424 case ixgbe_fc_rx_pause:
4425 case ixgbe_fc_tx_pause:
4426 case ixgbe_fc_full:
4427 if (sc->num_rx_queues > 1)
4428 ixgbe_disable_rx_drop(sc);
4429 break;
4430 case ixgbe_fc_none:
4431 if (sc->num_rx_queues > 1)
4432 ixgbe_enable_rx_drop(sc);
4433 break;
4434 default:
4435 return (EINVAL);
4436 }
4437
4438 sc->hw.fc.requested_mode = fc;
4439
4440 /* Don't autoneg if forcing a value */
4441 sc->hw.fc.disable_fc_autoneg = true;
4442 ixgbe_fc_enable(&sc->hw);
4443
4444 return (0);
4445 } /* ixgbe_set_flowcntl */
4446
4447 /************************************************************************
4448 * ixgbe_enable_rx_drop
4449 *
4450 * Enable the hardware to drop packets when the buffer is
4451 * full. This is useful with multiqueue, so that no single
4452 * queue being full stalls the entire RX engine. We only
4453 * enable this when Multiqueue is enabled AND Flow Control
4454 * is disabled.
4455 ************************************************************************/
4456 static void
4457 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
4458 {
4459 struct ixgbe_hw *hw = &sc->hw;
4460 struct rx_ring *rxr;
4461 u32 srrctl;
4462
4463 for (int i = 0; i < sc->num_rx_queues; i++) {
4464 rxr = &sc->rx_queues[i].rxr;
4465 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4466 srrctl |= IXGBE_SRRCTL_DROP_EN;
4467 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4468 }
4469
4470 /* enable drop for each vf */
4471 for (int i = 0; i < sc->num_vfs; i++) {
4472 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4473 (IXGBE_QDE_WRITE |
4474 (i << IXGBE_QDE_IDX_SHIFT) |
4475 IXGBE_QDE_ENABLE));
4476 }
4477 } /* ixgbe_enable_rx_drop */
4478
4479 /************************************************************************
4480 * ixgbe_disable_rx_drop
4481 ************************************************************************/
4482 static void
4483 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
4484 {
4485 struct ixgbe_hw *hw = &sc->hw;
4486 struct rx_ring *rxr;
4487 u32 srrctl;
4488
4489 for (int i = 0; i < sc->num_rx_queues; i++) {
4490 rxr = &sc->rx_queues[i].rxr;
4491 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4492 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4493 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4494 }
4495
4496 /* disable drop for each vf */
4497 for (int i = 0; i < sc->num_vfs; i++) {
4498 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4499 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4500 }
4501 } /* ixgbe_disable_rx_drop */
4502
4503 /************************************************************************
4504 * ixgbe_sysctl_advertise
4505 *
4506 * SYSCTL wrapper around setting advertised speed
4507 ************************************************************************/
4508 static int
4509 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4510 {
4511 struct ixgbe_softc *sc;
4512 int error, advertise;
4513
4514 sc = (struct ixgbe_softc *)arg1;
4515 if (atomic_load_acq_int(&sc->recovery_mode))
4516 return (EPERM);
4517
4518 advertise = sc->advertise;
4519
4520 error = sysctl_handle_int(oidp, &advertise, 0, req);
4521 if ((error) || (req->newptr == NULL))
4522 return (error);
4523
4524 return ixgbe_set_advertise(sc, advertise);
4525 } /* ixgbe_sysctl_advertise */
4526
4527 /************************************************************************
4528 * ixgbe_set_advertise - Control advertised link speed
4529 *
4530 * Flags:
4531 * 0x1 - advertise 100 Mb
4532 * 0x2 - advertise 1G
4533 * 0x4 - advertise 10G
4534 * 0x8 - advertise 10 Mb (yes, Mb)
4535 * 0x10 - advertise 2.5G (disabled by default)
4536 * 0x20 - advertise 5G (disabled by default)
4537 *
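* Example: a value of 0x6 (0x2 | 0x4) advertises both 1G and 10G.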
4538 ************************************************************************/
4539 static int
4540 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4541 {
4542 device_t dev = iflib_get_dev(sc->ctx);
4543 struct ixgbe_hw *hw;
4544 ixgbe_link_speed speed = 0;
4545 ixgbe_link_speed link_caps = 0;
4546 s32 err = IXGBE_NOT_IMPLEMENTED;
4547 bool negotiate = false;
4548
4549 /* Checks to validate new value */
4550 if (sc->advertise == advertise) /* no change */
4551 return (0);
4552
4553 hw = &sc->hw;
4554
4555 /* No speed changes for backplane media */
4556 if (hw->phy.media_type == ixgbe_media_type_backplane)
4557 return (ENODEV);
4558
4559 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4560 (hw->phy.multispeed_fiber))) {
4561 device_printf(dev,
4562 "Advertised speed can only be set on copper or multispeed"
4563 " fiber media types.\n");
4564 return (EINVAL);
4565 }
4566
4567 if (advertise < 0x1 || advertise > 0x3F) {
4568 device_printf(dev,
4569 "Invalid advertised speed; valid modes are 0x1 through"
4570 " 0x3F\n");
4571 return (EINVAL);
4572 }
4573
4574 if (hw->mac.ops.get_link_capabilities) {
4575 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4576 &negotiate);
4577 if (err != IXGBE_SUCCESS) {
4578 device_printf(dev,
4579 "Unable to determine supported advertise speeds"
4580 "\n");
4581 return (ENODEV);
4582 }
4583 }
4584
4585 /* Set new value and report new advertised mode */
4586 if (advertise & 0x1) {
4587 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4588 device_printf(dev,
4589 "Interface does not support 100Mb advertised"
4590 " speed\n");
4591 return (EINVAL);
4592 }
4593 speed |= IXGBE_LINK_SPEED_100_FULL;
4594 }
4595 if (advertise & 0x2) {
4596 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4597 device_printf(dev,
4598 "Interface does not support 1Gb advertised speed"
4599 "\n");
4600 return (EINVAL);
4601 }
4602 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4603 }
4604 if (advertise & 0x4) {
4605 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4606 device_printf(dev,
4607 "Interface does not support 10Gb advertised speed"
4608 "\n");
4609 return (EINVAL);
4610 }
4611 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4612 }
4613 if (advertise & 0x8) {
4614 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4615 device_printf(dev,
4616 "Interface does not support 10Mb advertised speed"
4617 "\n");
4618 return (EINVAL);
4619 }
4620 speed |= IXGBE_LINK_SPEED_10_FULL;
4621 }
4622 if (advertise & 0x10) {
4623 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
4624 device_printf(dev,
4625 "Interface does not support 2.5G advertised speed"
4626 "\n");
4627 return (EINVAL);
4628 }
4629 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
4630 }
4631 if (advertise & 0x20) {
4632 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
4633 device_printf(dev,
4634 "Interface does not support 5G advertised speed"
4635 "\n");
4636 return (EINVAL);
4637 }
4638 speed |= IXGBE_LINK_SPEED_5GB_FULL;
4639 }
4640
4641 hw->mac.autotry_restart = true;
4642 hw->mac.ops.setup_link(hw, speed, true);
4643 sc->advertise = advertise;
4644
4645 return (0);
4646 } /* ixgbe_set_advertise */
4647
4648 /************************************************************************
4649 * ixgbe_get_default_advertise - Get default advertised speed settings
4650 *
4651 * Formatted for sysctl usage.
4652 * Flags:
4653 * 0x1 - advertise 100 Mb
4654 * 0x2 - advertise 1G
4655 * 0x4 - advertise 10G
4656 * 0x8 - advertise 10 Mb (yes, Mb)
4657 * 0x10 - advertise 2.5G (disabled by default)
4658 * 0x20 - advertise 5G (disabled by default)
4659 ************************************************************************/
4660 static int
4661 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
4662 {
4663 struct ixgbe_hw *hw = &sc->hw;
4664 int speed;
4665 ixgbe_link_speed link_caps = 0;
4666 s32 err;
4667 bool negotiate = false;
4668
4669 /*
4670 * Advertised speed means nothing unless it's copper or
4671 * multi-speed fiber
4672 */
4673 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4674 !(hw->phy.multispeed_fiber))
4675 return (0);
4676
4677 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4678 if (err != IXGBE_SUCCESS)
4679 return (0);
4680
4681 if (hw->mac.type == ixgbe_mac_X550) {
4682 /*
4683 * 2.5G and 5G autonegotiation speeds on X550
4684 * are disabled by default due to reported
4685 * interoperability issues with some switches.
4686 */
4687 link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
4688 IXGBE_LINK_SPEED_5GB_FULL);
4689 }
4690
4691 speed =
4692 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
4693 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
4694 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
4695 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
4696 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
4697 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
4698
4699 return speed;
4700 } /* ixgbe_get_default_advertise */
4701
4702 /************************************************************************
4703 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4704 *
4705 * Control values:
4706 * 0/1 - off / on (use default value of 1000)
4707 *
4708 * Legal timer values are:
4709 * 50,100,250,500,1000,2000,5000,10000
4710 *
4711 * Turning off interrupt moderation will also turn this off.
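* Example: a value of 1 enables coalescing with the default timer of 1000.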
4712 ************************************************************************/
4713 static int
4714 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4715 {
4716 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4717 if_t ifp = iflib_get_ifp(sc->ctx);
4718 int error;
4719 u16 newval;
4720
4721 newval = sc->dmac;
4722 error = sysctl_handle_16(oidp, &newval, 0, req);
4723 if ((error) || (req->newptr == NULL))
4724 return (error);
4725
4726 switch (newval) {
4727 case 0:
4728 /* Disabled */
4729 sc->dmac = 0;
4730 break;
4731 case 1:
4732 /* Enable and use default */
4733 sc->dmac = 1000;
4734 break;
4735 case 50:
4736 case 100:
4737 case 250:
4738 case 500:
4739 case 1000:
4740 case 2000:
4741 case 5000:
4742 case 10000:
4743 /* Legal values - allow */
4744 sc->dmac = newval;
4745 break;
4746 default:
4747 /* Do nothing, illegal value */
4748 return (EINVAL);
4749 }
4750
4751 /* Re-initialize hardware if it's already running */
4752 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
4753 if_init(ifp, ifp);
4754
4755 return (0);
4756 } /* ixgbe_sysctl_dmac */
4757
4758 #ifdef IXGBE_DEBUG
4759 /************************************************************************
4760 * ixgbe_sysctl_power_state
4761 *
4762 * Sysctl to test power states
4763 * Values:
4764 * 0 - set device to D0
4765 * 3 - set device to D3
4766 * (none) - get current device power state
4767 ************************************************************************/
4768 static int
4769 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4770 {
4771 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4772 device_t dev = sc->dev;
4773 int curr_ps, new_ps, error = 0;
4774
4775 curr_ps = new_ps = pci_get_powerstate(dev);
4776
4777 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4778 if ((error) || (req->newptr == NULL))
4779 return (error);
4780
4781 if (new_ps == curr_ps)
4782 return (0);
4783
4784 if (new_ps == 3 && curr_ps == 0)
4785 error = DEVICE_SUSPEND(dev);
4786 else if (new_ps == 0 && curr_ps == 3)
4787 error = DEVICE_RESUME(dev);
4788 else
4789 return (EINVAL);
4790
4791 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4792
4793 return (error);
4794 } /* ixgbe_sysctl_power_state */
4795 #endif
4796
4797 /************************************************************************
4798 * ixgbe_sysctl_wol_enable
4799 *
4800 * Sysctl to enable/disable the WoL capability,
4801 * if supported by the adapter.
4802 *
4803 * Values:
4804 * 0 - disabled
4805 * 1 - enabled
4806 ************************************************************************/
4807 static int
4808 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4809 {
4810 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4811 struct ixgbe_hw *hw = &sc->hw;
4812 int new_wol_enabled;
4813 int error = 0;
4814
4815 new_wol_enabled = hw->wol_enabled;
4816 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4817 if ((error) || (req->newptr == NULL))
4818 return (error);
4819 new_wol_enabled = !!(new_wol_enabled);
4820 if (new_wol_enabled == hw->wol_enabled)
4821 return (0);
4822
4823 if (new_wol_enabled > 0 && !sc->wol_support)
4824 return (ENODEV);
4825 else
4826 hw->wol_enabled = new_wol_enabled;
4827
4828 return (0);
4829 } /* ixgbe_sysctl_wol_enable */
4830
4831 /************************************************************************
4832 * ixgbe_sysctl_wufc - Wake Up Filter Control
4833 *
4834 * Sysctl to enable/disable the types of packets that the
4835 * adapter will wake up on upon receipt.
4836 * Flags:
4837 * 0x1 - Link Status Change
4838 * 0x2 - Magic Packet
4839 * 0x4 - Direct Exact
4840 * 0x8 - Directed Multicast
4841 * 0x10 - Broadcast
4842 * 0x20 - ARP/IPv4 Request Packet
4843 * 0x40 - Direct IPv4 Packet
4844 * 0x80 - Direct IPv6 Packet
4845 *
4846 * Settings not listed above will cause the sysctl to return an error.
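* Example: a value of 0x12 (0x2 | 0x10) wakes on Magic Packet or Broadcast.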
4847 ************************************************************************/
4848 static int
4849 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4850 {
4851 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4852 int error = 0;
4853 u32 new_wufc;
4854
4855 new_wufc = sc->wufc;
4856
4857 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4858 if ((error) || (req->newptr == NULL))
4859 return (error);
4860 if (new_wufc == sc->wufc)
4861 return (0);
4862
4863 if (new_wufc & 0xffffff00)
4864 return (EINVAL);
4865
4866 new_wufc &= 0xff;
4867 new_wufc |= (0xffffff & sc->wufc);
4868 sc->wufc = new_wufc;
4869
4870 return (0);
4871 } /* ixgbe_sysctl_wufc */
4872
4873 #ifdef IXGBE_DEBUG
4874 /************************************************************************
4875 * ixgbe_sysctl_print_rss_config
4876 ************************************************************************/
4877 static int
4878 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4879 {
4880 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4881 struct ixgbe_hw *hw = &sc->hw;
4882 device_t dev = sc->dev;
4883 struct sbuf *buf;
4884 int error = 0, reta_size;
4885 u32 reg;
4886
4887 if (atomic_load_acq_int(&sc->recovery_mode))
4888 return (EPERM);
4889
4890 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4891 if (!buf) {
4892 device_printf(dev, "Could not allocate sbuf for output.\n");
4893 return (ENOMEM);
4894 }
4895
4896 // TODO: use sbufs to make a string to print out
4897 /* Set multiplier for RETA setup and table size based on MAC */
4898 switch (sc->hw.mac.type) {
4899 case ixgbe_mac_X550:
4900 case ixgbe_mac_X550EM_x:
4901 case ixgbe_mac_X550EM_a:
4902 reta_size = 128;
4903 break;
4904 default:
4905 reta_size = 32;
4906 break;
4907 }
4908
4909 /* Print out the redirection table */
4910 sbuf_cat(buf, "\n");
4911 for (int i = 0; i < reta_size; i++) {
4912 if (i < 32) {
4913 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4914 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4915 } else {
4916 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4917 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4918 }
4919 }
4920
4921 // TODO: print more config
4922
4923 error = sbuf_finish(buf);
4924 if (error)
4925 device_printf(dev, "Error finishing sbuf: %d\n", error);
4926
4927 sbuf_delete(buf);
4928
4929 return (0);
4930 } /* ixgbe_sysctl_print_rss_config */
4931 #endif /* IXGBE_DEBUG */
4932
4933 /************************************************************************
4934 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4935 *
4936 * For X552/X557-AT devices using an external PHY
4937 ************************************************************************/
4938 static int
4939 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4940 {
4941 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4942 struct ixgbe_hw *hw = &sc->hw;
4943 u16 reg;
4944
4945 if (atomic_load_acq_int(&sc->recovery_mode))
4946 return (EPERM);
4947
4948 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4949 device_printf(iflib_get_dev(sc->ctx),
4950 "Device has no supported external thermal sensor.\n");
4951 return (ENODEV);
4952 }
4953
4954 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4955 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
4956 device_printf(iflib_get_dev(sc->ctx),
4957 "Error reading from PHY's current temperature register"
4958 "\n");
4959 return (EAGAIN);
4960 }
4961
4962 /* Shift temp for output */
4963 reg = reg >> 8;
4964
4965 return (sysctl_handle_16(oidp, NULL, reg, req));
4966 } /* ixgbe_sysctl_phy_temp */
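/*
 * Illustrative read of the PHY temperature (sketch; a node name such as
 * dev.ix.<unit>.phy_temp is assumed). The reported value is the upper
 * byte of IXGBE_PHY_CURRENT_TEMP, assumed here to be degrees Celsius:
 *
 *   # sysctl -n dev.ix.0.phy_temp
 *   45
 */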

/************************************************************************
 * ixgbe_sysctl_phy_overtemp_occurred
 *
 * Reports (directly from the PHY) whether the current PHY
 * temperature is over the overtemp threshold.
 ************************************************************************/
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	u16 reg;

	if (atomic_load_acq_int(&sc->recovery_mode))
		return (EPERM);

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(iflib_get_dev(sc->ctx),
		    "Error reading from PHY's temperature status register\n");
		return (EAGAIN);
	}

	/* Reduce the status to the occurrence bit (bit 14). */
	reg = !!(reg & 0x4000);

	return (sysctl_handle_16(oidp, NULL, reg, req));
} /* ixgbe_sysctl_phy_overtemp_occurred */
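/*
 * Illustrative read (sketch; a node name such as
 * dev.ix.<unit>.phy_overtemp_occurred is assumed). Returns 1 if the
 * PHY reports an overtemp event, else 0:
 *
 *   # sysctl -n dev.ix.0.phy_overtemp_occurred
 *   0
 */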

/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 * Sysctl to set the EEE power saving feature
 * Values:
 *   0      - disable EEE
 *   1      - enable EEE
 *   (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	if_t ifp = iflib_get_ifp(sc->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	if (atomic_load_acq_int(&sc->recovery_mode))
		return (EPERM);

	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	retval = ixgbe_setup_eee(&sc->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-negotiation */
	if_init(ifp, ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	else
		sc->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
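/*
 * Illustrative use of the EEE sysctl (sketch; a node name such as
 * dev.ix.<unit>.eee_state is assumed):
 *
 *   # sysctl -n dev.ix.0.eee_state    # query: 0 = disabled, 1 = enabled
 *   # sysctl dev.ix.0.eee_state=1     # enable EEE and restart auto-neg
 */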

/************************************************************************
 * ixgbe_sysctl_tso_tcp_flags_mask
 *
 * Sysctl to view/set one of the three 12-bit TCP flag masks the
 * hardware applies to the TCP header of TSO-generated segments.
 * DTXTCPFLGL holds two masks in its low and high halves and
 * DTXTCPFLGH holds the third; oid_arg2 (0, 1 or 2) selects which
 * field this instance reads or writes.
 ************************************************************************/
static int
ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	u32 reg, val, shift;
	int error, mask;

	sc = oidp->oid_arg1;
	switch (oidp->oid_arg2) {
	case 0:
		reg = IXGBE_DTXTCPFLGL;
		shift = 0;
		break;
	case 1:
		reg = IXGBE_DTXTCPFLGL;
		shift = 16;
		break;
	case 2:
		reg = IXGBE_DTXTCPFLGH;
		shift = 0;
		break;
	default:
		return (EINVAL);
	}
	val = IXGBE_READ_REG(&sc->hw, reg);
	mask = (val >> shift) & 0xfff;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (mask < 0 || mask > 0xfff)
		return (EINVAL);
	val = (val & ~(0xfff << shift)) | (mask << shift);
	IXGBE_WRITE_REG(&sc->hw, reg, val);
	return (0);
}
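/*
 * Illustrative use (sketch; the node names are hypothetical examples,
 * e.g. dev.ix.<unit>.tso_tcp_flags_mask_*, one node per oid_arg2
 * value):
 *
 *   # sysctl -n dev.ix.0.tso_tcp_flags_mask_first_segment
 *   # sysctl dev.ix.0.tso_tcp_flags_mask_first_segment=0xff6
 *
 * Writes outside 0x0-0xfff fail with EINVAL, since each mask field is
 * only 12 bits wide.
 */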

/************************************************************************
 * ixgbe_init_device_features
 ************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
	sc->feat_cap = IXGBE_FEATURE_NETMAP |
	    IXGBE_FEATURE_RSS |
	    IXGBE_FEATURE_MSI |
	    IXGBE_FEATURE_MSIX |
	    IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	case ixgbe_mac_E610:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (sc->feat_cap & IXGBE_FEATURE_EEE)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/* Recovery mode */
	if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
		sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev,
			    "Device does not support Flow Director."
			    " Leaving disabled.\n");
	}
	/*
	 * Message Signaled Interrupts - Extended (MSI-X)
	 * Plain MSI is only enabled if setting up MSI-X fails.
	 */
	if (!ixgbe_enable_msix)
		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		sc->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		sc->feat_en &= ~IXGBE_FEATURE_RSS;
		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */

/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	const char *space = "";

	ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack, a build ID in Intel's SCM */
	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */

	/* FW version */
	if ((nvm_ver.phy_fw_maj == 0x0 &&
	    nvm_ver.phy_fw_min == 0x0 &&
	    nvm_ver.phy_fw_id == 0x0) ||
	    (nvm_ver.phy_fw_maj == 0xF &&
	    nvm_ver.phy_fw_min == 0xFF &&
	    nvm_ver.phy_fw_id == 0xF)) {
		/*
		 * If major, minor and id all read back as 0, reading the
		 * FW version is unsupported; if they read back as
		 * 0xF/0xFF/0xF, the value is invalid. Print nothing in
		 * either case.
		 */
	} else
		sbuf_printf(buf, "fw %d.%d.%d ",
		    nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
		    nvm_ver.phy_fw_id);

	/* NVM version */
	if ((nvm_ver.nvm_major == 0x0 &&
	    nvm_ver.nvm_minor == 0x0 &&
	    nvm_ver.nvm_id == 0x0) ||
	    (nvm_ver.nvm_major == 0xF &&
	    nvm_ver.nvm_minor == 0xFF &&
	    nvm_ver.nvm_id == 0xF)) {
		/*
		 * Same as above: all-zero means reading the NVM version
		 * is unsupported, and 0xF/0xFF/0xF means the value read
		 * is invalid. Print nothing in either case.
		 */
	} else
		sbuf_printf(buf, "nvm %x.%02x.%x ",
		    nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, nvm_ver.or_major, nvm_ver.or_build,
		    nvm_ver.or_patch);
		space = " ";
	}

	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID | 0xFFFFFFFF)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
	}
} /* ixgbe_sbuf_fw_version */

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */
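/*
 * Illustrative attach-time output from ixgbe_print_fw_version() (the
 * values are made up; fields that read back as unsupported or invalid
 * are omitted by ixgbe_sbuf_fw_version() above):
 *
 *   ix0: fw 0.4.2 nvm 1.86.0 eTrack 0x80000d2c
 */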

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */
