/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const pci_vendor_info_t ixgbe_vendor_info_array[] =
{
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, "Intel(R) X520 82599 LS"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
        PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
        /* required last entry */
        PVID_END
};

static void *ixgbe_register(device_t);
static int ixgbe_if_attach_pre(if_ctx_t);
static int ixgbe_if_attach_post(if_ctx_t);
static int ixgbe_if_detach(if_ctx_t);
static int ixgbe_if_shutdown(if_ctx_t);
static int ixgbe_if_suspend(if_ctx_t);
static int ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int ixgbe_if_media_change(if_ctx_t);
static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int ixgbe_if_promisc_set(if_ctx_t, int);
static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int ixgbe_allocate_pci_resources(if_ctx_t);
static int ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int ixgbe_msix_link(void *);
static int ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_fw_mode_timer(void *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int ixgbe_set_advertise(struct ixgbe_softc *, int);
static int ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
        /* Device interface */
        DEVMETHOD(device_register, ixgbe_register),
        DEVMETHOD(device_probe, iflib_device_probe),
        DEVMETHOD(device_attach, iflib_device_attach),
        DEVMETHOD(device_detach, iflib_device_detach),
        DEVMETHOD(device_shutdown, iflib_device_shutdown),
        DEVMETHOD(device_suspend, iflib_device_suspend),
        DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
        DEVMETHOD(pci_iov_init, iflib_device_iov_init),
        DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
        DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
        DEVMETHOD_END
};

static driver_t ix_driver = {
        "ix", ix_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);
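/*
 * DRIVER_MODULE() registers the "ix" driver on the pci bus; each
 * MODULE_DEPEND() triple is the (minimum, preferred, maximum) acceptable
 * version of the named dependency, so (1, 1, 1) requires exactly
 * interface version 1.
 */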

static device_method_t ixgbe_if_methods[] = {
        DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
        DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
        DEVMETHOD(ifdi_detach, ixgbe_if_detach),
        DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
        DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
        DEVMETHOD(ifdi_resume, ixgbe_if_resume),
        DEVMETHOD(ifdi_init, ixgbe_if_init),
        DEVMETHOD(ifdi_stop, ixgbe_if_stop),
        DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
        DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
        DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
        DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
        DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
        DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
        DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
        DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
        DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
        DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
        DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
        DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
        DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
        DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
        DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
        DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
        DEVMETHOD(ifdi_timer, ixgbe_if_timer),
        DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
        DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
        DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
        DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
        DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
        DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
        DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
        DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
        DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
        "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
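/*
 * A worked example of the default above, assuming IXGBE_LOW_LATENCY is 128
 * as defined in ixgbe.h: 4000000 / 128 caps each vector at roughly 31250
 * interrupts per second.
 */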

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");
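/*
 * The ixgbe_fc_* values come from the shared code's enum ixgbe_fc_mode:
 * ixgbe_fc_none (0) disables flow control, ixgbe_fc_rx_pause (1) and
 * ixgbe_fc_tx_pause (2) enable a single direction, and ixgbe_fc_full (3)
 * enables both.
 */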

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
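/*
 * The value is a bit mask consumed by ixgbe_set_advertise(); for example,
 * 0x2 advertises 1 Gb only and 0x6 (0x2 | 0x4) advertises both 1 Gb and
 * 10 Gb, while 0 keeps the hardware default of advertising everything
 * the link supports.
 */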

/*
 * Smart speed setting, default to on.
 * This only works as a compile-time option right now because it is
 * consumed during attach; set this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Defining this on will allow the use of unsupported SFP+ modules;
 * note that if you do so, you are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation,
 * which means that the interrupt rate is varied over time based on the
 * traffic for that interrupt vector.
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; a rate of 20 means every 20th packet will
 * be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
        .isc_magic = IFLIB_MAGIC,
        .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
        .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
        .isc_tx_maxsegsize = PAGE_SIZE,
        .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
        .isc_tso_maxsegsize = PAGE_SIZE,
        .isc_rx_maxsize = PAGE_SIZE * 4,
        .isc_rx_nsegments = 1,
        .isc_rx_maxsegsize = PAGE_SIZE * 4,
        .isc_nfl = 1,
        .isc_ntxqs = 1,
        .isc_nrxqs = 1,

        .isc_admin_intrcnt = 1,
        .isc_vendor_info = ixgbe_vendor_info_array,
        .isc_driver_version = ixgbe_driver_version,
        .isc_driver = &ixgbe_if_driver,
        .isc_flags = IFLIB_TSO_INIT_IP,

        .isc_nrxd_min = {MIN_RXD},
        .isc_ntxd_min = {MIN_TXD},
        .isc_nrxd_max = {MAX_RXD},
        .isc_ntxd_max = {MAX_TXD},
        .isc_nrxd_default = {DEFAULT_RXD},
        .isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        if_softc_ctx_t scctx = sc->shared;
        struct ix_tx_queue *que;
        int i, j, error;

        MPASS(sc->num_tx_queues > 0);
        MPASS(sc->num_tx_queues == ntxqsets);
        MPASS(ntxqs == 1);

        /* Allocate queue structure memory */
        sc->tx_queues =
            (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
            M_IXGBE, M_NOWAIT | M_ZERO);
        if (!sc->tx_queues) {
                device_printf(iflib_get_dev(ctx),
                    "Unable to allocate TX ring memory\n");
                return (ENOMEM);
        }

        for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
                struct tx_ring *txr = &que->txr;

                /* In case SR-IOV is enabled, align the index properly */
                txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

                txr->sc = que->sc = sc;

                /* Allocate report status array */
                txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
                    scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
                if (txr->tx_rsq == NULL) {
                        error = ENOMEM;
                        goto fail;
                }
                for (j = 0; j < scctx->isc_ntxd[0]; j++)
                        txr->tx_rsq[j] = QIDX_INVALID;
                /* get the virtual and physical address of the hardware queues */
                txr->tail = IXGBE_TDT(txr->me);
                txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
                txr->tx_paddr = paddrs[i];

                txr->bytes = 0;
                txr->total_packets = 0;

                /* Set the rate at which we sample packets */
                if (sc->feat_en & IXGBE_FEATURE_FDIR)
                        txr->atr_sample = atr_sample_rate;
        }

        device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
            sc->num_tx_queues);

        return (0);

fail:
        ixgbe_if_queues_free(ctx);

        return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ix_rx_queue *que;
        int i;

        MPASS(sc->num_rx_queues > 0);
        MPASS(sc->num_rx_queues == nrxqsets);
        MPASS(nrxqs == 1);

        /* Allocate queue structure memory */
        sc->rx_queues =
            (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
            M_IXGBE, M_NOWAIT | M_ZERO);
        if (!sc->rx_queues) {
                device_printf(iflib_get_dev(ctx),
                    "Unable to allocate RX ring memory\n");
                return (ENOMEM);
        }

        for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
                struct rx_ring *rxr = &que->rxr;

                /* In case SR-IOV is enabled, align the index properly */
                rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

                rxr->sc = que->sc = sc;

                /* get the virtual and physical address of the hw queues */
                rxr->tail = IXGBE_RDT(rxr->me);
                rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
                rxr->rx_paddr = paddrs[i];
                rxr->bytes = 0;
                rxr->que = que;
        }

        device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
            sc->num_rx_queues);

        return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ix_tx_queue *tx_que = sc->tx_queues;
        struct ix_rx_queue *rx_que = sc->rx_queues;
        int i;

        if (tx_que != NULL) {
                for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
                        struct tx_ring *txr = &tx_que->txr;
                        if (txr->tx_rsq == NULL)
                                break;

                        free(txr->tx_rsq, M_IXGBE);
                        txr->tx_rsq = NULL;
                }

                free(sc->tx_queues, M_IXGBE);
                sc->tx_queues = NULL;
        }
        if (rx_que != NULL) {
                free(sc->rx_queues, M_IXGBE);
                sc->rx_queues = NULL;
        }
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;
        u32 reta = 0, mrqc, rss_key[10];
        int queue_id, table_size, index_mult;
        int i, j;
        u32 rss_hash_config;

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
                /* Fetch the configured RSS key */
                rss_getkey((uint8_t *)&rss_key);
        } else {
                /* set up random bits */
                arc4rand(&rss_key, sizeof(rss_key), 0);
        }

        /* Set multiplier for RETA setup and table size based on MAC */
        index_mult = 0x1;
        table_size = 128;
        switch (sc->hw.mac.type) {
        case ixgbe_mac_82598EB:
                index_mult = 0x11;
                break;
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                table_size = 512;
                break;
        default:
                break;
        }

        /* Set up the redirection table */
        for (i = 0, j = 0; i < table_size; i++, j++) {
                if (j == sc->num_rx_queues)
                        j = 0;

                if (sc->feat_en & IXGBE_FEATURE_RSS) {
                        /*
                         * Fetch the RSS bucket id for the given indirection
                         * entry. Cap it at the number of configured buckets
                         * (which is num_rx_queues.)
                         */
                        queue_id = rss_get_indirection_to_bucket(i);
                        queue_id = queue_id % sc->num_rx_queues;
                } else
                        queue_id = (j * index_mult);

                /*
                 * The low 8 bits are for hash value (n+0);
                 * The next 8 bits are for hash value (n+1), etc.
                 */
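                /*
                 * Example: four consecutive entries with queue ids q0..q3
                 * pack into one 32-bit register as
                 * (q3 << 24) | (q2 << 16) | (q1 << 8) | q0, which the
                 * shift-and-or below builds up incrementally before the
                 * write once (i & 3) == 3.
                 */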
                reta = reta >> 8;
                reta = reta | (((uint32_t)queue_id) << 24);
                if ((i & 3) == 3) {
                        if (i < 128)
                                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
                        else
                                IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                                    reta);
                        reta = 0;
                }
        }

        /* Now fill our hash function seeds */
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

        /* Perform hash on these packet types */
        if (sc->feat_en & IXGBE_FEATURE_RSS)
                rss_hash_config = rss_gethashconfig();
        else {
                /*
                 * Disable UDP - IP fragments aren't currently being handled
                 * and so we end up with a mix of 2-tuple and 4-tuple
                 * traffic.
                 */
                rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                    | RSS_HASHTYPE_RSS_TCP_IPV4
                    | RSS_HASHTYPE_RSS_IPV6
                    | RSS_HASHTYPE_RSS_TCP_IPV6
                    | RSS_HASHTYPE_RSS_IPV6_EX
                    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
        }

        mrqc = IXGBE_MRQC_RSSEN;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
        mrqc |= ixgbe_get_mrqc(sc->iov_mode);
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1)
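/*
 * SRRCTL.BSIZEPKT is expressed in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT
 * is 10), so the round-up below turns, e.g., a 2048-byte receive buffer
 * into a field value of 2; adding BSIZEPKT_ROUNDUP (1023) first makes any
 * partial kilobyte round up rather than down.
 */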

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        if_softc_ctx_t scctx = sc->shared;
        struct ixgbe_hw *hw = &sc->hw;
        if_t ifp = iflib_get_ifp(ctx);
        struct ix_rx_queue *que;
        int i, j;
        u32 bufsz, fctrl, srrctl, rxcsum;
        u32 hlreg;

        /*
         * Make sure receives are disabled while
         * setting up the descriptor ring
         */
        ixgbe_disable_rx(hw);

        /* Enable broadcasts */
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
        if (sc->hw.mac.type == ixgbe_mac_82598EB) {
                fctrl |= IXGBE_FCTRL_DPF;
                fctrl |= IXGBE_FCTRL_PMCF;
        }
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

        /* Set for Jumbo Frames? */
        hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        if (if_getmtu(ifp) > ETHERMTU)
                hlreg |= IXGBE_HLREG0_JUMBOEN;
        else
                hlreg &= ~IXGBE_HLREG0_JUMBOEN;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

        bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
            IXGBE_SRRCTL_BSIZEPKT_SHIFT;

        /* Setup the Base and Length of the Rx Descriptor Ring */
        for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
                struct rx_ring *rxr = &que->rxr;
                u64 rdba = rxr->rx_paddr;

                j = rxr->me;

                /* Setup the Base and Length of the Rx Descriptor Ring */
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
                    (rdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
                    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

                /* Set up the SRRCTL register */
                srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
                srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
                srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
                srrctl |= bufsz;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

                /*
                 * Set DROP_EN iff we have no flow control and >1 queue.
                 * Note that srrctl was cleared shortly before during reset,
                 * so we do not need to clear the bit, but do it just in case
                 * this code is moved elsewhere.
                 */
                if (sc->num_rx_queues > 1 &&
                    sc->hw.fc.requested_mode == ixgbe_fc_none) {
                        srrctl |= IXGBE_SRRCTL_DROP_EN;
                } else {
                        srrctl &= ~IXGBE_SRRCTL_DROP_EN;
                }
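                /*
                 * With DROP_EN set, this queue silently drops frames once
                 * its descriptors are exhausted rather than backing up the
                 * shared packet buffer, which avoids head-of-line blocking
                 * across queues when flow control is not in use.
                 */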
                IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

                /* Setup the HW Rx Head and Tail Descriptor Pointers */
                IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

                /* Set the driver rx tail address */
                rxr->tail = IXGBE_RDT(rxr->me);
        }

        if (sc->hw.mac.type != ixgbe_mac_82598EB) {
                u32 psrtype = IXGBE_PSRTYPE_TCPHDR
                    | IXGBE_PSRTYPE_UDPHDR
                    | IXGBE_PSRTYPE_IPV4HDR
                    | IXGBE_PSRTYPE_IPV6HDR;
                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
        }

        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        ixgbe_initialize_rss_mapping(sc);

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
                /* RSS and RX IPP Checksum are mutually exclusive */
                rxcsum |= IXGBE_RXCSUM_PCSD;
        }

        if (if_getcapenable(ifp) & IFCAP_RXCSUM)
                rxcsum |= IXGBE_RXCSUM_PCSD;

        /* This is useful for calculating UDP/IP fragment checksums */
        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ixgbe_hw *hw = &sc->hw;
        if_softc_ctx_t scctx = sc->shared;
        struct ix_tx_queue *que;
        int i;

        /* Setup the Base and Length of the Tx Descriptor Ring */
        for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
            i++, que++) {
                struct tx_ring *txr = &que->txr;
                u64 tdba = txr->tx_paddr;
                u32 txctrl = 0;
                int j = txr->me;

                IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
                    (tdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
                    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

                /* Setup the HW Tx Head and Tail descriptor pointers */
                IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

                /* Cache the tail address */
                txr->tail = IXGBE_TDT(txr->me);

                txr->tx_rs_cidx = txr->tx_rs_pidx;
                txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
                for (int k = 0; k < scctx->isc_ntxd[0]; k++)
                        txr->tx_rsq[k] = QIDX_INVALID;

                /* Disable Head Writeback */
                /*
                 * Note: for X550 series devices, these registers are actually
                 * prefixed with TPH_ instead of DCA_, but the addresses and
                 * fields remain the same.
                 */
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
                        break;
                default:
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
                        break;
                }
                txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
                        break;
                default:
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
                        break;
                }
        }

        if (hw->mac.type != ixgbe_mac_82598EB) {
                u32 dmatxctl, rttdcs;

                dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
                dmatxctl |= IXGBE_DMATXCTL_TE;
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
                /* Disable arbiter to set MTQC */
                rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
                rttdcs |= IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
                IXGBE_WRITE_REG(hw, IXGBE_MTQC,
                    ixgbe_get_mtqc(sc->iov_mode));
                rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
        }
} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
        return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, initializes the hardware,
 *   and initializes iflib structures.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
        struct ixgbe_softc *sc;
        device_t dev;
        if_softc_ctx_t scctx;
        struct ixgbe_hw *hw;
        int error = 0;
        u32 ctrl_ext;
        size_t i;

        INIT_DEBUGOUT("ixgbe_attach: begin");

        /* Allocate, clear, and link in our adapter structure */
        dev = iflib_get_dev(ctx);
        sc = iflib_get_softc(ctx);
        sc->hw.back = sc;
        sc->ctx = ctx;
        sc->dev = dev;
        scctx = sc->shared = iflib_get_softc_ctx(ctx);
        sc->media = iflib_get_media(ctx);
        hw = &sc->hw;

        /* Determine hardware revision */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_get_revid(dev);
        hw->subsystem_vendor_id = pci_get_subvendor(dev);
        hw->subsystem_device_id = pci_get_subdevice(dev);

        /* Do base PCI setup - map BAR0 */
        if (ixgbe_allocate_pci_resources(ctx)) {
                device_printf(dev, "Allocation of PCI resources failed\n");
                return (ENXIO);
        }

        /* let hardware know driver is loaded */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        /*
         * Initialize the shared code
         */
        if (ixgbe_init_shared_code(hw) != 0) {
                device_printf(dev, "Unable to initialize the shared code\n");
                error = ENXIO;
                goto err_pci;
        }

        if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
                device_printf(dev, "Firmware recovery mode detected. Limiting "
                    "functionality.\nRefer to the Intel(R) Ethernet Adapters "
                    "and Devices User Guide for details on firmware recovery "
                    "mode.");
                error = ENOSYS;
                goto err_pci;
        }

        /* 82598 Does not support SR-IOV, initialize everything else */
        if (hw->mac.type >= ixgbe_mac_82599_vf) {
                for (i = 0; i < sc->num_vfs; i++)
                        hw->mbx.ops[i].init_params(hw);
        }

        hw->allow_unsupported_sfp = allow_unsupported_sfp;

        if (hw->mac.type != ixgbe_mac_82598EB)
                hw->phy.smart_speed = ixgbe_smart_speed;

        ixgbe_init_device_features(sc);

        /* Enable WoL (if supported) */
        ixgbe_check_wol_support(sc);

        /* Verify adapter fan is still functional (if applicable) */
        if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
                u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
                ixgbe_check_fan_failure(sc, esdp, false);
        }

        /* Ensure SW/FW semaphore is free */
        ixgbe_init_swfw_semaphore(hw);

        /* Set an initial default flow control value */
        hw->fc.requested_mode = ixgbe_flow_control;

        hw->phy.reset_if_overtemp = true;
        error = ixgbe_reset_hw(hw);
        hw->phy.reset_if_overtemp = false;
        if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
                /*
                 * No optics in this port, set up
                 * so the timer routine will probe
                 * for later insertion.
                 */
                sc->sfp_probe = true;
                error = 0;
        } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                device_printf(dev, "Unsupported SFP+ module detected!\n");
                error = EIO;
                goto err_pci;
        } else if (error) {
                device_printf(dev, "Hardware initialization failed\n");
                error = EIO;
                goto err_pci;
        }

        /* Make sure we have a good EEPROM before we read from it */
        if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
                device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
                error = EIO;
                goto err_pci;
        }

        error = ixgbe_start_hw(hw);
        switch (error) {
        case IXGBE_ERR_EEPROM_VERSION:
                device_printf(dev,
                    "This device is a pre-production adapter/LOM. Please be "
                    "aware there may be issues associated with your "
                    "hardware.\nIf you are experiencing problems please "
                    "contact your Intel or hardware representative who "
                    "provided you with this hardware.\n");
                break;
        case IXGBE_ERR_SFP_NOT_SUPPORTED:
                device_printf(dev, "Unsupported SFP+ Module\n");
                error = EIO;
                goto err_pci;
        case IXGBE_ERR_SFP_NOT_PRESENT:
                device_printf(dev, "No SFP+ Module found\n");
                /* falls thru */
        default:
                break;
        }

        /* Most of the iflib initialization... */

        iflib_set_mac(ctx, hw->mac.addr);
        switch (sc->hw.mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                scctx->isc_rss_table_size = 512;
                scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
                break;
        default:
                scctx->isc_rss_table_size = 128;
                scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
        }

        /* Allow legacy interrupts */
        ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

        scctx->isc_txqsizes[0] =
            roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
            sizeof(u32), DBA_ALIGN);
        scctx->isc_rxqsizes[0] =
            roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
            DBA_ALIGN);
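        /*
         * A sketch of the arithmetic, assuming 2048 descriptors and a
         * DBA_ALIGN of 128: 2048 * 16-byte TX descriptors plus the extra
         * u32 is 32772 bytes, which roundup2() pads to the next 128-byte
         * multiple, 32896; the RX ring is 32768 bytes and already aligned.
         */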

        /* XXX */
        scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
            CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
        if (sc->hw.mac.type == ixgbe_mac_82598EB) {
                scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
        } else {
                scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
                scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
        }

        scctx->isc_msix_bar = pci_msix_table_bar(dev);

        scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
        scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
        scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

        scctx->isc_txrx = &ixgbe_txrx;

        scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

        return (0);

err_pci:
        ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
        ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
        ixgbe_free_pci_resources(ctx);

        return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 *   Called during driver load, but after interrupts and
 *   resources have been allocated and configured.
 *   Sets up some data structures not relevant to iflib.
 *
 *   return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
        device_t dev;
        struct ixgbe_softc *sc;
        struct ixgbe_hw *hw;
        int error = 0;

        dev = iflib_get_dev(ctx);
        sc = iflib_get_softc(ctx);
        hw = &sc->hw;

        if (sc->intr_type == IFLIB_INTR_LEGACY &&
            (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
                device_printf(dev, "Device does not support legacy interrupts");
                error = ENXIO;
                goto err;
        }

        /* Allocate multicast array memory. */
        sc->mta = malloc(sizeof(*sc->mta) *
            MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
        if (sc->mta == NULL) {
                device_printf(dev, "Can not allocate multicast setup array\n");
                error = ENOMEM;
                goto err;
        }

        /* hw.ix defaults init */
        ixgbe_set_advertise(sc, ixgbe_advertise_speed);

        /* Enable the optics for 82599 SFP+ fiber */
        ixgbe_enable_tx_laser(hw);

        /* Enable power to the phy. */
        ixgbe_set_phy_power(hw, true);

        ixgbe_initialize_iov(sc);

        error = ixgbe_setup_interface(ctx);
        if (error) {
                device_printf(dev, "Interface setup failed: %d\n", error);
                goto err;
        }

        ixgbe_if_update_admin_status(ctx);

        /* Initialize statistics */
        ixgbe_update_stats_counters(sc);
        ixgbe_add_hw_stats(sc);

        /* Check PCIE slot type/speed/width */
        ixgbe_get_slot_info(sc);

        /*
         * Do time init and sysctl init here, but
         * only on the first port of a bypass sc.
         */
        ixgbe_bypass_init(sc);

        /* Display NVM and Option ROM versions */
        ixgbe_print_fw_version(ctx);

        /* Set an initial dmac value */
        sc->dmac = 0;
        /* Set initial advertised speeds (if applicable) */
        sc->advertise = ixgbe_get_default_advertise(sc);

        if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
                ixgbe_define_iov_schemas(dev, &error);

        /* Add sysctls */
        ixgbe_add_device_sysctls(ctx);

        /* Init recovery mode timer and state variable */
        if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
                sc->recovery_mode = 0;

                /* Set up the timer callout */
                callout_init(&sc->fw_mode_timer, true);

                /* Start the task */
                callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
        }

        return (0);
err:
        return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 *   Checks whether the adapter's ports are capable of
 *   Wake On LAN by reading the adapter's NVM.
 *
 *   Sets each port's hw->wol_enabled value depending
 *   on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;
        u16 dev_caps = 0;

        /* Find out WoL support for port */
        sc->wol_support = hw->wol_enabled = 0;
        ixgbe_get_device_caps(hw, &dev_caps);
        if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
            ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
             hw->bus.func == 0))
                sc->wol_support = hw->wol_enabled = 1;

        /* Save initial wake up filter configuration */
        sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

        return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
        if_t ifp = iflib_get_ifp(ctx);
        struct ixgbe_softc *sc = iflib_get_softc(ctx);

        INIT_DEBUGOUT("ixgbe_setup_interface: begin");

        if_setbaudrate(ifp, IF_Gbps(10));

        sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

        sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

        ixgbe_add_media_types(ctx);

        /* Autoselect media by default */
        ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

        return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        if_t ifp = iflib_get_ifp(ctx);

        switch (cnt) {
        case IFCOUNTER_IPACKETS:
                return (sc->ipackets);
        case IFCOUNTER_OPACKETS:
                return (sc->opackets);
        case IFCOUNTER_IBYTES:
                return (sc->ibytes);
        case IFCOUNTER_OBYTES:
                return (sc->obytes);
        case IFCOUNTER_IMCASTS:
                return (sc->imcasts);
        case IFCOUNTER_OMCASTS:
                return (sc->omcasts);
        case IFCOUNTER_COLLISIONS:
                return (0);
        case IFCOUNTER_IQDROPS:
                return (sc->iqdrops);
        case IFCOUNTER_OQDROPS:
                return (0);
        case IFCOUNTER_IERRORS:
                return (sc->ierrors);
        default:
                return (if_get_counter_default(ifp, cnt));
        }
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ixgbe_hw *hw = &sc->hw;
        int i;

        if (hw->phy.ops.read_i2c_byte == NULL)
                return (ENXIO);
        for (i = 0; i < req->len; i++)
                hw->phy.ops.read_i2c_byte(hw, req->offset + i,
                    req->dev_addr, &req->data[i]);
        return (0);
} /* ixgbe_if_i2c_req */

/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
        switch (event) {
        case IFLIB_RESTART_VLAN_CONFIG:
        default:
                return (false);
        }
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ixgbe_hw *hw = &sc->hw;
        device_t dev = iflib_get_dev(ctx);
        u64 layer;

        layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

        /* Media types with matching FreeBSD media defines */
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
                ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

        if (hw->mac.type == ixgbe_mac_X550) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
                ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
        }

        if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
            layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
        }

        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
                if (hw->phy.multispeed_fiber)
                        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
                            NULL);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
                if (hw->phy.multispeed_fiber)
                        ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
                            NULL);
        } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
        if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
                ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
                device_printf(dev, "Media supported: 10GbaseKR\n");
                device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
                device_printf(dev, "Media supported: 10GbaseKX4\n");
                device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
                ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
                device_printf(dev, "Media supported: 1000baseKX\n");
                device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
        }
        if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
                device_printf(dev, "Media supported: 2500baseKX\n");
                device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
                ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
        }
#endif
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
                device_printf(dev, "Media supported: 1000baseBX\n");

        if (hw->device_id == IXGBE_DEV_ID_82598AT) {
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
                    0, NULL);
                ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
        }

        ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                if (hw->phy.type == ixgbe_phy_nl)
                        return (true);
                return (false);
        case ixgbe_mac_82599EB:
                switch (hw->mac.ops.get_media_type(hw)) {
                case ixgbe_media_type_fiber:
                case ixgbe_media_type_fiber_qsfp:
                        return (true);
                default:
                        return (false);
                }
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
                        return (true);
                return (false);
        default:
                return (false);
        }
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
        struct ixgbe_softc *sc = iflib_get_softc(ctx);
        struct ixgbe_hw *hw = &sc->hw;
        u32 autoneg, err = 0;
        bool sfp, negotiate;

        sfp = ixgbe_is_sfp(hw);

        if (sfp) {
                sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
                iflib_admin_intr_deferred(ctx);
        } else {
                if (hw->mac.ops.check_link)
                        err = ixgbe_check_link(hw, &sc->link_speed,
                            &sc->link_up, false);
                if (err)
                        return;
                autoneg = hw->phy.autoneg_advertised;
                if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
                        err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                            &negotiate);
                if (err)
                        return;

                if (hw->mac.type == ixgbe_mac_X550 &&
                    hw->phy.autoneg_advertised == 0) {
                        /*
                         * 2.5G and 5G autonegotiation speeds on X550
                         * are disabled by default due to reported
                         * interoperability issues with some switches.
                         *
                         * The second condition checks if any operations
                         * involving setting autonegotiation speeds have
                         * been performed prior to this ixgbe_config_link()
                         * call.
                         *
                         * If hw->phy.autoneg_advertised does not
                         * equal 0, this means that the user might have
                         * set autonegotiation speeds via the sysctl
                         * before bringing the interface up. In this
                         * case, we should not disable 2.5G and 5G
                         * since those speeds might be selected by the
                         * user.
                         *
                         * Otherwise (i.e. if hw->phy.autoneg_advertised
                         * is set to 0), it is the first time we set
                         * autonegotiation preferences and the default
                         * set of speeds should exclude 2.5G and 5G.
                         */
                        autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
                            IXGBE_LINK_SPEED_5GB_FULL);
                }

                if (hw->mac.ops.setup_link)
                        err = hw->mac.ops.setup_link(hw, autoneg,
                            sc->link_up);
        }
} /* ixgbe_config_link */
1476
1477 /************************************************************************
1478 * ixgbe_update_stats_counters - Update board statistics counters.
1479 ************************************************************************/
1480 static void
ixgbe_update_stats_counters(struct ixgbe_softc * sc)1481 ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1482 {
1483 struct ixgbe_hw *hw = &sc->hw;
1484 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1485 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1486 u32 lxoffrxc;
1487 u64 total_missed_rx = 0;
1488
1489 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1490 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1491 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1492 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1493 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1494
1495 for (int i = 0; i < 16; i++) {
1496 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1497 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1498 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1499 }
1500 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1501 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1502 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1503
1504 /* Hardware workaround, gprc counts missed packets */
1505 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1506 stats->gprc -= missed_rx;
1507
1508 if (hw->mac.type != ixgbe_mac_82598EB) {
1509 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1510 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1511 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1512 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1513 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1514 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1515 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1516 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1517 stats->lxoffrxc += lxoffrxc;
1518 } else {
1519 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1520 lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1521 stats->lxoffrxc += lxoffrxc;
1522 /* 82598 only has a counter in the high register */
1523 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1524 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1525 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1526 }
1527
1528 /*
1529 * For watchdog management we need to know if we have been paused
1530 * during the last interval, so capture that here.
1531 */
1532 if (lxoffrxc)
1533 sc->shared->isc_pause_frames = 1;
1534
1535 /*
1536 * Workaround: mprc hardware is incorrectly counting
1537 * broadcasts, so for now we subtract those.
1538 */
1539 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1540 stats->bprc += bprc;
1541 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1542 if (hw->mac.type == ixgbe_mac_82598EB)
1543 stats->mprc -= bprc;
1544
1545 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1546 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1547 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1548 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1549 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1550 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1551
1552 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1553 stats->lxontxc += lxon;
1554 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1555 stats->lxofftxc += lxoff;
1556 total = lxon + lxoff;
1557
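/*
 * The hardware counts transmitted flow-control frames as good packets,
 * so the XON/XOFF totals are backed out of the good packet/octet
 * counters below (ETHER_MIN_LEN bytes per pause frame).
 */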
1558 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1559 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1560 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1561 stats->gptc -= total;
1562 stats->mptc -= total;
1563 stats->ptc64 -= total;
1564 stats->gotc -= total * ETHER_MIN_LEN;
1565
1566 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1567 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1568 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1569 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1570 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1571 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1572 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1573 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1574 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1575 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1576 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1577 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1578 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1579 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1580 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1581 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1582 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1583 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1584 /* Only read FCOE on 82599 */
1585 if (hw->mac.type != ixgbe_mac_82598EB) {
1586 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1587 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1588 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1589 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1590 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1591 }
1592
1593 /* Fill out the OS statistics structure */
1594 IXGBE_SET_IPACKETS(sc, stats->gprc);
1595 IXGBE_SET_OPACKETS(sc, stats->gptc);
1596 IXGBE_SET_IBYTES(sc, stats->gorc);
1597 IXGBE_SET_OBYTES(sc, stats->gotc);
1598 IXGBE_SET_IMCASTS(sc, stats->mprc);
1599 IXGBE_SET_OMCASTS(sc, stats->mptc);
1600 IXGBE_SET_COLLISIONS(sc, 0);
1601 IXGBE_SET_IQDROPS(sc, total_missed_rx);
1602
1603 /*
1604 * Aggregate following types of errors as RX errors:
1605 * - CRC error count,
1606 * - illegal byte error count,
1607 * - missed packets count,
1608 * - length error count,
1609 * - undersized packets count,
1610 * - fragmented packets count,
1611 * - oversized packets count,
1612 * - jabber count.
1613 */
1614 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1615 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
1616 stats->rjc);
1617 } /* ixgbe_update_stats_counters */
1618
1619 /************************************************************************
1620 * ixgbe_add_hw_stats
1621 *
1622 * Add sysctl variables, one per statistic, to the system.
1623 ************************************************************************/
1624 static void
1625 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1626 {
1627 device_t dev = iflib_get_dev(sc->ctx);
1628 struct ix_rx_queue *rx_que;
1629 struct ix_tx_queue *tx_que;
1630 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1631 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1632 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1633 struct ixgbe_hw_stats *stats = &sc->stats.pf;
1634 struct sysctl_oid *stat_node, *queue_node;
1635 struct sysctl_oid_list *stat_list, *queue_list;
1636 int i;
1637
1638 #define QUEUE_NAME_LEN 32
1639 char namebuf[QUEUE_NAME_LEN];
1640
1641 /* Driver Statistics */
1642 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1643 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
1644 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1645 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1646 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1647 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1648
1649 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
1650 struct tx_ring *txr = &tx_que->txr;
1651 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1652 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1653 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1654 queue_list = SYSCTL_CHILDREN(queue_node);
1655
1656 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1657 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1658 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1659 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1660 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
1661 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1662 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1663 CTLFLAG_RD, &txr->tso_tx, "TSO");
1664 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1665 CTLFLAG_RD, &txr->total_packets,
1666 "Queue Packets Transmitted");
1667 }
1668
1669 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
1670 struct rx_ring *rxr = &rx_que->rxr;
1671 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1672 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1673 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1674 queue_list = SYSCTL_CHILDREN(queue_node);
1675
1676 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1677 CTLTYPE_UINT | CTLFLAG_RW,
1678 &sc->rx_queues[i], 0,
1679 ixgbe_sysctl_interrupt_rate_handler, "IU",
1680 "Interrupt Rate");
1681 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1682 CTLFLAG_RD, &(sc->rx_queues[i].irqs),
1683 "irqs on this queue");
1684 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1685 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1686 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1687 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1688 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
1689 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1690 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1691 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1692 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1693 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1694 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1695 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1696 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1697 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1698 }
1699
1700 /* MAC stats get their own sub node */
1701
1702 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1703 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
1704 stat_list = SYSCTL_CHILDREN(stat_node);
1705
1706 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
1707 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
1708 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1709 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1710 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1711 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1712 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1713 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1714 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1715 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1716 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1717 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1718 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1719 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1720 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1721 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1722 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1723 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1724
1725 /* Flow Control stats */
1726 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1727 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1728 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1729 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1730 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1731 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1732 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1733 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1734
1735 /* Packet Reception Stats */
1736 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1737 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1738 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1739 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1740 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1741 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1742 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1743 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1744 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1745 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1746 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1747 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1748 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1749 CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1750 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1751 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1752 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1753 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1754 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1755 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1756 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1757 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1758 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1759 CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1760 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1761 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1762 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1763 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1764 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1765 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1766 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1767 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1768 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1769 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1770 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1771 CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1772 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1773 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1774
1775 /* Packet Transmission Stats */
1776 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1777 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1778 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1779 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1780 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1781 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1782 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1783 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1784 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1785 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1786 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1787 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1788 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1789 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1790 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1791 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1792 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1793 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1794 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1795 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1796 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1797 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1798 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1799 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1800 } /* ixgbe_add_hw_stats */
1801
1802 /************************************************************************
1803 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1804 *
1805 * Retrieves the TDH value from the hardware
1806 ************************************************************************/
1807 static int
1808 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1809 {
1810 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1811 int error;
1812 unsigned int val;
1813
1814 if (!txr)
1815 return (0);
1816
1818 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1819 return (EPERM);
1820
1821 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
1822 error = sysctl_handle_int(oidp, &val, 0, req);
1823 if (error || !req->newptr)
1824 return error;
1825
1826 return (0);
1827 } /* ixgbe_sysctl_tdh_handler */
1828
1829 /************************************************************************
1830 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1831 *
1832 * Retrieves the TDT value from the hardware
1833 ************************************************************************/
1834 static int
1835 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1836 {
1837 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1838 int error;
1839 unsigned int val;
1840
1841 if (!txr)
1842 return (0);
1843
1844 if (atomic_load_acq_int(&txr->sc->recovery_mode))
1845 return (EPERM);
1846
1847 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
1848 error = sysctl_handle_int(oidp, &val, 0, req);
1849 if (error || !req->newptr)
1850 return error;
1851
1852 return (0);
1853 } /* ixgbe_sysctl_tdt_handler */
1854
1855 /************************************************************************
1856 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1857 *
1858 * Retrieves the RDH value from the hardware
1859 ************************************************************************/
1860 static int
1861 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1862 {
1863 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1864 int error;
1865 unsigned int val;
1866
1867 if (!rxr)
1868 return (0);
1869
1870 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1871 return (EPERM);
1872
1873 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
1874 error = sysctl_handle_int(oidp, &val, 0, req);
1875 if (error || !req->newptr)
1876 return error;
1877
1878 return (0);
1879 } /* ixgbe_sysctl_rdh_handler */
1880
1881 /************************************************************************
1882 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1883 *
1884 * Retrieves the RDT value from the hardware
1885 ************************************************************************/
1886 static int
1887 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1888 {
1889 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1890 int error;
1891 unsigned int val;
1892
1893 if (!rxr)
1894 return (0);
1895
1896 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
1897 return (EPERM);
1898
1899 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
1900 error = sysctl_handle_int(oidp, &val, 0, req);
1901 if (error || !req->newptr)
1902 return error;
1903
1904 return (0);
1905 } /* ixgbe_sysctl_rdt_handler */
1906
1907 /************************************************************************
1908 * ixgbe_if_vlan_register
1909 *
1910 * Run via the vlan config EVENT; it enables us to use the
1911 * HW filter table, since we can get the vlan id. This
1912 * just creates the entry in the soft version of the
1913 * VFTA; init will repopulate the real table.
1914 ************************************************************************/
1915 static void
1916 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
1917 {
1918 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1919 u16 index, bit;
1920
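/*
 * The VFTA is a 4096-bit table stored as 128 32-bit words: e.g.
 * vtag 100 maps to word 3 (100 >> 5) and bit 4 (100 & 0x1F).
 */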
1921 index = (vtag >> 5) & 0x7F;
1922 bit = vtag & 0x1F;
1923 sc->shadow_vfta[index] |= (1 << bit);
1924 ++sc->num_vlans;
1925 ixgbe_setup_vlan_hw_support(ctx);
1926 } /* ixgbe_if_vlan_register */
1927
1928 /************************************************************************
1929 * ixgbe_if_vlan_unregister
1930 *
1931 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1932 ************************************************************************/
1933 static void
1934 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1935 {
1936 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1937 u16 index, bit;
1938
1939 index = (vtag >> 5) & 0x7F;
1940 bit = vtag & 0x1F;
1941 sc->shadow_vfta[index] &= ~(1 << bit);
1942 --sc->num_vlans;
1943 /* Re-init to load the changes */
1944 ixgbe_setup_vlan_hw_support(ctx);
1945 } /* ixgbe_if_vlan_unregister */
1946
1947 /************************************************************************
1948 * ixgbe_setup_vlan_hw_support
1949 ************************************************************************/
1950 static void
1951 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
1952 {
1953 if_t ifp = iflib_get_ifp(ctx);
1954 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1955 struct ixgbe_hw *hw = &sc->hw;
1956 struct rx_ring *rxr;
1957 int i;
1958 u32 ctrl;
1959
1961 /*
1962 * We get here through init_locked, meaning
1963 * a soft reset; this has already cleared
1964 * the VFTA and other state, so if no VLANs
1965 * have been registered, do nothing.
1966 */
1967 if (sc->num_vlans == 0 || (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
1968 /* Clear the vlan hw flag */
1969 for (i = 0; i < sc->num_rx_queues; i++) {
1970 rxr = &sc->rx_queues[i].rxr;
1971 /* On 82599 the VLAN enable is per-queue in RXDCTL */
1972 if (hw->mac.type != ixgbe_mac_82598EB) {
1973 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1974 ctrl &= ~IXGBE_RXDCTL_VME;
1975 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1976 }
1977 rxr->vtag_strip = false;
1978 }
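/*
 * VLNCTRL bits used here: VFE enables the VLAN filter table and
 * VME (a global strip enable used only by the 82598) enables tag
 * stripping; both are turned off in this path.
 */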
1979 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1980 /* Disable the VLAN filter table */
1981 ctrl |= IXGBE_VLNCTRL_CFIEN;
1982 ctrl &= ~IXGBE_VLNCTRL_VFE;
1983 if (hw->mac.type == ixgbe_mac_82598EB)
1984 ctrl &= ~IXGBE_VLNCTRL_VME;
1985 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1986 return;
1987 }
1988
1989 /* Setup the queues for vlans */
1990 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1991 for (i = 0; i < sc->num_rx_queues; i++) {
1992 rxr = &sc->rx_queues[i].rxr;
1993 /* On 82599 the VLAN enable is per-queue in RXDCTL */
1994 if (hw->mac.type != ixgbe_mac_82598EB) {
1995 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1996 ctrl |= IXGBE_RXDCTL_VME;
1997 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1998 }
1999 rxr->vtag_strip = true;
2000 }
2001 }
2002
2003 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
2004 return;
2005 /*
2006 * A soft reset zeroes out the VFTA, so
2007 * we need to repopulate it now.
2008 */
2009 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2010 if (sc->shadow_vfta[i] != 0)
2011 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2012 sc->shadow_vfta[i]);
2013
2014 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2015 /* Enable the filter table if hardware VLAN filtering is enabled */
2016 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
2017 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2018 ctrl |= IXGBE_VLNCTRL_VFE;
2019 }
2020 if (hw->mac.type == ixgbe_mac_82598EB)
2021 ctrl |= IXGBE_VLNCTRL_VME;
2022 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2023 } /* ixgbe_setup_vlan_hw_support */
2024
2025 /************************************************************************
2026 * ixgbe_get_slot_info
2027 *
2028 * Get the width and transaction speed of
2029 * the slot this adapter is plugged into.
2030 ************************************************************************/
2031 static void
2032 ixgbe_get_slot_info(struct ixgbe_softc *sc)
2033 {
2034 device_t dev = iflib_get_dev(sc->ctx);
2035 struct ixgbe_hw *hw = &sc->hw;
2036 int bus_info_valid = true;
2037 u32 offset;
2038 u16 link;
2039
2040 /* Some devices are behind an internal bridge */
2041 switch (hw->device_id) {
2042 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2043 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2044 goto get_parent_info;
2045 default:
2046 break;
2047 }
2048
2049 ixgbe_get_bus_info(hw);
2050
2051 /*
2052 * Some devices don't use PCI-E; for those, skip the display
2053 * rather than report "Unknown" for bus speed and width.
2054 */
2055 switch (hw->mac.type) {
2056 case ixgbe_mac_X550EM_x:
2057 case ixgbe_mac_X550EM_a:
2058 return;
2059 default:
2060 goto display;
2061 }
2062
2063 get_parent_info:
2064 /*
2065 * For the Quad port adapter we need to parse back
2066 * up the PCI tree to find the speed of the expansion
2067 * slot into which this adapter is plugged. A bit more work.
2068 */
2069 dev = device_get_parent(device_get_parent(dev));
2070 #ifdef IXGBE_DEBUG
2071 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2072 pci_get_slot(dev), pci_get_function(dev));
2073 #endif
2074 dev = device_get_parent(device_get_parent(dev));
2075 #ifdef IXGBE_DEBUG
2076 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2077 pci_get_slot(dev), pci_get_function(dev));
2078 #endif
2079 /* Now get the PCI Express Capabilities offset */
2080 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
2081 /*
2082 * Hmm...can't get PCI-Express capabilities.
2083 * Falling back to default method.
2084 */
2085 bus_info_valid = false;
2086 ixgbe_get_bus_info(hw);
2087 goto display;
2088 }
2089 /* ...and read the Link Status Register */
2090 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2091 ixgbe_set_pci_config_data_generic(hw, link);
2092
2093 display:
2094 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
2095 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2096 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2097 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2098 "Unknown"),
2099 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
2100 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
2101 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
2102 "Unknown"));
2103
2104 if (bus_info_valid) {
2105 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2106 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2107 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2108 device_printf(dev, "PCI-Express bandwidth available for this card is not sufficient for optimal performance.\n");
2109 device_printf(dev, "For optimal performance an x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
2110 }
2111 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2112 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2113 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2114 device_printf(dev, "PCI-Express bandwidth available for this card is not sufficient for optimal performance.\n");
2115 device_printf(dev, "For optimal performance an x8 PCIE Gen3 slot is required.\n");
2116 }
2117 } else
2118 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2119
2120 return;
2121 } /* ixgbe_get_slot_info */
2122
2123 /************************************************************************
2124 * ixgbe_if_msix_intr_assign
2125 *
2126 * Setup MSI-X Interrupt resources and handlers
2127 ************************************************************************/
2128 static int
2129 ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
2130 {
2131 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2132 struct ix_rx_queue *rx_que = sc->rx_queues;
2133 struct ix_tx_queue *tx_que;
2134 int error, rid, vector = 0;
2135 char buf[16];
2136
2137 /* RX queue vectors are assigned first; the admin vector comes after them */
2138 rid = vector + 1;
2139 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
2140 rid = vector + 1;
2141
2142 snprintf(buf, sizeof(buf), "rxq%d", i);
2143 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
2144 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
2145
2146 if (error) {
2147 device_printf(iflib_get_dev(ctx),
2148 "Failed to allocate queue interrupt %d, err: %d\n", i, error);
2149 sc->num_rx_queues = i + 1;
2150 goto fail;
2151 }
2152
2153 rx_que->msix = vector;
2154 }
2155 for (int i = 0; i < sc->num_tx_queues; i++) {
2156 snprintf(buf, sizeof(buf), "txq%d", i);
2157 tx_que = &sc->tx_queues[i];
2158 tx_que->msix = i % sc->num_rx_queues;
2159 iflib_softirq_alloc_generic(ctx,
2160 &sc->rx_queues[tx_que->msix].que_irq,
2161 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
2162 }
2163 rid = vector + 1;
2164 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
2165 IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
2166 if (error) {
2167 device_printf(iflib_get_dev(ctx),
2168 "Failed to register admin handler");
2169 return (error);
2170 }
2171
2172 sc->vector = vector;
2173
2174 return (0);
2175 fail:
2176 iflib_irq_free(ctx, &sc->irq);
2177 rx_que = sc->rx_queues;
2178 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
2179 iflib_irq_free(ctx, &rx_que->que_irq);
2180
2181 return (error);
2182 } /* ixgbe_if_msix_intr_assign */
2183
2184 static inline void
2185 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2186 {
2187 uint32_t newitr = 0;
2188 struct rx_ring *rxr = &que->rxr;
2189 /* FIXME struct tx_ring *txr = ... ->txr; */
2190
2191 /*
2192 * Do Adaptive Interrupt Moderation:
2193 * - Write out last calculated setting
2194 * - Calculate based on average size over
2195 * the last interval.
2196 */
2197 if (que->eitr_setting) {
2198 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2199 que->eitr_setting);
2200 }
2201
2202 que->eitr_setting = 0;
2203 /* Idle, do nothing */
2204 if (rxr->bytes == 0) {
2205 /* FIXME && txr->bytes == 0 */
2206 return;
2207 }
2208
2209 if ((rxr->bytes) && (rxr->packets))
2210 newitr = rxr->bytes / rxr->packets;
2211 /* FIXME for transmit accounting
2212 * if ((txr->bytes) && (txr->packets))
2213 * newitr = txr->bytes/txr->packets;
2214 * if ((rxr->bytes) && (rxr->packets))
2215 * newitr = max(newitr, (rxr->bytes / rxr->packets));
2216 */
2217
2218 newitr += 24; /* account for hardware frame, crc */
2219 /* set an upper boundary */
2220 newitr = min(newitr, 3000);
2221
2222 /* Be nice to the mid range */
2223 if ((newitr > 300) && (newitr < 1200)) {
2224 newitr = (newitr / 3);
2225 } else {
2226 newitr = (newitr / 2);
2227 }
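/*
 * Worked example: an average frame of 576 bytes gives
 * newitr = 576 + 24 = 600, which falls in the mid range and is
 * divided by 3, yielding an EITR setting of 200.
 */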
2228
2229 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2230 newitr |= newitr << 16;
2231 } else {
2232 newitr |= IXGBE_EITR_CNT_WDIS;
2233 }
2234
2235 /* save for next interrupt */
2236 que->eitr_setting = newitr;
2237
2238 /* Reset state */
2239 /* FIXME txr->bytes = 0; */
2240 /* FIXME txr->packets = 0; */
2241 rxr->bytes = 0;
2242 rxr->packets = 0;
2243
2244 return;
2245 }
2246
2247 /*********************************************************************
2248 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2249 **********************************************************************/
2250 static int
2251 ixgbe_msix_que(void *arg)
2252 {
2253 struct ix_rx_queue *que = arg;
2254 struct ixgbe_softc *sc = que->sc;
2255 if_t ifp = iflib_get_ifp(que->sc->ctx);
2256
2257 /* Protect against spurious interrupts */
2258 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2259 return (FILTER_HANDLED);
2260
2261 ixgbe_disable_queue(sc, que->msix);
2262 ++que->irqs;
2263
2264 /* Check for AIM */
2265 if (sc->enable_aim) {
2266 ixgbe_perform_aim(sc, que);
2267 }
2268
2269 return (FILTER_SCHEDULE_THREAD);
2270 } /* ixgbe_msix_que */
2271
2272 /************************************************************************
2273 * ixgbe_if_media_status - Media Ioctl callback
2274 *
2275 * Called whenever the user queries the status of
2276 * the interface using ifconfig.
2277 ************************************************************************/
2278 static void
2279 ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
2280 {
2281 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2282 struct ixgbe_hw *hw = &sc->hw;
2283 int layer;
2284
2285 INIT_DEBUGOUT("ixgbe_if_media_status: begin");
2286
2287 ifmr->ifm_status = IFM_AVALID;
2288 ifmr->ifm_active = IFM_ETHER;
2289
2290 if (!sc->link_active)
2291 return;
2292
2293 ifmr->ifm_status |= IFM_ACTIVE;
2294 layer = sc->phy_layer;
2295
2296 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2297 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2298 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2299 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2300 switch (sc->link_speed) {
2301 case IXGBE_LINK_SPEED_10GB_FULL:
2302 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2303 break;
2304 case IXGBE_LINK_SPEED_1GB_FULL:
2305 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2306 break;
2307 case IXGBE_LINK_SPEED_100_FULL:
2308 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2309 break;
2310 case IXGBE_LINK_SPEED_10_FULL:
2311 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2312 break;
2313 }
2314 if (hw->mac.type == ixgbe_mac_X550)
2315 switch (sc->link_speed) {
2316 case IXGBE_LINK_SPEED_5GB_FULL:
2317 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2318 break;
2319 case IXGBE_LINK_SPEED_2_5GB_FULL:
2320 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2321 break;
2322 }
2323 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2324 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2325 switch (sc->link_speed) {
2326 case IXGBE_LINK_SPEED_10GB_FULL:
2327 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2328 break;
2329 case IXGBE_LINK_SPEED_1GB_FULL:
2330 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2331 break;
2332 }
2333 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2334 switch (sc->link_speed) {
2335 case IXGBE_LINK_SPEED_10GB_FULL:
2336 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2337 break;
2338 case IXGBE_LINK_SPEED_1GB_FULL:
2339 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2340 break;
2341 }
2342 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2343 switch (sc->link_speed) {
2344 case IXGBE_LINK_SPEED_10GB_FULL:
2345 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2346 break;
2347 case IXGBE_LINK_SPEED_1GB_FULL:
2348 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2349 break;
2350 }
2351 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2352 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2353 switch (sc->link_speed) {
2354 case IXGBE_LINK_SPEED_10GB_FULL:
2355 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2356 break;
2357 case IXGBE_LINK_SPEED_1GB_FULL:
2358 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2359 break;
2360 }
2361 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2362 switch (sc->link_speed) {
2363 case IXGBE_LINK_SPEED_10GB_FULL:
2364 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2365 break;
2366 }
2367 /*
2368 * XXX: These need to use the proper media types once
2369 * they're added.
2370 */
2371 #ifndef IFM_ETH_XTYPE
2372 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2373 switch (sc->link_speed) {
2374 case IXGBE_LINK_SPEED_10GB_FULL:
2375 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2376 break;
2377 case IXGBE_LINK_SPEED_2_5GB_FULL:
2378 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2379 break;
2380 case IXGBE_LINK_SPEED_1GB_FULL:
2381 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2382 break;
2383 }
2384 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2385 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2386 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2387 switch (sc->link_speed) {
2388 case IXGBE_LINK_SPEED_10GB_FULL:
2389 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2390 break;
2391 case IXGBE_LINK_SPEED_2_5GB_FULL:
2392 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2393 break;
2394 case IXGBE_LINK_SPEED_1GB_FULL:
2395 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2396 break;
2397 }
2398 #else
2399 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2400 switch (sc->link_speed) {
2401 case IXGBE_LINK_SPEED_10GB_FULL:
2402 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2403 break;
2404 case IXGBE_LINK_SPEED_2_5GB_FULL:
2405 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2406 break;
2407 case IXGBE_LINK_SPEED_1GB_FULL:
2408 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2409 break;
2410 }
2411 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2412 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2413 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2414 switch (sc->link_speed) {
2415 case IXGBE_LINK_SPEED_10GB_FULL:
2416 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2417 break;
2418 case IXGBE_LINK_SPEED_2_5GB_FULL:
2419 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2420 break;
2421 case IXGBE_LINK_SPEED_1GB_FULL:
2422 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2423 break;
2424 }
2425 #endif
2426
2427 /* If nothing is recognized... */
2428 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2429 ifmr->ifm_active |= IFM_UNKNOWN;
2430
2431 /* Display current flow control setting used on link */
2432 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2433 hw->fc.current_mode == ixgbe_fc_full)
2434 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2435 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2436 hw->fc.current_mode == ixgbe_fc_full)
2437 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2438 } /* ixgbe_if_media_status */
2439
2440 /************************************************************************
2441 * ixgbe_if_media_change - Media Ioctl callback
2442 *
2443 * Called when the user changes speed/duplex using
2444 * media/mediaopt options with ifconfig.
2445 ************************************************************************/
2446 static int
2447 ixgbe_if_media_change(if_ctx_t ctx)
2448 {
2449 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2450 struct ifmedia *ifm = iflib_get_media(ctx);
2451 struct ixgbe_hw *hw = &sc->hw;
2452 ixgbe_link_speed speed = 0;
2453
2454 INIT_DEBUGOUT("ixgbe_if_media_change: begin");
2455
2456 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2457 return (EINVAL);
2458
2459 if (hw->phy.media_type == ixgbe_media_type_backplane)
2460 return (EPERM);
2461
2462 /*
2463 * We don't actually need to check against the supported
2464 * media types of the adapter; ifmedia will take care of
2465 * that for us.
2466 */
2467 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2468 case IFM_AUTO:
2469 case IFM_10G_T:
2470 speed |= IXGBE_LINK_SPEED_100_FULL;
2471 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2472 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2473 break;
2474 case IFM_10G_LRM:
2475 case IFM_10G_LR:
2476 #ifndef IFM_ETH_XTYPE
2477 case IFM_10G_SR: /* KR, too */
2478 case IFM_10G_CX4: /* KX4 */
2479 #else
2480 case IFM_10G_KR:
2481 case IFM_10G_KX4:
2482 #endif
2483 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2484 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2485 break;
2486 #ifndef IFM_ETH_XTYPE
2487 case IFM_1000_CX: /* KX */
2488 #else
2489 case IFM_1000_KX:
2490 #endif
2491 case IFM_1000_LX:
2492 case IFM_1000_SX:
2493 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2494 break;
2495 case IFM_1000_T:
2496 speed |= IXGBE_LINK_SPEED_100_FULL;
2497 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2498 break;
2499 case IFM_10G_TWINAX:
2500 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2501 break;
2502 case IFM_5000_T:
2503 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2504 break;
2505 case IFM_2500_T:
2506 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2507 break;
2508 case IFM_100_TX:
2509 speed |= IXGBE_LINK_SPEED_100_FULL;
2510 break;
2511 case IFM_10_T:
2512 speed |= IXGBE_LINK_SPEED_10_FULL;
2513 break;
2514 default:
2515 goto invalid;
2516 }
2517
2518 hw->mac.autotry_restart = true;
2519 hw->mac.ops.setup_link(hw, speed, true);
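/*
 * Record the selection in the bitmask format also used by the
 * advertise_speed sysctl: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G,
 * 0x8 = 10M, 0x10 = 2.5G, 0x20 = 5G.
 */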
2520 sc->advertise =
2521 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
2522 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
2523 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
2524 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
2525 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
2526 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
2527
2528 return (0);
2529
2530 invalid:
2531 device_printf(iflib_get_dev(ctx), "Invalid media type!\n");
2532
2533 return (EINVAL);
2534 } /* ixgbe_if_media_change */
2535
2536 /************************************************************************
2537 * ixgbe_if_promisc_set
2538 ************************************************************************/
2539 static int
2540 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2541 {
2542 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2543 if_t ifp = iflib_get_ifp(ctx);
2544 u32 rctl;
2545 int mcnt = 0;
2546
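/* FCTRL bits: UPE = unicast promiscuous, MPE = multicast promiscuous */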
2547 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2548 rctl &= (~IXGBE_FCTRL_UPE);
2549 if (if_getflags(ifp) & IFF_ALLMULTI)
2550 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2551 else {
2552 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
2553 }
2554 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2555 rctl &= (~IXGBE_FCTRL_MPE);
2556 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2557
2558 if (if_getflags(ifp) & IFF_PROMISC) {
2559 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2560 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2561 } else if (if_getflags(ifp) & IFF_ALLMULTI) {
2562 rctl |= IXGBE_FCTRL_MPE;
2563 rctl &= ~IXGBE_FCTRL_UPE;
2564 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2565 }
2566 return (0);
2567 } /* ixgbe_if_promisc_set */
2568
2569 /************************************************************************
2570 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2571 ************************************************************************/
2572 static int
2573 ixgbe_msix_link(void *arg)
2574 {
2575 struct ixgbe_softc *sc = arg;
2576 struct ixgbe_hw *hw = &sc->hw;
2577 u32 eicr, eicr_mask;
2578 s32 retval;
2579
2580 ++sc->link_irq;
2581
2582 /* Pause other interrupts */
2583 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2584
2585 /* First get the cause */
2586 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2587 /* Be sure the queue bits are not cleared */
2588 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2589 /* Clear interrupt with write */
2590 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2591
2592 /* Link status change */
2593 if (eicr & IXGBE_EICR_LSC) {
2594 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2595 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
2596 }
2597
2598 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2599 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
2600 (eicr & IXGBE_EICR_FLOW_DIR)) {
2601 /* This is probably overkill :) */
2602 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
2603 return (FILTER_HANDLED);
2604 /* Disable the interrupt */
2605 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
2606 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2607 } else if (eicr & IXGBE_EICR_ECC) {
2609 device_printf(iflib_get_dev(sc->ctx),
2610 "Received ECC Err, initiating reset\n");
2611 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2612 ixgbe_reset_hw(hw);
2613 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2614 }
2615
2616 /* Check for over temp condition */
2617 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2618 switch (sc->hw.mac.type) {
2619 case ixgbe_mac_X550EM_a:
2620 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2621 break;
2622 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2623 IXGBE_EICR_GPI_SDP0_X550EM_a);
2624 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2625 IXGBE_EICR_GPI_SDP0_X550EM_a);
2626 retval = hw->phy.ops.check_overtemp(hw);
2627 if (retval != IXGBE_ERR_OVERTEMP)
2628 break;
2629 device_printf(iflib_get_dev(sc->ctx),
2630 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2631 device_printf(iflib_get_dev(sc->ctx),
2632 "System shutdown required!\n");
2633 break;
2634 default:
2635 if (!(eicr & IXGBE_EICR_TS))
2636 break;
2637 retval = hw->phy.ops.check_overtemp(hw);
2638 if (retval != IXGBE_ERR_OVERTEMP)
2639 break;
2640 device_printf(iflib_get_dev(sc->ctx),
2641 "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2642 device_printf(iflib_get_dev(sc->ctx),
2643 "System shutdown required!\n");
2644 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2645 break;
2646 }
2647 }
2648
2649 /* Check for VF message */
2650 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
2651 (eicr & IXGBE_EICR_MAILBOX))
2652 sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
2653 }
2654
2655 if (ixgbe_is_sfp(hw)) {
2656 /* Pluggable optics-related interrupt */
2657 if (hw->mac.type >= ixgbe_mac_X540)
2658 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2659 else
2660 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2661
2662 if (eicr & eicr_mask) {
2663 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2664 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
2665 }
2666
2667 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2668 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2669 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2670 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2671 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
2672 }
2673 }
2674
2675 /* Check for fan failure */
2676 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2677 ixgbe_check_fan_failure(sc, eicr, true);
2678 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2679 }
2680
2681 /* External PHY interrupt */
2682 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2683 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2684 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2685 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
2686 }
2687
2688 return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
2689 } /* ixgbe_msix_link */
2690
2691 /************************************************************************
2692 * ixgbe_sysctl_interrupt_rate_handler
2693 ************************************************************************/
2694 static int
2695 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2696 {
2697 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
2698 int error;
2699 unsigned int reg, usec, rate;
2700
2701 if (atomic_load_acq_int(&que->sc->recovery_mode))
2702 return (EPERM);
2703
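/*
 * The EITR interval field sits in bits 3..11, in units of 2 usec
 * (the granularity this handler assumes), hence
 * rate = 500000 / interval.  On the write side,
 * 4000000 / rate == (500000 / rate) << 3, which drops the new
 * interval directly into the field.
 */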
2704 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
2705 usec = ((reg & 0x0FF8) >> 3);
2706 if (usec > 0)
2707 rate = 500000 / usec;
2708 else
2709 rate = 0;
2710 error = sysctl_handle_int(oidp, &rate, 0, req);
2711 if (error || !req->newptr)
2712 return error;
2713 reg &= ~0xfff; /* default, no limitation */
2714 ixgbe_max_interrupt_rate = 0;
2715 if (rate > 0 && rate < 500000) {
2716 if (rate < 1000)
2717 rate = 1000;
2718 ixgbe_max_interrupt_rate = rate;
2719 reg |= ((4000000/rate) & 0xff8);
2720 }
2721 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
2722
2723 return (0);
2724 } /* ixgbe_sysctl_interrupt_rate_handler */
2725
2726 /************************************************************************
2727 * ixgbe_add_device_sysctls
2728 ************************************************************************/
2729 static void
2730 ixgbe_add_device_sysctls(if_ctx_t ctx)
2731 {
2732 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2733 device_t dev = iflib_get_dev(ctx);
2734 struct ixgbe_hw *hw = &sc->hw;
2735 struct sysctl_oid_list *child;
2736 struct sysctl_ctx_list *ctx_list;
2737
2738 ctx_list = device_get_sysctl_ctx(dev);
2739 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2740
2741 /* Sysctls for all devices */
2742 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
2743 CTLTYPE_INT | CTLFLAG_RW,
2744 sc, 0, ixgbe_sysctl_flowcntl, "I",
2745 IXGBE_SYSCTL_DESC_SET_FC);
2746
2747 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
2748 CTLTYPE_INT | CTLFLAG_RW,
2749 sc, 0, ixgbe_sysctl_advertise, "I",
2750 IXGBE_SYSCTL_DESC_ADV_SPEED);
2751
2752 sc->enable_aim = ixgbe_enable_aim;
2753 SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2754 &sc->enable_aim, 0, "Interrupt Moderation");
2755
2756 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
2757 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2758 ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
2759
2760 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2761 "tso_tcp_flags_mask_first_segment",
2762 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2763 sc, 0, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2764 "TSO TCP flags mask for first segment");
2765
2766 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2767 "tso_tcp_flags_mask_middle_segment",
2768 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2769 sc, 1, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2770 "TSO TCP flags mask for middle segment");
2771
2772 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
2773 "tso_tcp_flags_mask_last_segment",
2774 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2775 sc, 2, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
2776 "TSO TCP flags mask for last segment");
2777
2778 #ifdef IXGBE_DEBUG
2779 /* testing sysctls (for all devices) */
2780 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
2781 CTLTYPE_INT | CTLFLAG_RW,
2782 sc, 0, ixgbe_sysctl_power_state,
2783 "I", "PCI Power State");
2784
2785 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
2786 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2787 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2788 #endif
2789 /* for X550 series devices */
2790 if (hw->mac.type >= ixgbe_mac_X550)
2791 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
2792 CTLTYPE_U16 | CTLFLAG_RW,
2793 sc, 0, ixgbe_sysctl_dmac,
2794 "I", "DMA Coalesce");
2795
2796 /* for WoL-capable devices */
2797 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2798 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
2799 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2800 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2801
2802 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
2803 CTLTYPE_U32 | CTLFLAG_RW,
2804 sc, 0, ixgbe_sysctl_wufc,
2805 "I", "Enable/Disable Wake Up Filters");
2806 }
2807
2808 /* for X552/X557-AT devices */
2809 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2810 struct sysctl_oid *phy_node;
2811 struct sysctl_oid_list *phy_list;
2812
2813 phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
2814 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
2815 phy_list = SYSCTL_CHILDREN(phy_node);
2816
2817 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
2818 CTLTYPE_U16 | CTLFLAG_RD,
2819 sc, 0, ixgbe_sysctl_phy_temp,
2820 "I", "Current External PHY Temperature (Celsius)");
2821
2822 SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
2823 "overtemp_occurred",
2824 CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
2825 ixgbe_sysctl_phy_overtemp_occurred, "I",
2826 "External PHY High Temperature Event Occurred");
2827 }
2828
2829 if (sc->feat_cap & IXGBE_FEATURE_EEE) {
2830 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
2831 CTLTYPE_INT | CTLFLAG_RW, sc, 0,
2832 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2833 }
2834 } /* ixgbe_add_device_sysctls */
2835
2836 /************************************************************************
2837 * ixgbe_allocate_pci_resources
2838 ************************************************************************/
2839 static int
2840 ixgbe_allocate_pci_resources(if_ctx_t ctx)
2841 {
2842 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2843 device_t dev = iflib_get_dev(ctx);
2844 int rid;
2845
2846 rid = PCIR_BAR(0);
2847 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2848 RF_ACTIVE);
2849
2850 if (!(sc->pci_mem)) {
2851 device_printf(dev, "Unable to allocate bus resource: memory\n");
2852 return (ENXIO);
2853 }
2854
2855 /* Save bus_space values for READ/WRITE_REG macros */
2856 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
2857 sc->osdep.mem_bus_space_handle =
2858 rman_get_bushandle(sc->pci_mem);
2859 /* Set hw values for shared code */
2860 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
2861
2862 return (0);
2863 } /* ixgbe_allocate_pci_resources */
2864
2865 /************************************************************************
2866 * ixgbe_detach - Device removal routine
2867 *
2868 * Called when the driver is being removed.
2869 * Stops the adapter and deallocates all the resources
2870 * that were allocated for driver operation.
2871 *
2872 * return 0 on success, positive on failure
2873 ************************************************************************/
2874 static int
2875 ixgbe_if_detach(if_ctx_t ctx)
2876 {
2877 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2878 device_t dev = iflib_get_dev(ctx);
2879 u32 ctrl_ext;
2880
2881 INIT_DEBUGOUT("ixgbe_detach: begin");
2882
2883 if (ixgbe_pci_iov_detach(dev) != 0) {
2884 device_printf(dev, "SR-IOV in use; detach first.\n");
2885 return (EBUSY);
2886 }
2887
2888 ixgbe_setup_low_power_mode(ctx);
2889
2890 /* let hardware know driver is unloading */
2891 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
2892 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2893 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
2894
2895 callout_drain(&sc->fw_mode_timer);
2896
2897 ixgbe_free_pci_resources(ctx);
2898 free(sc->mta, M_IXGBE);
2899
2900 return (0);
2901 } /* ixgbe_if_detach */
2902
2903 /************************************************************************
2904 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2905 *
2906 * Prepare the adapter/port for LPLU and/or WoL
2907 ************************************************************************/
2908 static int
2909 ixgbe_setup_low_power_mode(if_ctx_t ctx)
2910 {
2911 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2912 struct ixgbe_hw *hw = &sc->hw;
2913 device_t dev = iflib_get_dev(ctx);
2914 s32 error = 0;
2915
2916 if (!hw->wol_enabled)
2917 ixgbe_set_phy_power(hw, false);
2918
2919 /* Limit power management flow to X550EM baseT */
2920 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2921 hw->phy.ops.enter_lplu) {
2922 /* Turn off support for APM wakeup. (Using ACPI instead) */
2923 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
2924 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
2925
2926 /*
2927 * Clear Wake Up Status register to prevent any previous wakeup
2928 * events from waking us up immediately after we suspend.
2929 */
2930 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2931
2932 /*
2933 * Program the Wakeup Filter Control register with user filter
2934 * settings
2935 */
2936 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
2937
2938 /* Enable wakeups and power management in Wakeup Control */
2939 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2940 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2941
2942 /* X550EM baseT adapters need a special LPLU flow */
2943 hw->phy.reset_disable = true;
2944 ixgbe_if_stop(ctx);
2945 error = hw->phy.ops.enter_lplu(hw);
2946 if (error)
2947 device_printf(dev, "Error entering LPLU: %d\n", error);
2948 hw->phy.reset_disable = false;
2949 } else {
2950 /* Just stop for other adapters */
2951 ixgbe_if_stop(ctx);
2952 }
2953
2954 return error;
2955 } /* ixgbe_setup_low_power_mode */
2956
2957 /************************************************************************
2958 * ixgbe_shutdown - Shutdown entry point
2959 ************************************************************************/
2960 static int
2961 ixgbe_if_shutdown(if_ctx_t ctx)
2962 {
2963 int error = 0;
2964
2965 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2966
2967 error = ixgbe_setup_low_power_mode(ctx);
2968
2969 return (error);
2970 } /* ixgbe_if_shutdown */
2971
2972 /************************************************************************
2973 * ixgbe_suspend
2974 *
2975 * From D0 to D3
2976 ************************************************************************/
2977 static int
2978 ixgbe_if_suspend(if_ctx_t ctx)
2979 {
2980 int error = 0;
2981
2982 INIT_DEBUGOUT("ixgbe_suspend: begin");
2983
2984 error = ixgbe_setup_low_power_mode(ctx);
2985
2986 return (error);
2987 } /* ixgbe_if_suspend */
2988
2989 /************************************************************************
2990 * ixgbe_resume
2991 *
2992 * From D3 to D0
2993 ************************************************************************/
2994 static int
2995 ixgbe_if_resume(if_ctx_t ctx)
2996 {
2997 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2998 device_t dev = iflib_get_dev(ctx);
2999 if_t ifp = iflib_get_ifp(ctx);
3000 struct ixgbe_hw *hw = &sc->hw;
3001 u32 wus;
3002
3003 INIT_DEBUGOUT("ixgbe_resume: begin");
3004
3005 /* Read & clear WUS register */
3006 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3007 if (wus)
3008 device_printf(dev, "Woken up by (WUS): %#010x\n",
3009 IXGBE_READ_REG(hw, IXGBE_WUS));
3010 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3011 /* And clear WUFC until next low-power transition */
3012 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3013
3014 /*
3015 * Required after D3->D0 transition;
3016 * will re-advertise all previous advertised speeds
3017 */
3018 if (if_getflags(ifp) & IFF_UP)
3019 ixgbe_if_init(ctx);
3020
3021 return (0);
3022 } /* ixgbe_if_resume */
3023
3024 /************************************************************************
3025 * ixgbe_if_mtu_set - Ioctl mtu entry point
3026 *
3027 * Return 0 on success, EINVAL on failure
3028 ************************************************************************/
3029 static int
3030 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
3031 {
3032 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3033 int error = 0;
3034
3035 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
3036
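/*
 * max_frame_size includes IXGBE_MTU_HDR, which covers the link-layer
 * overhead (Ethernet header plus CRC) on top of the MTU.
 */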
3037 if (mtu > IXGBE_MAX_MTU) {
3038 error = EINVAL;
3039 } else {
3040 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
3041 }
3042
3043 return error;
3044 } /* ixgbe_if_mtu_set */
3045
3046 /************************************************************************
3047 * ixgbe_if_crcstrip_set
3048 ************************************************************************/
3049 static void
3050 ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
3051 {
3052 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3053 struct ixgbe_hw *hw = &sc->hw;
3054 /* crc stripping is set in two places:
3055 * IXGBE_HLREG0 (modified on init_locked and hw reset)
3056 * IXGBE_RDRXCTL (set by the original driver in
3057 * ixgbe_setup_hw_rsc() called in init_locked.
3058 * We disable the setting when netmap is compiled in).
3059 * We update the values here, but also in ixgbe.c because
3060 * init_locked sometimes is called outside our control.
3061 */
3062 uint32_t hl, rxc;
3063
3064 hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3065 rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3066 #ifdef NETMAP
3067 if (netmap_verbose)
3068 D("%s read HLREG 0x%x rxc 0x%x",
3069 onoff ? "enter" : "exit", hl, rxc);
3070 #endif
3071 /* hw requirements ... */
3072 rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3073 rxc |= IXGBE_RDRXCTL_RSCACKC;
3074 if (onoff && !crcstrip) {
3075 /* keep the crc. Fast rx */
3076 hl &= ~IXGBE_HLREG0_RXCRCSTRP;
3077 rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
3078 } else {
3079 /* reset default mode */
3080 hl |= IXGBE_HLREG0_RXCRCSTRP;
3081 rxc |= IXGBE_RDRXCTL_CRCSTRIP;
3082 }
3083 #ifdef NETMAP
3084 if (netmap_verbose)
3085 D("%s write HLREG 0x%x rxc 0x%x",
3086 onoff ? "enter" : "exit", hl, rxc);
3087 #endif
3088 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
3089 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
3090 } /* ixgbe_if_crcstrip_set */
3091
3092 /*********************************************************************
3093 * ixgbe_if_init - Init entry point
3094 *
3095 * Used in two ways: It is used by the stack as an init
3096 * entry point in the network interface structure. It is also
3097 * used by the driver as a hw/sw initialization routine to
3098 * get to a consistent state.
3099 *
3101 **********************************************************************/
3102 void
3103 ixgbe_if_init(if_ctx_t ctx)
3104 {
3105 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3106 if_t ifp = iflib_get_ifp(ctx);
3107 device_t dev = iflib_get_dev(ctx);
3108 struct ixgbe_hw *hw = &sc->hw;
3109 struct ix_rx_queue *rx_que;
3110 struct ix_tx_queue *tx_que;
3111 u32 txdctl, mhadd;
3112 u32 rxdctl, rxctrl;
3113 u32 ctrl_ext;
3114
3115 int i, j, err;
3116
3117 INIT_DEBUGOUT("ixgbe_if_init: begin");
3118
3119 /* Queue indices may change with IOV mode */
3120 ixgbe_align_all_queue_indices(sc);
3121
3122 /* reprogram the RAR[0] in case user changed it. */
3123 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3124
3125 /* Get the latest mac address, User can use a LAA */
3126 bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3127 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3128 hw->addr_ctrl.rar_used_count = 1;
3129
3130 ixgbe_init_hw(hw);
3131
3132 ixgbe_initialize_iov(sc);
3133
3134 ixgbe_initialize_transmit_units(ctx);
3135
3136 /* Setup Multicast table */
3137 ixgbe_if_multi_set(ctx);
3138
3139 /* Determine the correct mbuf pool, based on frame size */
3140 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3141
3142 /* Configure RX settings */
3143 ixgbe_initialize_receive_units(ctx);
3144
3145 /*
3146 * Initialize variable holding task enqueue requests
3147 * from MSI-X interrupts
3148 */
3149 sc->task_requests = 0;
3150
3151 /* Enable SDP & MSI-X interrupts based on adapter */
3152 ixgbe_config_gpie(sc);
3153
3154 /* Set MTU size */
3155 if (if_getmtu(ifp) > ETHERMTU) {
3156 /* aka IXGBE_MAXFRS on 82599 and newer */
3157 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3158 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3159 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
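/*
 * Worked example (assuming IXGBE_MTU_HDR is the 18 bytes of
 * Ethernet header plus CRC): an MTU of 9000 gives a
 * max_frame_size of 9018, placed in the MFS field in the
 * upper 16 bits of MHADD.
 */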
3160 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3161 }
3162
3163 /* Now enable all the queues */
3164 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
3165 struct tx_ring *txr = &tx_que->txr;
3166
3167 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3168 txdctl |= IXGBE_TXDCTL_ENABLE;
3169 /* Set WTHRESH to 8, burst writeback */
3170 txdctl |= (8 << 16);
3171 /*
3172 * When the internal queue falls below PTHRESH (32),
3173 * start prefetching as long as there are at least
3174 * HTHRESH (1) buffers ready. The values are taken
3175 * from the Intel linux driver 3.8.21.
3176 * Prefetching enables tx line rate even with 1 queue.
3177 */
3178 txdctl |= (32 << 0) | (1 << 8);
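/*
 * Illustrative value (assuming the usual byte-aligned TXDCTL
 * fields PTHRESH[6:0], HTHRESH[14:8], WTHRESH[22:16]): the
 * thresholds ORed in above amount to
 *   (8 << 16) | (1 << 8) | 32 == 0x00080120
 * on top of the ENABLE bit.
 */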
3179 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3180 }
3181
3182 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
3183 struct rx_ring *rxr = &rx_que->rxr;
3184
3185 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3186 if (hw->mac.type == ixgbe_mac_82598EB) {
3187 /*
3188 * PTHRESH = 32 (0x20)
3189 * HTHRESH = 4
3190 * WTHRESH = 8
3191 */
3192 rxdctl &= ~0x3FFFFF;
3193 rxdctl |= 0x080420;
3194 }
3195 rxdctl |= IXGBE_RXDCTL_ENABLE;
3196 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
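/*
 * The ENABLE bit can take a moment to latch, so poll it for
 * up to 10ms (10 x 1ms below) before moving on.
 */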
3197 for (j = 0; j < 10; j++) {
3198 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3199 IXGBE_RXDCTL_ENABLE)
3200 break;
3201 else
3202 msec_delay(1);
3203 }
3204 wmb();
3205 }
3206
3207 /* Enable Receive engine */
3208 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3209 if (hw->mac.type == ixgbe_mac_82598EB)
3210 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3211 rxctrl |= IXGBE_RXCTRL_RXEN;
3212 ixgbe_enable_rx_dma(hw, rxctrl);
3213
3214 /* Set up MSI/MSI-X routing */
3215 if (ixgbe_enable_msix) {
3216 ixgbe_configure_ivars(sc);
3217 /* Set up auto-mask */
3218 if (hw->mac.type == ixgbe_mac_82598EB)
3219 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3220 else {
3221 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3222 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3223 }
3224 } else { /* Simple settings for Legacy/MSI */
3225 ixgbe_set_ivar(sc, 0, 0, 0);
3226 ixgbe_set_ivar(sc, 0, 0, 1);
3227 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3228 }
3229
3230 ixgbe_init_fdir(sc);
3231
3232 /*
3233 * Check on any SFP devices that
3234 * need to be kick-started
3235 */
3236 if (hw->phy.type == ixgbe_phy_none) {
3237 err = hw->phy.ops.identify(hw);
3238 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3239 device_printf(dev,
3240 "Unsupported SFP+ module type was detected.\n");
3241 return;
3242 }
3243 }
3244
3245 /* Set moderation on the Link interrupt */
3246 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3247
3248 /* Enable power to the phy. */
3249 ixgbe_set_phy_power(hw, true);
3250
3251 /* Config/Enable Link */
3252 ixgbe_config_link(ctx);
3253
3254 /* Hardware Packet Buffer & Flow Control setup */
3255 ixgbe_config_delay_values(sc);
3256
3257 /* Initialize the FC settings */
3258 ixgbe_start_hw(hw);
3259
3260 /* Set up VLAN support and filter */
3261 ixgbe_setup_vlan_hw_support(ctx);
3262
3263 /* Setup DMA Coalescing */
3264 ixgbe_config_dmac(sc);
3265
3266 /* And now turn on interrupts */
3267 ixgbe_if_enable_intr(ctx);
3268
3269 /* Enable the use of the MBX by the VF's */
3270 if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3271 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3272 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3273 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3274 }
3275
3276 } /* ixgbe_if_init */
3277
3278 /************************************************************************
3279 * ixgbe_set_ivar
3280 *
3281 * Setup the correct IVAR register for a particular MSI-X interrupt
3282 * (yes this is all very magic and confusing :)
3283 * - entry is the register array entry
3284 * - vector is the MSI-X vector for this queue
3285 * - type is RX/TX/MISC
3286 ************************************************************************/
3287 static void
3288 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3289 {
3290 struct ixgbe_hw *hw = &sc->hw;
3291 u32 ivar, index;
3292
3293 vector |= IXGBE_IVAR_ALLOC_VAL;
3294
3295 switch (hw->mac.type) {
3296 case ixgbe_mac_82598EB:
3297 if (type == -1)
3298 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3299 else
3300 entry += (type * 64);
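/*
 * Illustrative decode: each 32-bit IVAR register packs four
 * 8-bit entries, so for entry 5 the index below selects
 * IVAR(1) and the vector lands in byte (5 & 0x3) = 1, i.e.
 * bits 15:8.
 */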
3301 index = (entry >> 2) & 0x1F;
3302 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3303 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3304 ivar |= (vector << (8 * (entry & 0x3)));
3305 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3306 break;
3307 case ixgbe_mac_82599EB:
3308 case ixgbe_mac_X540:
3309 case ixgbe_mac_X550:
3310 case ixgbe_mac_X550EM_x:
3311 case ixgbe_mac_X550EM_a:
3312 if (type == -1) { /* MISC IVAR */
3313 index = (entry & 1) * 8;
3314 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3315 ivar &= ~(0xFF << index);
3316 ivar |= (vector << index);
3317 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3318 } else { /* RX/TX IVARS */
3319 index = (16 * (entry & 1)) + (8 * type);
3320 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3321 ivar &= ~(0xFF << index);
3322 ivar |= (vector << index);
3323 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3324 }
break;
3325 default:
3326 break;
3327 }
3328 } /* ixgbe_set_ivar */
3329
3330 /************************************************************************
3331 * ixgbe_configure_ivars
3332 ************************************************************************/
3333 static void
3334 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3335 {
3336 struct ix_rx_queue *rx_que = sc->rx_queues;
3337 struct ix_tx_queue *tx_que = sc->tx_queues;
3338 u32 newitr;
3339
3340 if (ixgbe_max_interrupt_rate > 0)
3341 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3342 else {
3343 /*
3344 * Disable DMA coalescing if interrupt moderation is
3345 * disabled.
3346 */
3347 sc->dmac = 0;
3348 newitr = 0;
3349 }
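/*
 * Worked example: EITR holds the moderation interval in 2us
 * units in bits [11:3], hence the 0x0FF8 mask.  A cap of
 * 31250 interrupts/s yields 4000000/31250 = 128 (0x80), an
 * interval field of 16, i.e. 32us between interrupts.
 */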
3350
3351 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3352 struct rx_ring *rxr = &rx_que->rxr;
3353
3354 /* First the RX queue entry */
3355 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3356
3357 /* Set an Initial EITR value */
3358 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3359 }
3360 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3361 struct tx_ring *txr = &tx_que->txr;
3362
3363 /* ... and the TX */
3364 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3365 }
3366 /* For the Link interrupt */
3367 ixgbe_set_ivar(sc, 1, sc->vector, -1);
3368 } /* ixgbe_configure_ivars */
3369
3370 /************************************************************************
3371 * ixgbe_config_gpie
3372 ************************************************************************/
3373 static void
3374 ixgbe_config_gpie(struct ixgbe_softc *sc)
3375 {
3376 struct ixgbe_hw *hw = &sc->hw;
3377 u32 gpie;
3378
3379 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3380
3381 if (sc->intr_type == IFLIB_INTR_MSIX) {
3382 /* Enable Enhanced MSI-X mode */
3383 gpie |= IXGBE_GPIE_MSIX_MODE
3384 | IXGBE_GPIE_EIAME
3385 | IXGBE_GPIE_PBA_SUPPORT
3386 | IXGBE_GPIE_OCD;
3387 }
3388
3389 /* Fan Failure Interrupt */
3390 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3391 gpie |= IXGBE_SDP1_GPIEN;
3392
3393 /* Thermal Sensor Interrupt */
3394 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3395 gpie |= IXGBE_SDP0_GPIEN_X540;
3396
3397 /* Link detection */
3398 switch (hw->mac.type) {
3399 case ixgbe_mac_82599EB:
3400 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3401 break;
3402 case ixgbe_mac_X550EM_x:
3403 case ixgbe_mac_X550EM_a:
3404 gpie |= IXGBE_SDP0_GPIEN_X540;
3405 break;
3406 default:
3407 break;
3408 }
3409
3410 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3411
3412 } /* ixgbe_config_gpie */
3413
3414 /************************************************************************
3415 * ixgbe_config_delay_values
3416 *
3417 * Requires sc->max_frame_size to be set.
3418 ************************************************************************/
3419 static void
3420 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3421 {
3422 struct ixgbe_hw *hw = &sc->hw;
3423 u32 rxpb, frame, size, tmp;
3424
3425 frame = sc->max_frame_size;
3426
3427 /* Calculate High Water */
3428 switch (hw->mac.type) {
3429 case ixgbe_mac_X540:
3430 case ixgbe_mac_X550:
3431 case ixgbe_mac_X550EM_x:
3432 case ixgbe_mac_X550EM_a:
3433 tmp = IXGBE_DV_X540(frame, frame);
3434 break;
3435 default:
3436 tmp = IXGBE_DV(frame, frame);
3437 break;
3438 }
3439 size = IXGBE_BT2KB(tmp);
3440 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3441 hw->fc.high_water[0] = rxpb - size;
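/*
 * Sketch of the arithmetic: RXPBSIZE is read in bytes and
 * shifted down to KB, while the delay value (in bit times)
 * is converted to KB by IXGBE_BT2KB, so high_water is the
 * part of packet buffer 0 left above the worst-case
 * in-flight data.
 */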
3442
3443 /* Now calculate Low Water */
3444 switch (hw->mac.type) {
3445 case ixgbe_mac_X540:
3446 case ixgbe_mac_X550:
3447 case ixgbe_mac_X550EM_x:
3448 case ixgbe_mac_X550EM_a:
3449 tmp = IXGBE_LOW_DV_X540(frame);
3450 break;
3451 default:
3452 tmp = IXGBE_LOW_DV(frame);
3453 break;
3454 }
3455 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3456
3457 hw->fc.pause_time = IXGBE_FC_PAUSE;
3458 hw->fc.send_xon = true;
3459 } /* ixgbe_config_delay_values */
3460
3461 /************************************************************************
3462 * ixgbe_set_multi - Multicast Update
3463 *
3464 * Called whenever multicast address list is updated.
3465 ************************************************************************/
3466 static u_int
3467 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3468 {
3469 struct ixgbe_softc *sc = arg;
3470 struct ixgbe_mc_addr *mta = sc->mta;
3471
3472 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3473 return (0);
3474 bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3475 mta[idx].vmdq = sc->pool;
3476
3477 return (1);
3478 } /* ixgbe_mc_filter_apply */
3479
3480 static void
3481 ixgbe_if_multi_set(if_ctx_t ctx)
3482 {
3483 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3484 struct ixgbe_mc_addr *mta;
3485 if_t ifp = iflib_get_ifp(ctx);
3486 u8 *update_ptr;
3487 u32 fctrl;
3488 u_int mcnt;
3489
3490 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3491
3492 mta = sc->mta;
3493 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3494
3495 mcnt = if_foreach_llmaddr(ifp, ixgbe_mc_filter_apply, sc);
3496
3497 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3498 update_ptr = (u8 *)mta;
3499 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3500 ixgbe_mc_array_itr, true);
3501 }
3502
3503 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3504
3505 if (if_getflags(ifp) & IFF_PROMISC)
3506 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3507 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3508 if_getflags(ifp) & IFF_ALLMULTI) {
3509 fctrl |= IXGBE_FCTRL_MPE;
3510 fctrl &= ~IXGBE_FCTRL_UPE;
3511 } else
3512 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3513
3514 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3515 } /* ixgbe_if_multi_set */
3516
3517 /************************************************************************
3518 * ixgbe_mc_array_itr
3519 *
3520 * An iterator function needed by the multicast shared code.
3521 * It feeds the shared code routine the addresses in the
3522 * array built by ixgbe_if_multi_set() one by one.
3523 ************************************************************************/
3524 static u8 *
3525 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3526 {
3527 struct ixgbe_mc_addr *mta;
3528
3529 mta = (struct ixgbe_mc_addr *)*update_ptr;
3530 *vmdq = mta->vmdq;
3531
3532 *update_ptr = (u8*)(mta + 1);
3533
3534 return (mta->addr);
3535 } /* ixgbe_mc_array_itr */
3536
3537 /************************************************************************
3538 * ixgbe_if_timer - Timer routine
3539 *
3540 * Checks for link status, updates statistics,
3541 * and runs the watchdog check.
3542 ************************************************************************/
3543 static void
3544 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3545 {
3546 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3547
3548 if (qid != 0)
3549 return;
3550
3551 /* Check for pluggable optics */
3552 if (sc->sfp_probe)
3553 if (!ixgbe_sfp_probe(ctx))
3554 return; /* Nothing to do */
3555
3556 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3557
3558 /* Fire off the adminq task */
3559 iflib_admin_intr_deferred(ctx);
3560
3561 } /* ixgbe_if_timer */
3562
3563 /************************************************************************
3564 * ixgbe_fw_mode_timer - FW mode timer routine
3565 ************************************************************************/
3566 static void
3567 ixgbe_fw_mode_timer(void *arg)
3568 {
3569 struct ixgbe_softc *sc = arg;
3570 struct ixgbe_hw *hw = &sc->hw;
3571
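/*
 * The cmpset below flips recovery_mode 0 -> 1 atomically so
 * the warning is logged only once per transition into
 * recovery mode; the mirror cmpset in the else path re-arms
 * it once the firmware recovers.
 */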
3572 if (ixgbe_fw_recovery_mode(hw)) {
3573 if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
3574 /* Firmware error detected, entering recovery mode */
3575 device_printf(sc->dev, "Firmware recovery mode detected. Limiting"
3576 " functionality. Refer to the Intel(R) Ethernet Adapters"
3577 " and Devices User Guide for details on firmware recovery"
3578 " mode.\n");
3579
3580 if (hw->adapter_stopped == FALSE)
3581 ixgbe_if_stop(sc->ctx);
3582 }
3583 } else
3584 atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
3585
3586
3587 callout_reset(&sc->fw_mode_timer, hz,
3588 ixgbe_fw_mode_timer, sc);
3589 } /* ixgbe_fw_mode_timer */
3590
3591 /************************************************************************
3592 * ixgbe_sfp_probe
3593 *
3594 * Determine whether a port has optics inserted.
3595 ************************************************************************/
3596 static bool
3597 ixgbe_sfp_probe(if_ctx_t ctx)
3598 {
3599 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3600 struct ixgbe_hw *hw = &sc->hw;
3601 device_t dev = iflib_get_dev(ctx);
3602 bool result = false;
3603
3604 if ((hw->phy.type == ixgbe_phy_nl) &&
3605 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3606 s32 ret = hw->phy.ops.identify_sfp(hw);
3607 if (ret)
3608 goto out;
3609 ret = hw->phy.ops.reset(hw);
3610 sc->sfp_probe = false;
3611 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3612 device_printf(dev, "Unsupported SFP+ module detected!\n");
3613 device_printf(dev,
3614 "Reload driver with supported module.\n");
3615 goto out;
3616 } else
3617 device_printf(dev, "SFP+ module detected!\n");
3618 /* We now have supported optics */
3619 result = true;
3620 }
3621 out:
3622
3623 return (result);
3624 } /* ixgbe_sfp_probe */
3625
3626 /************************************************************************
3627 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3628 ************************************************************************/
3629 static void
3630 ixgbe_handle_mod(void *context)
3631 {
3632 if_ctx_t ctx = context;
3633 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3634 struct ixgbe_hw *hw = &sc->hw;
3635 device_t dev = iflib_get_dev(ctx);
3636 u32 err, cage_full = 0;
3637
3638 if (hw->need_crosstalk_fix) {
3639 switch (hw->mac.type) {
3640 case ixgbe_mac_82599EB:
3641 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3642 IXGBE_ESDP_SDP2;
3643 break;
3644 case ixgbe_mac_X550EM_x:
3645 case ixgbe_mac_X550EM_a:
3646 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3647 IXGBE_ESDP_SDP0;
3648 break;
3649 default:
3650 break;
3651 }
3652
3653 if (!cage_full)
3654 goto handle_mod_out;
3655 }
3656
3657 err = hw->phy.ops.identify_sfp(hw);
3658 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3659 device_printf(dev,
3660 "Unsupported SFP+ module type was detected.\n");
3661 goto handle_mod_out;
3662 }
3663
3664 if (hw->mac.type == ixgbe_mac_82598EB)
3665 err = hw->phy.ops.reset(hw);
3666 else
3667 err = hw->mac.ops.setup_sfp(hw);
3668
3669 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3670 device_printf(dev,
3671 "Setup failure - unsupported SFP+ module type.\n");
3672 goto handle_mod_out;
3673 }
3674 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3675 return;
3676
3677 handle_mod_out:
3678 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3679 } /* ixgbe_handle_mod */
3680
3681
3682 /************************************************************************
3683 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3684 ************************************************************************/
3685 static void
3686 ixgbe_handle_msf(void *context)
3687 {
3688 if_ctx_t ctx = context;
3689 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3690 struct ixgbe_hw *hw = &sc->hw;
3691 u32 autoneg;
3692 bool negotiate;
3693
3694 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3695 sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3696
3697 autoneg = hw->phy.autoneg_advertised;
3698 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3699 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3700 if (hw->mac.ops.setup_link)
3701 hw->mac.ops.setup_link(hw, autoneg, true);
3702
3703 /* Adjust media types shown in ifconfig */
3704 ifmedia_removeall(sc->media);
3705 ixgbe_add_media_types(sc->ctx);
3706 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3707 } /* ixgbe_handle_msf */
3708
3709 /************************************************************************
3710 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3711 ************************************************************************/
3712 static void
3713 ixgbe_handle_phy(void *context)
3714 {
3715 if_ctx_t ctx = context;
3716 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3717 struct ixgbe_hw *hw = &sc->hw;
3718 int error;
3719
3720 error = hw->phy.ops.handle_lasi(hw);
3721 if (error == IXGBE_ERR_OVERTEMP)
3722 device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3723 else if (error)
3724 device_printf(sc->dev,
3725 "Error handling LASI interrupt: %d\n", error);
3726 } /* ixgbe_handle_phy */
3727
3728 /************************************************************************
3729 * ixgbe_if_stop - Stop the hardware
3730 *
3731 * Disables all traffic on the adapter by issuing a
3732 * global reset on the MAC and deallocates TX/RX buffers.
3733 ************************************************************************/
3734 static void
3735 ixgbe_if_stop(if_ctx_t ctx)
3736 {
3737 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3738 struct ixgbe_hw *hw = &sc->hw;
3739
3740 INIT_DEBUGOUT("ixgbe_if_stop: begin");
3741
3742 ixgbe_reset_hw(hw);
3743 hw->adapter_stopped = false;
3744 ixgbe_stop_adapter(hw);
3745 if (hw->mac.type == ixgbe_mac_82599EB)
3746 ixgbe_stop_mac_link_on_d3_82599(hw);
3747 /* Turn off the laser - noop with no optics */
3748 ixgbe_disable_tx_laser(hw);
3749
3750 /* Update the stack */
3751 sc->link_up = false;
3752 ixgbe_if_update_admin_status(ctx);
3753
3754 /* reprogram the RAR[0] in case user changed it. */
3755 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
3756
3757 return;
3758 } /* ixgbe_if_stop */
3759
3760 /************************************************************************
3761 * ixgbe_if_update_admin_status - Update OS on link state
3762 *
3763 * Note: Only updates the OS on the cached link state.
3764 * The real check of the hardware only happens with
3765 * a link interrupt.
3766 ************************************************************************/
3767 static void
3768 ixgbe_if_update_admin_status(if_ctx_t ctx)
3769 {
3770 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3771 device_t dev = iflib_get_dev(ctx);
3772
3773 if (sc->link_up) {
3774 if (sc->link_active == false) {
3775 if (bootverbose)
3776 device_printf(dev, "Link is up %d Gbps %s\n",
3777 ((sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 10 : 1),
3778 "Full Duplex");
3779 sc->link_active = true;
3780 /* Update any Flow Control changes */
3781 ixgbe_fc_enable(&sc->hw);
3782 /* Update DMA coalescing config */
3783 ixgbe_config_dmac(sc);
3784 iflib_link_state_change(ctx, LINK_STATE_UP,
3785 ixgbe_link_speed_to_baudrate(sc->link_speed));
3786
3787 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3788 ixgbe_ping_all_vfs(sc);
3789 }
3790 } else { /* Link down */
3791 if (sc->link_active == true) {
3792 if (bootverbose)
3793 device_printf(dev, "Link is Down\n");
3794 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3795 sc->link_active = false;
3796 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3797 ixgbe_ping_all_vfs(sc);
3798 }
3799 }
3800
3801 /* Handle task requests from msix_link() */
3802 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
3803 ixgbe_handle_mod(ctx);
3804 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
3805 ixgbe_handle_msf(ctx);
3806 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
3807 ixgbe_handle_mbx(ctx);
3808 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
3809 ixgbe_reinit_fdir(ctx);
3810 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
3811 ixgbe_handle_phy(ctx);
3812 sc->task_requests = 0;
3813
3814 ixgbe_update_stats_counters(sc);
3815 } /* ixgbe_if_update_admin_status */
3816
3817 /************************************************************************
3818 * ixgbe_config_dmac - Configure DMA Coalescing
3819 ************************************************************************/
3820 static void
3821 ixgbe_config_dmac(struct ixgbe_softc *sc)
3822 {
3823 struct ixgbe_hw *hw = &sc->hw;
3824 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3825
3826 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3827 return;
3828
3829 if (dcfg->watchdog_timer != sc->dmac ||
3830 dcfg->link_speed != sc->link_speed) {
3831 dcfg->watchdog_timer = sc->dmac;
3832 dcfg->fcoe_en = false;
3833 dcfg->link_speed = sc->link_speed;
3834 dcfg->num_tcs = 1;
3835
3836 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3837 dcfg->watchdog_timer, dcfg->link_speed);
3838
3839 hw->mac.ops.dmac_config(hw);
3840 }
3841 } /* ixgbe_config_dmac */
3842
3843 /************************************************************************
3844 * ixgbe_if_enable_intr
3845 ************************************************************************/
3846 void
3847 ixgbe_if_enable_intr(if_ctx_t ctx)
3848 {
3849 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3850 struct ixgbe_hw *hw = &sc->hw;
3851 struct ix_rx_queue *que = sc->rx_queues;
3852 u32 mask, fwsm;
3853
3854 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3855
3856 switch (sc->hw.mac.type) {
3857 case ixgbe_mac_82599EB:
3858 mask |= IXGBE_EIMS_ECC;
3859 /* Temperature sensor on some adapters */
3860 mask |= IXGBE_EIMS_GPI_SDP0;
3861 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3862 mask |= IXGBE_EIMS_GPI_SDP1;
3863 mask |= IXGBE_EIMS_GPI_SDP2;
3864 break;
3865 case ixgbe_mac_X540:
3866 /* Detect if Thermal Sensor is enabled */
3867 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3868 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3869 mask |= IXGBE_EIMS_TS;
3870 mask |= IXGBE_EIMS_ECC;
3871 break;
3872 case ixgbe_mac_X550:
3873 /* MAC thermal sensor is automatically enabled */
3874 mask |= IXGBE_EIMS_TS;
3875 mask |= IXGBE_EIMS_ECC;
3876 break;
3877 case ixgbe_mac_X550EM_x:
3878 case ixgbe_mac_X550EM_a:
3879 /* Some devices use SDP0 for important information */
3880 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3881 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3882 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3883 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3884 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3885 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3886 mask |= IXGBE_EICR_GPI_SDP0_X540;
3887 mask |= IXGBE_EIMS_ECC;
3888 break;
3889 default:
3890 break;
3891 }
3892
3893 /* Enable Fan Failure detection */
3894 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3895 mask |= IXGBE_EIMS_GPI_SDP1;
3896 /* Enable SR-IOV */
3897 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3898 mask |= IXGBE_EIMS_MAILBOX;
3899 /* Enable Flow Director */
3900 if (sc->feat_en & IXGBE_FEATURE_FDIR)
3901 mask |= IXGBE_EIMS_FLOW_DIR;
3902
3903 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3904
3905 /* With MSI-X we use auto clear */
3906 if (sc->intr_type == IFLIB_INTR_MSIX) {
3907 mask = IXGBE_EIMS_ENABLE_MASK;
3908 /* Don't autoclear Link */
3909 mask &= ~IXGBE_EIMS_OTHER;
3910 mask &= ~IXGBE_EIMS_LSC;
3911 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
3912 mask &= ~IXGBE_EIMS_MAILBOX;
3913 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3914 }
3915
3916 /*
3917 * Now enable all queues; this is done separately to
3918 * allow for handling the extended (beyond 32) MSI-X
3919 * vectors that can be used by 82599 and newer.
3920 */
3921 for (int i = 0; i < sc->num_rx_queues; i++, que++)
3922 ixgbe_enable_queue(sc, que->msix);
3923
3924 IXGBE_WRITE_FLUSH(hw);
3925
3926 } /* ixgbe_if_enable_intr */
3927
3928 /************************************************************************
3929 * ixgbe_disable_intr
3930 ************************************************************************/
3931 static void
3932 ixgbe_if_disable_intr(if_ctx_t ctx)
3933 {
3934 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3935
3936 if (sc->intr_type == IFLIB_INTR_MSIX)
3937 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3938 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3939 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3940 } else {
3941 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3942 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3943 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3944 }
3945 IXGBE_WRITE_FLUSH(&sc->hw);
3946
3947 } /* ixgbe_if_disable_intr */
3948
3949 /************************************************************************
3950 * ixgbe_link_intr_enable
3951 ************************************************************************/
3952 static void
3953 ixgbe_link_intr_enable(if_ctx_t ctx)
3954 {
3955 struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
3956
3957 /* Re-enable other interrupts */
3958 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3959 } /* ixgbe_link_intr_enable */
3960
3961 /************************************************************************
3962 * ixgbe_if_rx_queue_intr_enable
3963 ************************************************************************/
3964 static int
3965 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
3966 {
3967 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3968 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
3969
3970 ixgbe_enable_queue(sc, que->msix);
3971
3972 return (0);
3973 } /* ixgbe_if_rx_queue_intr_enable */
3974
3975 /************************************************************************
3976 * ixgbe_enable_queue
3977 ************************************************************************/
3978 static void
3979 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
3980 {
3981 struct ixgbe_hw *hw = &sc->hw;
3982 u64 queue = 1ULL << vector;
3983 u32 mask;
3984
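/*
 * 82598 has a single 32-bit EIMS register; later MACs split
 * up to 64 queue vectors across EIMS_EX(0) and EIMS_EX(1),
 * so write whichever half of the 64-bit queue mask is
 * non-zero.
 */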
3985 if (hw->mac.type == ixgbe_mac_82598EB) {
3986 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
3987 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3988 } else {
3989 mask = (queue & 0xFFFFFFFF);
3990 if (mask)
3991 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3992 mask = (queue >> 32);
3993 if (mask)
3994 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3995 }
3996 } /* ixgbe_enable_queue */
3997
3998 /************************************************************************
3999 * ixgbe_disable_queue
4000 ************************************************************************/
4001 static void
4002 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
4003 {
4004 struct ixgbe_hw *hw = &sc->hw;
4005 u64 queue = 1ULL << vector;
4006 u32 mask;
4007
4008 if (hw->mac.type == ixgbe_mac_82598EB) {
4009 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4010 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
4011 } else {
4012 mask = (queue & 0xFFFFFFFF);
4013 if (mask)
4014 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
4015 mask = (queue >> 32);
4016 if (mask)
4017 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
4018 }
4019 } /* ixgbe_disable_queue */
4020
4021 /************************************************************************
4022 * ixgbe_intr - Legacy Interrupt Service Routine
4023 ************************************************************************/
4024 int
4025 ixgbe_intr(void *arg)
4026 {
4027 struct ixgbe_softc *sc = arg;
4028 struct ix_rx_queue *que = sc->rx_queues;
4029 struct ixgbe_hw *hw = &sc->hw;
4030 if_ctx_t ctx = sc->ctx;
4031 u32 eicr, eicr_mask;
4032
4033 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
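/* Reading EICR returns and acknowledges (clears) the asserted causes. */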
4034
4035 ++que->irqs;
4036 if (eicr == 0) {
4037 ixgbe_if_enable_intr(ctx);
4038 return (FILTER_HANDLED);
4039 }
4040
4041 /* Check for fan failure */
4042 if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
4043 (eicr & IXGBE_EICR_GPI_SDP1)) {
4044 device_printf(sc->dev,
4045 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4046 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4047 }
4048
4049 /* Link status change */
4050 if (eicr & IXGBE_EICR_LSC) {
4051 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
4052 iflib_admin_intr_deferred(ctx);
4053 }
4054
4055 if (ixgbe_is_sfp(hw)) {
4056 /* Pluggable optics-related interrupt */
4057 if (hw->mac.type >= ixgbe_mac_X540)
4058 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
4059 else
4060 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4061
4062 if (eicr & eicr_mask) {
4063 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
4064 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
4065 }
4066
4067 if ((hw->mac.type == ixgbe_mac_82599EB) &&
4068 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4069 IXGBE_WRITE_REG(hw, IXGBE_EICR,
4070 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4071 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4072 }
4073 }
4074
4075 /* External PHY interrupt */
4076 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
4077 (eicr & IXGBE_EICR_GPI_SDP0_X540))
4078 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
4079
4080 return (FILTER_SCHEDULE_THREAD);
4081 } /* ixgbe_intr */
4082
4083 /************************************************************************
4084 * ixgbe_free_pci_resources
4085 ************************************************************************/
4086 static void
4087 ixgbe_free_pci_resources(if_ctx_t ctx)
4088 {
4089 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4090 struct ix_rx_queue *que = sc->rx_queues;
4091 device_t dev = iflib_get_dev(ctx);
4092
4093 /* Release all MSI-X queue resources */
4094 if (sc->intr_type == IFLIB_INTR_MSIX)
4095 iflib_irq_free(ctx, &sc->irq);
4096
4097 if (que != NULL) {
4098 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
4099 iflib_irq_free(ctx, &que->que_irq);
4100 }
4101 }
4102
4103 if (sc->pci_mem != NULL)
4104 bus_release_resource(dev, SYS_RES_MEMORY,
4105 rman_get_rid(sc->pci_mem), sc->pci_mem);
4106 } /* ixgbe_free_pci_resources */
4107
4108 /************************************************************************
4109 * ixgbe_sysctl_flowcntl
4110 *
4111 * SYSCTL wrapper around setting Flow Control
4112 ************************************************************************/
4113 static int
4114 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4115 {
4116 struct ixgbe_softc *sc;
4117 int error, fc;
4118
4119 sc = (struct ixgbe_softc *)arg1;
4120 fc = sc->hw.fc.current_mode;
4121
4122 error = sysctl_handle_int(oidp, &fc, 0, req);
4123 if ((error) || (req->newptr == NULL))
4124 return (error);
4125
4126 /* Don't bother if it's not changed */
4127 if (fc == sc->hw.fc.current_mode)
4128 return (0);
4129
4130 return ixgbe_set_flowcntl(sc, fc);
4131 } /* ixgbe_sysctl_flowcntl */
4132
4133 /************************************************************************
4134 * ixgbe_set_flowcntl - Set flow control
4135 *
4136 * Flow control values:
4137 * 0 - off
4138 * 1 - rx pause
4139 * 2 - tx pause
4140 * 3 - full
4141 ************************************************************************/
4142 static int
4143 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4144 {
4145 switch (fc) {
4146 case ixgbe_fc_rx_pause:
4147 case ixgbe_fc_tx_pause:
4148 case ixgbe_fc_full:
4149 sc->hw.fc.requested_mode = fc;
4150 if (sc->num_rx_queues > 1)
4151 ixgbe_disable_rx_drop(sc);
4152 break;
4153 case ixgbe_fc_none:
4154 sc->hw.fc.requested_mode = ixgbe_fc_none;
4155 if (sc->num_rx_queues > 1)
4156 ixgbe_enable_rx_drop(sc);
4157 break;
4158 default:
4159 return (EINVAL);
4160 }
4161
4162 /* Don't autoneg if forcing a value */
4163 sc->hw.fc.disable_fc_autoneg = true;
4164 ixgbe_fc_enable(&sc->hw);
4165
4166 return (0);
4167 } /* ixgbe_set_flowcntl */
4168
4169 /************************************************************************
4170 * ixgbe_enable_rx_drop
4171 *
4172 * Enable the hardware to drop packets when the buffer is
4173 * full. This is useful with multiqueue, so that no single
4174 * queue being full stalls the entire RX engine. We only
4175 * enable this when Multiqueue is enabled AND Flow Control
4176 * is disabled.
4177 ************************************************************************/
4178 static void
4179 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
4180 {
4181 struct ixgbe_hw *hw = &sc->hw;
4182 struct rx_ring *rxr;
4183 u32 srrctl;
4184
4185 for (int i = 0; i < sc->num_rx_queues; i++) {
4186 rxr = &sc->rx_queues[i].rxr;
4187 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4188 srrctl |= IXGBE_SRRCTL_DROP_EN;
4189 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4190 }
4191
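/*
 * QDE is index-addressed: each write carries the target pool
 * in the IDX field along with the WRITE strobe, so the loop
 * below programs drop-enable for every VF pool in turn.
 */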
4192 /* enable drop for each vf */
4193 for (int i = 0; i < sc->num_vfs; i++) {
4194 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4195 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4196 IXGBE_QDE_ENABLE));
4197 }
4198 } /* ixgbe_enable_rx_drop */
4199
4200 /************************************************************************
4201 * ixgbe_disable_rx_drop
4202 ************************************************************************/
4203 static void
4204 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
4205 {
4206 struct ixgbe_hw *hw = &sc->hw;
4207 struct rx_ring *rxr;
4208 u32 srrctl;
4209
4210 for (int i = 0; i < sc->num_rx_queues; i++) {
4211 rxr = &sc->rx_queues[i].rxr;
4212 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4213 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4214 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4215 }
4216
4217 /* disable drop for each vf */
4218 for (int i = 0; i < sc->num_vfs; i++) {
4219 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4220 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4221 }
4222 } /* ixgbe_disable_rx_drop */
4223
4224 /************************************************************************
4225 * ixgbe_sysctl_advertise
4226 *
4227 * SYSCTL wrapper around setting advertised speed
4228 ************************************************************************/
4229 static int
4230 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4231 {
4232 struct ixgbe_softc *sc;
4233 int error, advertise;
4234
4235 sc = (struct ixgbe_softc *)arg1;
4236 if (atomic_load_acq_int(&sc->recovery_mode))
4237 return (EPERM);
4238
4239 advertise = sc->advertise;
4240
4241 error = sysctl_handle_int(oidp, &advertise, 0, req);
4242 if ((error) || (req->newptr == NULL))
4243 return (error);
4244
4245 return ixgbe_set_advertise(sc, advertise);
4246 } /* ixgbe_sysctl_advertise */
4247
4248 /************************************************************************
4249 * ixgbe_set_advertise - Control advertised link speed
4250 *
4251 * Flags:
4252 * 0x1 - advertise 100 Mb
4253 * 0x2 - advertise 1G
4254 * 0x4 - advertise 10G
4255 * 0x8 - advertise 10 Mb (yes, Mb)
4256 * 0x10 - advertise 2.5G (disabled by default)
4257 * 0x20 - advertise 5G (disabled by default)
4258 *
4259 ************************************************************************/
4260 static int
4261 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4262 {
4263 device_t dev = iflib_get_dev(sc->ctx);
4264 struct ixgbe_hw *hw;
4265 ixgbe_link_speed speed = 0;
4266 ixgbe_link_speed link_caps = 0;
4267 s32 err = IXGBE_NOT_IMPLEMENTED;
4268 bool negotiate = false;
4269
4270 /* Checks to validate new value */
4271 if (sc->advertise == advertise) /* no change */
4272 return (0);
4273
4274 hw = &sc->hw;
4275
4276 /* No speed changes for backplane media */
4277 if (hw->phy.media_type == ixgbe_media_type_backplane)
4278 return (ENODEV);
4279
4280 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4281 (hw->phy.multispeed_fiber))) {
4282 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4283 return (EINVAL);
4284 }
4285
4286 if (advertise < 0x1 || advertise > 0x3F) {
4287 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0x3F\n");
4288 return (EINVAL);
4289 }
4290
4291 if (hw->mac.ops.get_link_capabilities) {
4292 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4293 &negotiate);
4294 if (err != IXGBE_SUCCESS) {
4295 device_printf(dev, "Unable to determine supported advertise speeds\n");
4296 return (ENODEV);
4297 }
4298 }
4299
4300 /* Set new value and report new advertised mode */
4301 if (advertise & 0x1) {
4302 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4303 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4304 return (EINVAL);
4305 }
4306 speed |= IXGBE_LINK_SPEED_100_FULL;
4307 }
4308 if (advertise & 0x2) {
4309 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4310 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4311 return (EINVAL);
4312 }
4313 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4314 }
4315 if (advertise & 0x4) {
4316 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4317 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4318 return (EINVAL);
4319 }
4320 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4321 }
4322 if (advertise & 0x8) {
4323 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4324 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4325 return (EINVAL);
4326 }
4327 speed |= IXGBE_LINK_SPEED_10_FULL;
4328 }
4329 if (advertise & 0x10) {
4330 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
4331 device_printf(dev, "Interface does not support 2.5G advertised speed\n");
4332 return (EINVAL);
4333 }
4334 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
4335 }
4336 if (advertise & 0x20) {
4337 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
4338 device_printf(dev, "Interface does not support 5G advertised speed\n");
4339 return (EINVAL);
4340 }
4341 speed |= IXGBE_LINK_SPEED_5GB_FULL;
4342 }
4343
4344 hw->mac.autotry_restart = true;
4345 hw->mac.ops.setup_link(hw, speed, true);
4346 sc->advertise = advertise;
4347
4348 return (0);
4349 } /* ixgbe_set_advertise */
4350
4351 /************************************************************************
4352 * ixgbe_get_default_advertise - Get default advertised speed settings
4353 *
4354 * Formatted for sysctl usage.
4355 * Flags:
4356 * 0x1 - advertise 100 Mb
4357 * 0x2 - advertise 1G
4358 * 0x4 - advertise 10G
4359 * 0x8 - advertise 10 Mb (yes, Mb)
4360 * 0x10 - advertise 2.5G (disabled by default)
4361 * 0x20 - advertise 5G (disabled by default)
4362 ************************************************************************/
4363 static int
4364 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
4365 {
4366 struct ixgbe_hw *hw = &sc->hw;
4367 int speed;
4368 ixgbe_link_speed link_caps = 0;
4369 s32 err;
4370 bool negotiate = false;
4371
4372 /*
4373 * Advertised speed means nothing unless it's copper or
4374 * multi-speed fiber
4375 */
4376 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4377 !(hw->phy.multispeed_fiber))
4378 return (0);
4379
4380 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4381 if (err != IXGBE_SUCCESS)
4382 return (0);
4383
4384 if (hw->mac.type == ixgbe_mac_X550) {
4385 /*
4386 * 2.5G and 5G autonegotiation speeds on X550
4387 * are disabled by default due to reported
4388 * interoperability issues with some switches.
4389 */
4390 link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
4391 IXGBE_LINK_SPEED_5GB_FULL);
4392 }
4393
4394 speed =
4395 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
4396 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
4397 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
4398 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
4399 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
4400 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
4401
4402 return (speed);
4403 } /* ixgbe_get_default_advertise */
4404
4405 /************************************************************************
4406 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4407 *
4408 * Control values:
4409 * 0/1 - off / on (use default value of 1000)
4410 *
4411 * Legal timer values are:
4412 * 50,100,250,500,1000,2000,5000,10000
4413 *
4414 * Turning off interrupt moderation will also turn this off.
4415 ************************************************************************/
4416 static int
4417 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4418 {
4419 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4420 if_t ifp = iflib_get_ifp(sc->ctx);
4421 int error;
4422 u16 newval;
4423
4424 newval = sc->dmac;
4425 error = sysctl_handle_16(oidp, &newval, 0, req);
4426 if ((error) || (req->newptr == NULL))
4427 return (error);
4428
4429 switch (newval) {
4430 case 0:
4431 /* Disabled */
4432 sc->dmac = 0;
4433 break;
4434 case 1:
4435 /* Enable and use default */
4436 sc->dmac = 1000;
4437 break;
4438 case 50:
4439 case 100:
4440 case 250:
4441 case 500:
4442 case 1000:
4443 case 2000:
4444 case 5000:
4445 case 10000:
4446 /* Legal values - allow */
4447 sc->dmac = newval;
4448 break;
4449 default:
4450 /* Illegal value, return an error */
4451 return (EINVAL);
4452 }
4453
4454 /* Re-initialize hardware if it's already running */
4455 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
4456 if_init(ifp, ifp);
4457
4458 return (0);
4459 } /* ixgbe_sysctl_dmac */
4460
4461 #ifdef IXGBE_DEBUG
4462 /************************************************************************
4463 * ixgbe_sysctl_power_state
4464 *
4465 * Sysctl to test power states
4466 * Values:
4467 * 0 - set device to D0
4468 * 3 - set device to D3
4469 * (none) - get current device power state
4470 ************************************************************************/
4471 static int
4472 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4473 {
4474 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4475 device_t dev = sc->dev;
4476 int curr_ps, new_ps, error = 0;
4477
4478 curr_ps = new_ps = pci_get_powerstate(dev);
4479
4480 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4481 if ((error) || (req->newptr == NULL))
4482 return (error);
4483
4484 if (new_ps == curr_ps)
4485 return (0);
4486
4487 if (new_ps == 3 && curr_ps == 0)
4488 error = DEVICE_SUSPEND(dev);
4489 else if (new_ps == 0 && curr_ps == 3)
4490 error = DEVICE_RESUME(dev);
4491 else
4492 return (EINVAL);
4493
4494 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4495
4496 return (error);
4497 } /* ixgbe_sysctl_power_state */
4498 #endif
4499
4500 /************************************************************************
4501 * ixgbe_sysctl_wol_enable
4502 *
4503 * Sysctl to enable/disable the WoL capability,
4504 * if supported by the adapter.
4505 *
4506 * Values:
4507 * 0 - disabled
4508 * 1 - enabled
4509 ************************************************************************/
4510 static int
4511 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4512 {
4513 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4514 struct ixgbe_hw *hw = &sc->hw;
4515 int new_wol_enabled;
4516 int error = 0;
4517
4518 new_wol_enabled = hw->wol_enabled;
4519 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4520 if ((error) || (req->newptr == NULL))
4521 return (error);
4522 new_wol_enabled = !!(new_wol_enabled);
4523 if (new_wol_enabled == hw->wol_enabled)
4524 return (0);
4525
4526 if (new_wol_enabled > 0 && !sc->wol_support)
4527 return (ENODEV);
4528 else
4529 hw->wol_enabled = new_wol_enabled;
4530
4531 return (0);
4532 } /* ixgbe_sysctl_wol_enable */
4533
4534 /************************************************************************
4535 * ixgbe_sysctl_wufc - Wake Up Filter Control
4536 *
4537 * Sysctl to enable/disable the types of packets that the
4538 * adapter will wake up on upon receipt.
4539 * Flags:
4540 * 0x1 - Link Status Change
4541 * 0x2 - Magic Packet
4542 * 0x4 - Direct Exact
4543 * 0x8 - Directed Multicast
4544 * 0x10 - Broadcast
4545 * 0x20 - ARP/IPv4 Request Packet
4546 * 0x40 - Direct IPv4 Packet
4547 * 0x80 - Direct IPv6 Packet
4548 *
4549 * Settings not listed above will cause the sysctl to return an error.
4550 ************************************************************************/
4551 static int
4552 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4553 {
4554 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4555 int error = 0;
4556 u32 new_wufc;
4557
4558 new_wufc = sc->wufc;
4559
4560 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4561 if ((error) || (req->newptr == NULL))
4562 return (error);
4563 if (new_wufc == sc->wufc)
4564 return (0);
4565
4566 if (new_wufc & 0xffffff00)
4567 return (EINVAL);
4568
4569 new_wufc &= 0xff;
4570 new_wufc |= (0xffffff00 & sc->wufc);
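/* i.e. replace only the low filter byte and preserve the upper bits */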
4571 sc->wufc = new_wufc;
4572
4573 return (0);
4574 } /* ixgbe_sysctl_wufc */
4575
4576 #ifdef IXGBE_DEBUG
4577 /************************************************************************
4578 * ixgbe_sysctl_print_rss_config
4579 ************************************************************************/
4580 static int
4581 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4582 {
4583 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4584 struct ixgbe_hw *hw = &sc->hw;
4585 device_t dev = sc->dev;
4586 struct sbuf *buf;
4587 int error = 0, reta_size;
4588 u32 reg;
4589
4590 if (atomic_load_acq_int(&sc->recovery_mode))
4591 return (EPERM);
4592
4593 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4594 if (!buf) {
4595 device_printf(dev, "Could not allocate sbuf for output.\n");
4596 return (ENOMEM);
4597 }
4598
4599 // TODO: use sbufs to make a string to print out
4600 /* Set multiplier for RETA setup and table size based on MAC */
4601 switch (sc->hw.mac.type) {
4602 case ixgbe_mac_X550:
4603 case ixgbe_mac_X550EM_x:
4604 case ixgbe_mac_X550EM_a:
4605 reta_size = 128;
4606 break;
4607 default:
4608 reta_size = 32;
4609 break;
4610 }
4611
4612 /* Print out the redirection table */
4613 sbuf_cat(buf, "\n");
4614 for (int i = 0; i < reta_size; i++) {
4615 if (i < 32) {
4616 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4617 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4618 } else {
4619 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4620 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4621 }
4622 }
4623
4624 // TODO: print more config
4625
4626 error = sbuf_finish(buf);
4627 if (error)
4628 device_printf(dev, "Error finishing sbuf: %d\n", error);
4629
4630 sbuf_delete(buf);
4631
4632 return (0);
4633 } /* ixgbe_sysctl_print_rss_config */
4634 #endif /* IXGBE_DEBUG */
4635
4636 /************************************************************************
4637 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4638 *
4639 * For X552/X557-AT devices using an external PHY
4640 ************************************************************************/
4641 static int
4642 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4643 {
4644 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4645 struct ixgbe_hw *hw = &sc->hw;
4646 u16 reg;
4647
4648 if (atomic_load_acq_int(&sc->recovery_mode))
4649 return (EPERM);
4650
4651 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4652 device_printf(iflib_get_dev(sc->ctx),
4653 "Device has no supported external thermal sensor.\n");
4654 return (ENODEV);
4655 }
4656
4657 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4658 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4659 device_printf(iflib_get_dev(sc->ctx),
4660 "Error reading from PHY's current temperature register\n");
4661 return (EAGAIN);
4662 }
4663
4664 /* Shift temp for output */
4665 reg = reg >> 8;
4666
4667 return (sysctl_handle_16(oidp, NULL, reg, req));
4668 } /* ixgbe_sysctl_phy_temp */
4669
4670 /************************************************************************
4671 * ixgbe_sysctl_phy_overtemp_occurred
4672 *
4673 * Reports (directly from the PHY) whether the current PHY
4674 * temperature is over the overtemp threshold.
4675 ************************************************************************/
4676 static int
4677 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4678 {
4679 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4680 struct ixgbe_hw *hw = &sc->hw;
4681 u16 reg;
4682
4683 if (atomic_load_acq_int(&sc->recovery_mode))
4684 return (EPERM);
4685
4686 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4687 device_printf(iflib_get_dev(sc->ctx),
4688 "Device has no supported external thermal sensor.\n");
4689 return (ENODEV);
4690 }
4691
4692 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4693 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4694 device_printf(iflib_get_dev(sc->ctx),
4695 "Error reading from PHY's temperature status register\n");
4696 return (EAGAIN);
4697 }
4698
4699 /* Get occurrence bit */
4700 reg = !!(reg & 0x4000);
4701
4702 return (sysctl_handle_16(oidp, NULL, reg, req));
4703 } /* ixgbe_sysctl_phy_overtemp_occurred */
4704
4705 /************************************************************************
4706 * ixgbe_sysctl_eee_state
4707 *
4708 * Sysctl to set EEE power saving feature
4709 * Values:
4710 * 0 - disable EEE
4711 * 1 - enable EEE
4712 * (none) - get current device EEE state
4713 ************************************************************************/
4714 static int
4715 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4716 {
4717 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4718 device_t dev = sc->dev;
4719 if_t ifp = iflib_get_ifp(sc->ctx);
4720 int curr_eee, new_eee, error = 0;
4721 s32 retval;
4722
4723 if (atomic_load_acq_int(&sc->recovery_mode))
4724 return (EPERM);
4725
4726 curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4727
4728 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4729 if ((error) || (req->newptr == NULL))
4730 return (error);
4731
4732 /* Nothing to do */
4733 if (new_eee == curr_eee)
4734 return (0);
4735
4736 /* Not supported */
4737 if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4738 return (EINVAL);
4739
4740 /* Bounds checking */
4741 if ((new_eee < 0) || (new_eee > 1))
4742 return (EINVAL);
4743
4744 retval = ixgbe_setup_eee(&sc->hw, new_eee);
4745 if (retval) {
4746 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4747 return (EINVAL);
4748 }
4749
4750 /* Restart auto-neg */
4751 if_init(ifp, ifp);
4752
4753 device_printf(dev, "New EEE state: %d\n", new_eee);
4754
4755 /* Cache new value */
4756 if (new_eee)
4757 sc->feat_en |= IXGBE_FEATURE_EEE;
4758 else
4759 sc->feat_en &= ~IXGBE_FEATURE_EEE;
4760
4761 return (error);
4762 } /* ixgbe_sysctl_eee_state */
4763
4764 static int
4765 ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
4766 {
4767 struct ixgbe_softc *sc;
4768 u32 reg, val, shift;
4769 int error, mask;
4770
4771 sc = oidp->oid_arg1;
4772 switch (oidp->oid_arg2) {
4773 case 0:
4774 reg = IXGBE_DTXTCPFLGL;
4775 shift = 0;
4776 break;
4777 case 1:
4778 reg = IXGBE_DTXTCPFLGL;
4779 shift = 16;
4780 break;
4781 case 2:
4782 reg = IXGBE_DTXTCPFLGH;
4783 shift = 0;
4784 break;
4785 default:
4786 return (EINVAL);
4788 }
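/*
 * Presumed register layout, matching the arg2 cases above:
 * DTXTCPFLGL carries two 12-bit TCP flag masks at bits
 * [11:0] and [27:16], DTXTCPFLGH a third at bits [11:0];
 * the read-modify-write below touches only the selected
 * field.
 */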
4789 val = IXGBE_READ_REG(&sc->hw, reg);
4790 mask = (val >> shift) & 0xfff;
4791 error = sysctl_handle_int(oidp, &mask, 0, req);
4792 if (error != 0 || req->newptr == NULL)
4793 return (error);
4794 if (mask < 0 || mask > 0xfff)
4795 return (EINVAL);
4796 val = (val & ~(0xfff << shift)) | (mask << shift);
4797 IXGBE_WRITE_REG(&sc->hw, reg, val);
4798 return (0);
4799 }
4800
4801 /************************************************************************
4802 * ixgbe_init_device_features
4803 ************************************************************************/
4804 static void
4805 ixgbe_init_device_features(struct ixgbe_softc *sc)
4806 {
	sc->feat_cap = IXGBE_FEATURE_NETMAP
	    | IXGBE_FEATURE_RSS
	    | IXGBE_FEATURE_MSI
	    | IXGBE_FEATURE_MSIX
	    | IXGBE_FEATURE_LEGACY_IRQ;

	/* Set capabilities first... */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
			sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			sc->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		sc->feat_cap |= IXGBE_FEATURE_SRIOV;
		sc->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (sc->hw.bus.func == 0))
			sc->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (sc->feat_cap & IXGBE_FEATURE_EEE)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/* Recovery mode */
	if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
		sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev,
			    "Device does not support Flow Director."
			    " Leaving disabled.\n");
	}
	/*
	 * Message Signaled Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		sc->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		sc->feat_en &= ~IXGBE_FEATURE_RSS;
		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */

/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

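	/*
	 * When called from interrupt context, 'reg' holds EICR and the
	 * SDP1 cause bit is MAC-dependent; otherwise 'reg' holds ESDP
	 * and the raw SDP1 pin bit is checked.
	 */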
	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	const char *space = "";

	ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
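	/*
	 * The resulting string concatenates whichever of the fields
	 * below are valid, e.g. (illustrative values only):
	 *
	 *   fw 3.1.10 nvm 2.40.1 NVM OEM V1.2 R3 Option ROM V1-b32-p4 eTrack 0x80000d2e
	 */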

	/* FW version */
	if ((nvm_ver.phy_fw_maj == 0x0 &&
	    nvm_ver.phy_fw_min == 0x0 &&
	    nvm_ver.phy_fw_id == 0x0) ||
	    (nvm_ver.phy_fw_maj == 0xF &&
	    nvm_ver.phy_fw_min == 0xFF &&
	    nvm_ver.phy_fw_id == 0xF)) {
		/* If major, minor and id numbers are set to 0,
		 * reading FW version is unsupported. If major number
		 * is set to 0xF, minor is set to 0xFF and id is set
		 * to 0xF, this means that number read is invalid. */
	} else
		sbuf_printf(buf, "fw %d.%d.%d ",
		    nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
		    nvm_ver.phy_fw_id);

	/* NVM version */
	if ((nvm_ver.nvm_major == 0x0 &&
	    nvm_ver.nvm_minor == 0x0 &&
	    nvm_ver.nvm_id == 0x0) ||
	    (nvm_ver.nvm_major == 0xF &&
	    nvm_ver.nvm_minor == 0xFF &&
	    nvm_ver.nvm_id == 0xF)) {
		/* If major, minor and id numbers are set to 0,
		 * reading NVM version is unsupported. If major number
		 * is set to 0xF, minor is set to 0xFF and id is set
		 * to 0xF, this means that number read is invalid. */
	} else
		sbuf_printf(buf, "nvm %x.%02x.%x ",
		    nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, nvm_ver.or_major, nvm_ver.or_build,
		    nvm_ver.or_patch);
		space = " ";
	}

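	/*
	 * An eTrack ID read back as all ones is treated as invalid.
	 * (Note the OR with 0xFFFFFFFF makes the right-hand side of
	 * this comparison 0xFFFFFFFF regardless of the other terms.)
	 */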
	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID | 0xFFFFFFFF)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
	}
} /* ixgbe_sbuf_fw_version */

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */
