/*****************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*****************************************************************************/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
49
50 /************************************************************************
51 * PCI Device ID Table
52 *
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixgbe_strings
55 * Last entry must be all 0s
56 *
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
59 static const pci_vendor_info_t ixgbe_vendor_info_array[] =
60 {
61 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,
62 "Intel(R) 82598EB AF (Dual Fiber)"),
63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,
64 "Intel(R) 82598EB AF (Fiber)"),
65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,
66 "Intel(R) 82598EB AT (CX4)"),
67 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,
68 "Intel(R) 82598EB AT"),
69 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,
70 "Intel(R) 82598EB AT2"),
71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,
73 "Intel(R) 82598EB AF DA (Dual Fiber)"),
74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,
75 "Intel(R) 82598EB AT (Dual CX4)"),
76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,
77 "Intel(R) 82598EB AF (Dual Fiber LR)"),
78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,
79 "Intel(R) 82598EB AF (Dual Fiber SR)"),
80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,
81 "Intel(R) 82598EB LOM"),
82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,
83 "Intel(R) X520 82599 (KX4)"),
84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,
85 "Intel(R) X520 82599 (KX4 Mezzanine)"),
86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,
87 "Intel(R) X520 82599ES (SFI/SFP+)"),
88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,
89 "Intel(R) X520 82599 (XAUI/BX4)"),
90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,
91 "Intel(R) X520 82599 (Dual CX4)"),
92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,
93 "Intel(R) X520-T 82599 LOM"),
94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS,
95 "Intel(R) X520 82599 LS"),
96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,
97 "Intel(R) X520 82599 (Combined Backplane)"),
98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,
99 "Intel(R) X520 82599 (Backplane w/FCoE)"),
100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,
101 "Intel(R) X520 82599 (Dual SFP+)"),
102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,
103 "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,
105 "Intel(R) X520-1 82599EN (SFP+)"),
106 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,
107 "Intel(R) X520-4 82599 (Quad SFP+)"),
108 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,
109 "Intel(R) X520-Q1 82599 (QSFP+)"),
110 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,
111 "Intel(R) X540-AT2"),
112 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
113 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
114 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
115 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,
116 "Intel(R) X552 (KR Backplane)"),
117 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,
118 "Intel(R) X552 (KX4 Backplane)"),
119 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,
120 "Intel(R) X552/X557-AT (10GBASE-T)"),
121 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,
122 "Intel(R) X552 (1000BASE-T)"),
123 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP,
124 "Intel(R) X552 (SFP+)"),
125 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR,
126 "Intel(R) X553 (KR Backplane)"),
127 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L,
128 "Intel(R) X553 L (KR Backplane)"),
129 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP,
130 "Intel(R) X553 (SFP+)"),
131 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N,
132 "Intel(R) X553 N (SFP+)"),
133 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII,
134 "Intel(R) X553 (1GbE SGMII)"),
135 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L,
136 "Intel(R) X553 L (1GbE SGMII)"),
137 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T,
138 "Intel(R) X553/X557-AT (10GBASE-T)"),
139 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T,
140 "Intel(R) X553 (1GbE)"),
141 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L,
142 "Intel(R) X553 L (1GbE)"),
143 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS,
144 "Intel(R) X540-T2 (Bypass)"),
145 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
146 "Intel(R) X520 82599 (Bypass)"),
147 /* required last entry */
148 PVID_END
149 };

static void *ixgbe_register(device_t);
static int ixgbe_if_attach_pre(if_ctx_t);
static int ixgbe_if_attach_post(if_ctx_t);
static int ixgbe_if_detach(if_ctx_t);
static int ixgbe_if_shutdown(if_ctx_t);
static int ixgbe_if_suspend(if_ctx_t);
static int ixgbe_if_resume(if_ctx_t);

static void ixgbe_if_stop(if_ctx_t);
void ixgbe_if_enable_intr(if_ctx_t);
static void ixgbe_if_disable_intr(if_ctx_t);
static void ixgbe_link_intr_enable(if_ctx_t);
static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int ixgbe_if_media_change(if_ctx_t);
static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int ixgbe_if_promisc_set(if_ctx_t, int);
static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);

/************************************************************************
 * Function prototypes
 ************************************************************************/
static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);

static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
static void ixgbe_add_device_sysctls(if_ctx_t);
static int ixgbe_allocate_pci_resources(if_ctx_t);
static int ixgbe_setup_low_power_mode(if_ctx_t);

static void ixgbe_config_dmac(struct ixgbe_softc *);
static void ixgbe_configure_ivars(struct ixgbe_softc *);
static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static bool ixgbe_sfp_probe(if_ctx_t);

static void ixgbe_free_pci_resources(if_ctx_t);

static int ixgbe_msix_link(void *);
static int ixgbe_msix_que(void *);
static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
static void ixgbe_initialize_receive_units(if_ctx_t);
static void ixgbe_initialize_transmit_units(if_ctx_t);

static int ixgbe_setup_interface(if_ctx_t);
static void ixgbe_init_device_features(struct ixgbe_softc *);
static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
static void ixgbe_print_fw_version(if_ctx_t);
static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
static void ixgbe_fw_mode_timer(void *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);

static void ixgbe_add_hw_stats(struct ixgbe_softc *);
static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int ixgbe_set_advertise(struct ixgbe_softc *, int);
static int ixgbe_get_default_advertise(struct ixgbe_softc *);
static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);

/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);

/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
MODULE_DEPEND(ix, iflib, 1, 1, 1);

static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, ixgbe_if_detach),
	DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
	DEVMETHOD(ifdi_resume, ixgbe_if_resume),
	DEVMETHOD(ifdi_init, ixgbe_if_init),
	DEVMETHOD(ifdi_stop, ixgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
	DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
	DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixgbe_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
	DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "IXGBE driver parameters");
static driver_t ixgbe_if_driver = {
	"ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
};

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
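
/*
 * The CTLFLAG_RDTUN knobs here can be set as loader tunables, e.g. in
 * /boot/loader.conf:
 *
 *	hw.ix.max_interrupt_rate="31250"
 *
 * The default above evaluates to 31250 assuming IXGBE_LOW_LATENCY is
 * 128, its value in ixgbe.h at the time of writing.
 */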

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");
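
/*
 * The value follows enum ixgbe_fc_mode: 0 = none, 1 = rx pause,
 * 2 = tx pause, 3 = full (the default here); see ixgbe_type.h.
 */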

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
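
/*
 * Nonzero values form a bitmask interpreted by ixgbe_set_advertise();
 * for example, advertising 1G and 10G together would OR the respective
 * speed bits (see the advertise sysctl handler for the authoritative
 * encoding).
 */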

/*
 * Smart speed setting, default to on.
 * This currently works only as a compile-time
 * option, since it is applied during attach;
 * set it to 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix,
    0,
    "Enable MSI-X interrupts");

/*
 * Enabling this allows the use of unsupported
 * SFP+ modules; note that if you do so, you
 * are on your own :)
 */
static int allow_unsupported_sfp = false;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir,
    0,
    "Enable Flow Director");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss,
    0,
    "Enable Receive-Side Scaling (RSS)");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixgbe_enable_aim = false;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim,
    0,
    "Enable adaptive interrupt moderation");

#if 0
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#endif

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; with the default of 20, every 20th packet
 * will be probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = PAGE_SIZE * 4,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = PAGE_SIZE * 4,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixgbe_vendor_info_array,
	.isc_driver_version = ixgbe_driver_version,
	.isc_driver = &ixgbe_if_driver,
	.isc_flags = IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

/************************************************************************
 * ixgbe_if_tx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(sc->num_tx_queues > 0);
	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
	    ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		/* In case SR-IOV is enabled, align the index properly */
		txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		txr->sc = que->sc = sc;

		/* Allocate report status array */
		txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
		if (txr->tx_rsq == NULL) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get virtual and physical address of the hardware queues */
		txr->tail = IXGBE_TDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
		txr->tx_paddr = paddrs[i];

		txr->bytes = 0;
		txr->total_packets = 0;

		/* Set the rate at which we sample packets */
		if (sc->feat_en & IXGBE_FEATURE_FDIR)
			txr->atr_sample = atr_sample_rate;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixgbe_if_queues_free(ctx);

	return (error);
} /* ixgbe_if_tx_queues_alloc */

/************************************************************************
 * ixgbe_if_rx_queues_alloc
 ************************************************************************/
static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int i;

	MPASS(sc->num_rx_queues > 0);
	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) *
	    nrxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		/* In case SR-IOV is enabled, align the index properly */
		rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);

		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_RDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);
} /* ixgbe_if_rx_queues_alloc */

/************************************************************************
 * ixgbe_if_queues_free
 ************************************************************************/
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *tx_que = sc->tx_queues;
	struct ix_rx_queue *rx_que = sc->rx_queues;
	int i;

	if (tx_que != NULL) {
		for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
			struct tx_ring *txr = &tx_que->txr;
			if (txr->tx_rsq == NULL)
				break;

			free(txr->tx_rsq, M_IXGBE);
			txr->tx_rsq = NULL;
		}

		free(sc->tx_queues, M_IXGBE);
		sc->tx_queues = NULL;
	}
	if (rx_que != NULL) {
		free(sc->rx_queues, M_IXGBE);
		sc->rx_queues = NULL;
	}
} /* ixgbe_if_queues_free */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
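		/*
		 * Worked example: four consecutive entries with queue
		 * ids q0..q3 accumulate below as
		 *
		 *	reta = q0 | (q1 << 8) | (q2 << 16) | (q3 << 24)
		 *
		 * before being flushed to RETA/ERETA.
		 */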
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw,
				    IXGBE_ERETA((i >> 2) - 32), reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4 |
		    RSS_HASHTYPE_RSS_TCP_IPV4 |
		    RSS_HASHTYPE_RSS_IPV6 |
		    RSS_HASHTYPE_RSS_TCP_IPV6 |
		    RSS_HASHTYPE_RSS_IPV6_EX |
		    RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1)
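
/*
 * Sketch of the arithmetic, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10
 * (its ixgbe_type.h value): a 2048-byte mbuf buffer yields
 * (2048 + 1023) >> 10 == 2, i.e. SRRCTL expresses the packet buffer
 * size in 1 KB units, rounded up.
 */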

static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ixgbe_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (if_getmtu(ifp) > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		    IXGBE_PSRTYPE_UDPHDR |
		    IXGBE_PSRTYPE_IPV4HDR |
		    IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (if_getcapenable(ifp) & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
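
		/*
		 * The two writes above split the 64-bit ring base across
		 * TDBAL/TDBAH; e.g. tdba == 0x123456000 yields
		 * TDBAL = 0x23456000 and TDBAH = 0x1.
		 */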
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are
		 * actually prefixed with TPH_ instead of DCA_, but the
		 * addresses and fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl =
			    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j),
			    txctrl);
			break;
		}
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_register
 ************************************************************************/
static void *
ixgbe_register(device_t dev)
{
	return (&ixgbe_sctx_init);
} /* ixgbe_register */

/************************************************************************
 * ixgbe_if_attach_pre - Device initialization routine, part 1
 *
 * Called when the driver is being loaded.
 * Identifies the type of hardware, initializes the hardware,
 * and initializes iflib structures.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;
	u32 ctrl_ext;
	size_t i;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->hw.back = sc;
	sc->ctx = ctx;
	sc->dev = dev;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Determine hardware revision */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(ctx)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		return (ENXIO);
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_pci;
	}

	if (hw->mac.ops.fw_recovery_mode &&
	    hw->mac.ops.fw_recovery_mode(hw)) {
		device_printf(dev,
		    "Firmware recovery mode detected. Limiting "
		    "functionality.\nRefer to the Intel(R) Ethernet Adapters "
		    "and Devices User Guide for details on firmware recovery "
		    "mode.");
		error = ENOSYS;
		goto err_pci;
	}

	/* 82598 does not support SR-IOV; initialize everything else */
	if (hw->mac.type >= ixgbe_mac_82599_vf) {
		for (i = 0; i < sc->num_vfs; i++)
			hw->mbx.ops[i].init_params(hw);
	}

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	ixgbe_init_device_features(sc);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(sc);

	/* Verify adapter fan is still functional (if applicable) */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(sc, esdp, false);
	}

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	hw->phy.reset_if_overtemp = true;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		sc->sfp_probe = true;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_pci;
	} else if (error) {
		device_printf(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_pci;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_pci;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev,
		    "This device is a pre-production adapter/LOM. Please be"
		    " aware there may be issues associated with your"
		    " hardware.\nIf you are experiencing problems please"
		    " contact your Intel or hardware representative who"
		    " provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_pci;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		scctx->isc_rss_table_size = 512;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
		break;
	default:
		scctx->isc_rss_table_size = 128;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
	}

	/* Allow legacy interrupts */
	ixgbe_txrx.ift_legacy_intr = ixgbe_intr;

	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
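
	/*
	 * Worked example, assuming DBA_ALIGN == 128 (its ixgbe.h value)
	 * and 1024 TX descriptors: 1024 * 16 bytes + 4 = 16388, which
	 * roundup2() pads to 16512 so each ring ends on a 128-byte
	 * boundary.
	 */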

	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
	} else {
		scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
		scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	}

	scctx->isc_msix_bar = pci_msix_table_bar(dev);

	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;

	return (0);

err_pci:
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_pci_resources(ctx);

	return (error);
} /* ixgbe_if_attach_pre */

/*********************************************************************
 * ixgbe_if_attach_post - Device initialization routine, part 2
 *
 * Called during driver load, but after interrupts and
 * resources have been allocated and configured.
 * Sets up some data structures not relevant to iflib.
 *
 * return 0 on success, positive on failure
 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	if (sc->intr_type == IFLIB_INTR_LEGACY &&
	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev,
		    "Device does not support legacy interrupts\n");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
	    M_IXGBE, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev,
		    "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(sc, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	ixgbe_initialize_iov(sc);

	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(sc);
	ixgbe_add_hw_stats(sc);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(sc);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass sc.
	 */
	ixgbe_bypass_init(sc);

	/* Display NVM and Option ROM versions */
	ixgbe_print_fw_version(ctx);

	/* Set an initial dmac value */
	sc->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	sc->advertise = ixgbe_get_default_advertise(sc);

	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	/* Init recovery mode timer and state variable */
	if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		sc->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&sc->fw_mode_timer, true);

		/* Start the task */
		callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
	}

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */

/************************************************************************
 * ixgbe_check_wol_support
 *
 * Checks whether the adapter's ports are capable of
 * Wake On LAN by reading the adapter's NVM.
 *
 * Sets each port's hw->wol_enabled value depending
 * on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u16 dev_caps = 0;

	/* Find out WoL support for port */
	sc->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	    hw->bus.func == 0))
		sc->wol_support = hw->wol_enabled = 1;
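
	/*
	 * Reading of the caps bits above: WOL_PORT0_1 advertises WoL on
	 * ports 0 and 1, while WOL_PORT0 covers port 0 only, hence the
	 * extra bus.func == 0 check (an interpretation from the macro
	 * names).
	 */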

	/* Save initial wake up filter configuration */
	sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 * Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));

	sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);

	ixgbe_add_media_types(ctx);

	/* Autoselect media by default */
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

/************************************************************************
 * ixgbe_if_get_counter
 ************************************************************************/
static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	case IFCOUNTER_OMCASTS:
		return (sc->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (sc->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (0);
	case IFCOUNTER_IERRORS:
		return (sc->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_if_get_counter */

/************************************************************************
 * ixgbe_if_i2c_req
 ************************************************************************/
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int i;

	if (hw->phy.ops.read_i2c_byte == NULL)
		return (ENXIO);
	for (i = 0; i < req->len; i++)
		hw->phy.ops.read_i2c_byte(hw, req->offset + i,
		    req->dev_addr, &req->data[i]);
	return (0);
} /* ixgbe_if_i2c_req */

/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
 * reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u64 layer;

	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (hw->mac.type == ixgbe_mac_X550) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
		device_printf(dev, "Media supported: 1000baseBX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_BX, 0, NULL);
	}

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->phy.type == ixgbe_phy_nl)
			return (true);
		return (false);
	case ixgbe_mac_82599EB:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return (true);
		default:
			return (false);
		}
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
			return (true);
		return (false);
	default:
		return (false);
	}
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		iflib_admin_intr_deferred(ctx);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &sc->link_speed,
			    &sc->link_up, false);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;

		if (hw->mac.type == ixgbe_mac_X550 &&
		    hw->phy.autoneg_advertised == 0) {
			/*
			 * 2.5G and 5G autonegotiation speeds on X550
			 * are disabled by default due to reported
			 * interoperability issues with some switches.
			 *
			 * The check on hw->phy.autoneg_advertised tells
			 * us whether any autonegotiation speeds were
			 * configured before this ixgbe_config_link()
			 * call.
			 *
			 * If it is nonzero, the user may have set
			 * autonegotiation speeds via sysctl before
			 * bringing the interface up; in that case we
			 * must not disable 2.5G and 5G, since those
			 * speeds might have been selected deliberately.
			 *
			 * If it is zero, this is the first time the
			 * autonegotiation preferences are set, and the
			 * default set of speeds should exclude 2.5G
			 * and 5G.
			 */
			autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
			    IXGBE_LINK_SPEED_5GB_FULL);
		}

		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    sc->link_up);
	}
} /* ixgbe_config_link */

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_hw_stats *stats = &sc->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u32 lxoffrxc;
	u64 total_missed_rx = 0;

	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	stats->gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		stats->lxoffrxc += lxoffrxc;
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		stats->lxoffrxc += lxoffrxc;
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (lxoffrxc)
		sc->shared->isc_pause_frames = 1;

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;
1622
1623 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1624 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1625 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1626 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1627 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1628 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1629 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1630 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1631 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1632 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1633 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1634 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1635 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1636 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1637 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1638 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1639 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1640 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1641 /* Only read FCOE on 82599 */
1642 if (hw->mac.type != ixgbe_mac_82598EB) {
1643 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1644 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1645 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1646 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1647 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1648 }
1649
1650 /* Fill out the OS statistics structure */
1651 IXGBE_SET_IPACKETS(sc, stats->gprc);
1652 IXGBE_SET_OPACKETS(sc, stats->gptc);
1653 IXGBE_SET_IBYTES(sc, stats->gorc);
1654 IXGBE_SET_OBYTES(sc, stats->gotc);
1655 IXGBE_SET_IMCASTS(sc, stats->mprc);
1656 IXGBE_SET_OMCASTS(sc, stats->mptc);
1657 IXGBE_SET_COLLISIONS(sc, 0);
1658 IXGBE_SET_IQDROPS(sc, total_missed_rx);
1659
1660 /*
1661 * Aggregate following types of errors as RX errors:
1662 * - CRC error count,
1663 * - illegal byte error count,
1664 * - missed packets count,
1665 * - length error count,
1666 * - undersized packets count,
1667 * - fragmented packets count,
1668 * - oversized packets count,
1669 * - jabber count.
1670 */
1671 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
1672 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
1673 stats->roc + stats->rjc);
1674 } /* ixgbe_update_stats_counters */

/************************************************************************
 * ixgbe_add_hw_stats
 *
 * Add sysctl variables, one per statistic, to the system.
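 *
 * The nodes land under the device's sysctl tree and can be inspected
 * from userland with, e.g.:
 *
 *   # sysctl dev.ix.0.mac_stats.crc_errs
 *   # sysctl dev.ix.0.queue0.rx_packets
 *
 * (unit number 0 assumed purely for illustration).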
 ************************************************************************/
static void
ixgbe_add_hw_stats(struct ixgbe_softc *sc)
{
    device_t dev = iflib_get_dev(sc->ctx);
    struct ix_rx_queue *rx_que;
    struct ix_tx_queue *tx_que;
    struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
    struct sysctl_oid *tree = device_get_sysctl_tree(dev);
    struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
    struct ixgbe_hw_stats *stats = &sc->stats.pf;
    struct sysctl_oid *stat_node, *queue_node;
    struct sysctl_oid_list *stat_list, *queue_list;
    int i;

#define QUEUE_NAME_LEN 32
    char namebuf[QUEUE_NAME_LEN];

    /* Driver Statistics */
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
        CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
        CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
        CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");

    for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
        i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;
        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
            CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
            ixgbe_sysctl_tdh_handler, "IU",
            "Transmit Descriptor Head");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
            CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
            ixgbe_sysctl_tdt_handler, "IU",
            "Transmit Descriptor Tail");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
            CTLFLAG_RD, &txr->tso_tx, "TSO");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
            CTLFLAG_RD, &txr->total_packets,
            "Queue Packets Transmitted");
    }

    for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
        i++, rx_que++) {
        struct rx_ring *rxr = &rx_que->rxr;
        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
            CTLTYPE_UINT | CTLFLAG_RW,
            &sc->rx_queues[i], 0,
            ixgbe_sysctl_interrupt_rate_handler, "IU",
            "Interrupt Rate");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
            CTLFLAG_RD, &(sc->rx_queues[i].irqs),
            "irqs on this queue");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
            CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
            ixgbe_sysctl_rdh_handler, "IU",
            "Receive Descriptor Head");
        SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
            CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
            ixgbe_sysctl_rdt_handler, "IU",
            "Receive Descriptor Tail");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
            CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
            CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
            CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
            CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
    }

    /* MAC stats get their own sub node */
    stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
    stat_list = SYSCTL_CHILDREN(stat_node);

    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
        CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
        CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
        CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
        CTLFLAG_RD, &stats->errbc, "Byte Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
        CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
        CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
        CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
        CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
        CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");

    /* Flow Control stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
        CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
        CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
        CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
        CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");

    /* Packet Reception Stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
        CTLFLAG_RD, &stats->tor, "Total Octets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
        CTLFLAG_RD, &stats->gorc, "Good Octets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
        CTLFLAG_RD, &stats->tpr, "Total Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
        CTLFLAG_RD, &stats->gprc, "Good Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
        CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
        CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
        CTLFLAG_RD, &stats->prc64, "64 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
        CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
        CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
        CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
        CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
        CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
        CTLFLAG_RD, &stats->ruc, "Receive Undersized");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
        CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
        CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
        CTLFLAG_RD, &stats->rjc, "Received Jabber");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
        CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
        CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
        CTLFLAG_RD, &stats->xec, "Checksum Errors");

    /* Packet Transmission Stats */
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
        CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
        CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
        CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
        CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
        CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
        CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
        CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
        CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
        CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
        CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
        CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
        CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 * Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
    struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!txr)
        return (0);

    if (atomic_load_acq_int(&txr->sc->recovery_mode))
        return (EPERM);

    val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_tdh_handler */

/************************************************************************
 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
 *
 * Retrieves the TDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
{
    struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!txr)
        return (0);

    if (atomic_load_acq_int(&txr->sc->recovery_mode))
        return (EPERM);

    val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_tdt_handler */

/************************************************************************
 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
 *
 * Retrieves the RDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
{
    struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!rxr)
        return (0);

    if (atomic_load_acq_int(&rxr->sc->recovery_mode))
        return (EPERM);

    val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_rdh_handler */

/************************************************************************
 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
 *
 * Retrieves the RDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
{
    struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
    int error;
    unsigned int val;

    if (!rxr)
        return (0);

    if (atomic_load_acq_int(&rxr->sc->recovery_mode))
        return (EPERM);

    val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)
        return (error);

    return (0);
} /* ixgbe_sysctl_rdt_handler */

/************************************************************************
 * ixgbe_if_vlan_register
 *
 * Run via vlan config EVENT, it enables us to use the
 * HW Filter table since we can get the vlan id. This
 * just creates the entry in the soft version of the
 * VFTA, init will repopulate the real table.
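 *
 * The shadow VFTA mirrors the hardware's array of 32-bit VLAN
 * filter words. For example, vtag 100 maps to
 * index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4, so the
 * update below performs shadow_vfta[3] |= (1 << 4).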
 ************************************************************************/
static void
ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    sc->shadow_vfta[index] |= (1 << bit);
    ++sc->num_vlans;
    ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_register */

/************************************************************************
 * ixgbe_if_vlan_unregister
 *
 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
 ************************************************************************/
static void
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    sc->shadow_vfta[index] &= ~(1 << bit);
    --sc->num_vlans;
    /* Re-init to load the changes */
    ixgbe_setup_vlan_hw_support(ctx);
} /* ixgbe_if_vlan_unregister */

/************************************************************************
 * ixgbe_setup_vlan_hw_support
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
    if_t ifp = iflib_get_ifp(ctx);
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct rx_ring *rxr;
    int i;
    u32 ctrl;

    /*
     * We get here through init_locked, meaning a soft reset;
     * that has already cleared the VFTA and other state, so
     * if no VLANs have been registered, do nothing.
     */
    if (sc->num_vlans == 0 ||
        (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
        /* Clear the vlan hw flag */
        for (i = 0; i < sc->num_rx_queues; i++) {
            rxr = &sc->rx_queues[i].rxr;
            /* On 82599 the VLAN enable is per/queue in RXDCTL */
            if (hw->mac.type != ixgbe_mac_82598EB) {
                ctrl = IXGBE_READ_REG(hw,
                    IXGBE_RXDCTL(rxr->me));
                ctrl &= ~IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
                    ctrl);
            }
            rxr->vtag_strip = false;
        }
        ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        /* Disable the VLAN filter table */
        ctrl |= IXGBE_VLNCTRL_CFIEN;
        ctrl &= ~IXGBE_VLNCTRL_VFE;
        if (hw->mac.type == ixgbe_mac_82598EB)
            ctrl &= ~IXGBE_VLNCTRL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
        return;
    }

    /* Setup the queues for vlans */
    if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
        for (i = 0; i < sc->num_rx_queues; i++) {
            rxr = &sc->rx_queues[i].rxr;
            /* On 82599 the VLAN enable is per/queue in RXDCTL */
            if (hw->mac.type != ixgbe_mac_82598EB) {
                ctrl = IXGBE_READ_REG(hw,
                    IXGBE_RXDCTL(rxr->me));
                ctrl |= IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
                    ctrl);
            }
            rxr->vtag_strip = true;
        }
    }

    if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
        return;
    /*
     * A soft reset zeroes out the VFTA, so
     * we need to repopulate it now.
     */
    for (i = 0; i < IXGBE_VFTA_SIZE; i++)
        if (sc->shadow_vfta[i] != 0)
            IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
                sc->shadow_vfta[i]);

    ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
    /* Enable the Filter Table if enabled */
    if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
        ctrl &= ~IXGBE_VLNCTRL_CFIEN;
        ctrl |= IXGBE_VLNCTRL_VFE;
    }
    if (hw->mac.type == ixgbe_mac_82598EB)
        ctrl |= IXGBE_VLNCTRL_VME;
    IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */

/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct ixgbe_softc *sc)
{
    device_t dev = iflib_get_dev(sc->ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int bus_info_valid = true;
    u32 offset;
    u16 link;

    /* Some devices are behind an internal bridge */
    switch (hw->device_id) {
    case IXGBE_DEV_ID_82599_SFP_SF_QP:
    case IXGBE_DEV_ID_82599_QSFP_SF_QP:
        goto get_parent_info;
    default:
        break;
    }

    ixgbe_get_bus_info(hw);

    /*
     * Some devices don't use PCI-E; for those there is no
     * point in displaying "Unknown" for bus speed and width,
     * so simply return.
     */
    switch (hw->mac.type) {
    case ixgbe_mac_X550EM_x:
    case ixgbe_mac_X550EM_a:
        return;
    default:
        goto display;
    }

get_parent_info:
    /*
     * For the Quad port adapter we need to parse back
     * up the PCI tree to find the speed of the expansion
     * slot into which this adapter is plugged. A bit more work.
     */
    dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
    device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
        pci_get_slot(dev), pci_get_function(dev));
#endif
    dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
    device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
        pci_get_slot(dev), pci_get_function(dev));
#endif
    /* Now get the PCI Express Capabilities offset */
    if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
        /*
         * Hmm...can't get PCI-Express capabilities.
         * Falling back to default method.
         */
        bus_info_valid = false;
        ixgbe_get_bus_info(hw);
        goto display;
    }
    /* ...and read the Link Status Register */
    link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
    ixgbe_set_pci_config_data_generic(hw, link);

display:
    device_printf(dev, "PCI Express Bus: Speed %s %s\n",
        ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
        (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
        (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
        "Unknown"),
        ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
        (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
        (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
        "Unknown"));

    if (bus_info_valid) {
        if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
            ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
            (hw->bus.speed == ixgbe_bus_speed_2500))) {
            device_printf(dev,
                "PCI-Express bandwidth available for this card"
                " is not sufficient for optimal performance.\n");
            device_printf(dev,
                "For optimal performance a x8 PCIE, or x4 PCIE"
                " Gen2 slot is required.\n");
        }
        if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
            ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
            (hw->bus.speed < ixgbe_bus_speed_8000))) {
            device_printf(dev,
                "PCI-Express bandwidth available for this card"
                " is not sufficient for optimal performance.\n");
            device_printf(dev,
                "For optimal performance a x8 PCIE Gen3 slot is"
                " required.\n");
        }
    } else
        device_printf(dev,
            "Unable to determine slot speed/width. The speed/width"
            " reported are that of the internal switch.\n");

    return;
} /* ixgbe_get_slot_info */

/************************************************************************
 * ixgbe_if_msix_intr_assign
 *
 * Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *rx_que = sc->rx_queues;
    struct ix_tx_queue *tx_que;
    int error, rid, vector = 0;
    char buf[16];

    /*
     * Queue vectors are assigned first and the admin vector last;
     * MSI-X resource IDs are 1-based, hence rid = vector + 1.
     */
    rid = vector + 1;
    for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
        rid = vector + 1;

        snprintf(buf, sizeof(buf), "rxq%d", i);
        error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
            IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
            buf);

        if (error) {
            device_printf(iflib_get_dev(ctx),
                "Failed to allocate que int %d err: %d\n",
                i, error);
            sc->num_rx_queues = i + 1;
            goto fail;
        }

        rx_que->msix = vector;
    }
    for (int i = 0; i < sc->num_tx_queues; i++) {
        snprintf(buf, sizeof(buf), "txq%d", i);
        tx_que = &sc->tx_queues[i];
        tx_que->msix = i % sc->num_rx_queues;
        iflib_softirq_alloc_generic(ctx,
            &sc->rx_queues[tx_que->msix].que_irq,
            IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
    }
    rid = vector + 1;
    error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
        IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
    if (error) {
        device_printf(iflib_get_dev(ctx),
            "Failed to register admin handler\n");
        return (error);
    }

    sc->vector = vector;

    return (0);
fail:
    iflib_irq_free(ctx, &sc->irq);
    rx_que = sc->rx_queues;
    for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
        iflib_irq_free(ctx, &rx_que->que_irq);

    return (error);
} /* ixgbe_if_msix_intr_assign */
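/************************************************************************
 * ixgbe_perform_aim - Adaptive Interrupt Moderation
 *
 * Rescales the queue's interrupt interval from the average bytes
 * per packet observed over the last interval. Worked example, as
 * computed by the code below: a 1500-byte average frame gives
 * newitr = 1500 + 24 = 1524, capped by min(1524, 3000); since 1524
 * lies above the 300-1200 mid range it is halved to 762, latched
 * into que->eitr_setting, and written to EITR on the next
 * interrupt.
 ************************************************************************/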
static inline void
ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
{
    uint32_t newitr = 0;
    struct rx_ring *rxr = &que->rxr;
    /* FIXME struct tx_ring *txr = ... ->txr; */

    /*
     * Do Adaptive Interrupt Moderation:
     * - Write out last calculated setting
     * - Calculate based on average size over
     *   the last interval.
     */
    if (que->eitr_setting) {
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
            que->eitr_setting);
    }

    que->eitr_setting = 0;
    /* Idle, do nothing */
    if (rxr->bytes == 0) {
        /* FIXME && txr->bytes == 0 */
        return;
    }

    if ((rxr->bytes) && (rxr->packets))
        newitr = rxr->bytes / rxr->packets;
    /* FIXME for transmit accounting
     * if ((txr->bytes) && (txr->packets))
     *     newitr = txr->bytes/txr->packets;
     * if ((rxr->bytes) && (rxr->packets))
     *     newitr = max(newitr, (rxr->bytes / rxr->packets));
     */

    newitr += 24; /* account for frame overhead (preamble, CRC, IFG) */
    /* set an upper boundary */
    newitr = min(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200)) {
        newitr = (newitr / 3);
    } else {
        newitr = (newitr / 2);
    }

    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
        /* 82598 must see the interval in both 16-bit halves */
        newitr |= newitr << 16;
    } else {
        /* set WDIS so this write does not disturb the counter */
        newitr |= IXGBE_EITR_CNT_WDIS;
    }

    /* save for next interrupt */
    que->eitr_setting = newitr;

    /* Reset state */
    /* FIXME txr->bytes = 0; */
    /* FIXME txr->packets = 0; */
    rxr->bytes = 0;
    rxr->packets = 0;
} /* ixgbe_perform_aim */

/*********************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
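 *
 * Returning FILTER_SCHEDULE_THREAD asks iflib to run the queue's
 * task for the actual RX/TX processing; FILTER_HANDLED indicates
 * the interrupt required no further work.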
 **********************************************************************/
static int
ixgbe_msix_que(void *arg)
{
    struct ix_rx_queue *que = arg;
    struct ixgbe_softc *sc = que->sc;
    if_t ifp = iflib_get_ifp(que->sc->ctx);

    /* Protect against spurious interrupts */
    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
        return (FILTER_HANDLED);

    ixgbe_disable_queue(sc, que->msix);
    ++que->irqs;

    /* Check for AIM */
    if (sc->enable_aim) {
        ixgbe_perform_aim(sc, que);
    }

    return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_msix_que */

/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 * Called whenever the user queries the status of
 * the interface using ifconfig.
 ************************************************************************/
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int layer;

    INIT_DEBUGOUT("ixgbe_if_media_status: begin");

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_active)
        return;

    ifmr->ifm_status |= IFM_ACTIVE;
    layer = sc->phy_layer;

    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
        layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
        layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_100_FULL:
            ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_10_FULL:
            ifmr->ifm_active |= IFM_10_T | IFM_FDX;
            break;
        }
    if (hw->mac.type == ixgbe_mac_X550)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_5GB_FULL:
            ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
        layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
            break;
        }
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
            break;
        }
    /*
     * XXX: These need to use the proper media types once
     * they're added.
     */
#ifndef IFM_ETH_XTYPE
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
            break;
        }
    else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
        layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
            break;
        }
#else
    if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
            break;
        }
    else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
        layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
        layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
        switch (sc->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
            ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
            ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
            break;
        case IXGBE_LINK_SPEED_1GB_FULL:
            ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
            break;
        }
#endif

    /* If nothing is recognized... */
    if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
        ifmr->ifm_active |= IFM_UNKNOWN;

    /* Display current flow control setting used on link */
    if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
        hw->fc.current_mode == ixgbe_fc_full)
        ifmr->ifm_active |= IFM_ETH_RXPAUSE;
    if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
        hw->fc.current_mode == ixgbe_fc_full)
        ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_media_status */

/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 * Called when the user changes speed/duplex using
 * the media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ifmedia *ifm = iflib_get_media(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    ixgbe_link_speed speed = 0;

    INIT_DEBUGOUT("ixgbe_if_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    if (hw->phy.media_type == ixgbe_media_type_backplane)
        return (EPERM);

    /*
     * We don't actually need to check against the supported
     * media types of the adapter; ifmedia will take care of
     * that for us.
     */
    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
    case IFM_10G_T:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
    case IFM_10G_LRM:
    case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
    case IFM_10G_SR: /* KR, too */
    case IFM_10G_CX4: /* KX4 */
#else
    case IFM_10G_KR:
    case IFM_10G_KX4:
#endif
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
#ifndef IFM_ETH_XTYPE
    case IFM_1000_CX: /* KX */
#else
    case IFM_1000_KX:
#endif
    case IFM_1000_LX:
    case IFM_1000_SX:
    case IFM_1000_BX:
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        break;
    case IFM_1000_T:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        speed |= IXGBE_LINK_SPEED_1GB_FULL;
        break;
    case IFM_10G_TWINAX:
        speed |= IXGBE_LINK_SPEED_10GB_FULL;
        break;
    case IFM_5000_T:
        speed |= IXGBE_LINK_SPEED_5GB_FULL;
        break;
    case IFM_2500_T:
        speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
        break;
    case IFM_100_TX:
        speed |= IXGBE_LINK_SPEED_100_FULL;
        break;
    case IFM_10_T:
        speed |= IXGBE_LINK_SPEED_10_FULL;
        break;
    default:
        goto invalid;
    }

    hw->mac.autotry_restart = true;
    hw->mac.ops.setup_link(hw, speed, true);
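    /*
     * sc->advertise uses the same bit encoding as the
     * advertise_speed sysctl: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G,
     * 0x8 = 10M, 0x10 = 2.5G, 0x20 = 5G.
     */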
    sc->advertise =
        ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
        ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
        ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
        ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
        ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
        ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);

    return (0);

invalid:
    device_printf(iflib_get_dev(ctx), "Invalid media type!\n");

    return (EINVAL);
} /* ixgbe_if_media_change */

/************************************************************************
 * ixgbe_set_promisc
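 *
 * Invoked by iflib when promiscuous or all-multicast mode changes,
 * e.g. after "ifconfig ix0 promisc" (interface name chosen purely
 * for illustration).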
 ************************************************************************/
static int
ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    u32 rctl;
    int mcnt = 0;

    rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
    rctl &= (~IXGBE_FCTRL_UPE);
    if (if_getflags(ifp) & IFF_ALLMULTI)
        mcnt = MAX_NUM_MULTICAST_ADDRESSES;
    else {
        mcnt = min(if_llmaddr_count(ifp),
            MAX_NUM_MULTICAST_ADDRESSES);
    }
    if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
        rctl &= (~IXGBE_FCTRL_MPE);
    IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);

    if (if_getflags(ifp) & IFF_PROMISC) {
        rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
    } else if (if_getflags(ifp) & IFF_ALLMULTI) {
        rctl |= IXGBE_FCTRL_MPE;
        rctl &= ~IXGBE_FCTRL_UPE;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
    }
    return (0);
} /* ixgbe_if_promisc_set */

/************************************************************************
 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
    struct ixgbe_softc *sc = arg;
    struct ixgbe_hw *hw = &sc->hw;
    u32 eicr, eicr_mask;
    s32 retval;

    ++sc->link_irq;

    /* Pause other interrupts */
    IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

    /* First get the cause */
    eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
    /* Be sure the queue bits are not cleared */
    eicr &= ~IXGBE_EICR_RTX_QUEUE;
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

    /* Link status change */
    if (eicr & IXGBE_EICR_LSC) {
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
        sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
    }

    if (sc->hw.mac.type != ixgbe_mac_82598EB) {
        if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
            (eicr & IXGBE_EICR_FLOW_DIR)) {
            /* This is probably overkill :) */
            if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
                return (FILTER_HANDLED);
            /* Disable the interrupt */
            IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
            sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
        } else if (eicr & IXGBE_EICR_ECC) {
            device_printf(iflib_get_dev(sc->ctx),
                "Received ECC Err, initiating reset\n");
            /* A full double reset is needed to recover */
            hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
            ixgbe_reset_hw(hw);
            IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
        }

        /* Check for over temp condition */
        if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
            switch (sc->hw.mac.type) {
            case ixgbe_mac_X550EM_a:
                if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
                    break;
                IXGBE_WRITE_REG(hw, IXGBE_EIMC,
                    IXGBE_EICR_GPI_SDP0_X550EM_a);
                IXGBE_WRITE_REG(hw, IXGBE_EICR,
                    IXGBE_EICR_GPI_SDP0_X550EM_a);
                retval = hw->phy.ops.check_overtemp(hw);
                if (retval != IXGBE_ERR_OVERTEMP)
                    break;
                device_printf(iflib_get_dev(sc->ctx),
                    "\nCRITICAL: OVER TEMP!!"
                    " PHY IS SHUT DOWN!!\n");
                device_printf(iflib_get_dev(sc->ctx),
                    "System shutdown required!\n");
                break;
            default:
                if (!(eicr & IXGBE_EICR_TS))
                    break;
                retval = hw->phy.ops.check_overtemp(hw);
                if (retval != IXGBE_ERR_OVERTEMP)
                    break;
                device_printf(iflib_get_dev(sc->ctx),
                    "\nCRITICAL: OVER TEMP!!"
                    " PHY IS SHUT DOWN!!\n");
                device_printf(iflib_get_dev(sc->ctx),
                    "System shutdown required!\n");
                IXGBE_WRITE_REG(hw, IXGBE_EICR,
                    IXGBE_EICR_TS);
                break;
            }
        }

        /* Check for VF message */
        if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
            (eicr & IXGBE_EICR_MAILBOX))
            sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
    }

    if (ixgbe_is_sfp(hw)) {
        /* Pluggable optics-related interrupt */
        if (hw->mac.type >= ixgbe_mac_X540)
            eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
        else
            eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

        if (eicr & eicr_mask) {
            IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
            sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
        }

        if ((hw->mac.type == ixgbe_mac_82599EB) &&
            (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
            IXGBE_WRITE_REG(hw, IXGBE_EICR,
                IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
            sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
        }
    }

    /* Check for fan failure */
    if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
        ixgbe_check_fan_failure(sc, eicr, true);
        IXGBE_WRITE_REG(hw, IXGBE_EICR,
            IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
    }

    /* External PHY interrupt */
    if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
        (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
        sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
    }

    return (sc->task_requests != 0) ?
        FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
} /* ixgbe_msix_link */

/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
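 *
 * EITR stores the interrupt interval in bits 11:3 in units of
 * 2 usec, so rate = 500000 / field. Worked example: writing a rate
 * of 8000 stores (4000000 / 8000) & 0xff8 = 0x1f0, a field value
 * of 62, which reads back as 500000 / 62 = ~8064 interrupts/sec.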
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
    struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
    int error;
    unsigned int reg, usec, rate;

    if (atomic_load_acq_int(&que->sc->recovery_mode))
        return (EPERM);

    reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
    usec = ((reg & 0x0FF8) >> 3);
    if (usec > 0)
        rate = 500000 / usec;
    else
        rate = 0;
    error = sysctl_handle_int(oidp, &rate, 0, req);
    if (error || !req->newptr)
        return (error);
    reg &= ~0xfff; /* default, no limitation */
    ixgbe_max_interrupt_rate = 0;
    if (rate > 0 && rate < 500000) {
        if (rate < 1000)
            rate = 1000;
        ixgbe_max_interrupt_rate = rate;
        reg |= ((4000000 / rate) & 0xff8);
    }
    IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);

    return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */

/************************************************************************
 * ixgbe_add_device_sysctls
 ************************************************************************/
static void
ixgbe_add_device_sysctls(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct sysctl_oid_list *child;
    struct sysctl_ctx_list *ctx_list;

    ctx_list = device_get_sysctl_ctx(dev);
    child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

    /* Sysctls for all devices */
    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
        CTLTYPE_INT | CTLFLAG_RW,
        sc, 0, ixgbe_sysctl_flowcntl, "I",
        IXGBE_SYSCTL_DESC_SET_FC);

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
        CTLTYPE_INT | CTLFLAG_RW,
        sc, 0, ixgbe_sysctl_advertise, "I",
        IXGBE_SYSCTL_DESC_ADV_SPEED);

    sc->enable_aim = ixgbe_enable_aim;
    SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
        &sc->enable_aim, 0, "Interrupt Moderation");

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
        CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
        ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
        "tso_tcp_flags_mask_first_segment",
        CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
        "TSO TCP flags mask for first segment");

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
        "tso_tcp_flags_mask_middle_segment",
        CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 1, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
        "TSO TCP flags mask for middle segment");

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
        "tso_tcp_flags_mask_last_segment",
        CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 2, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
        "TSO TCP flags mask for last segment");

#ifdef IXGBE_DEBUG
    /* testing sysctls (for all devices) */
    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
        CTLTYPE_INT | CTLFLAG_RW,
        sc, 0, ixgbe_sysctl_power_state,
        "I", "PCI Power State");

    SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
        CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
        ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
    /* for X550 series devices */
    if (hw->mac.type >= ixgbe_mac_X550)
        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
            CTLTYPE_U16 | CTLFLAG_RW,
            sc, 0, ixgbe_sysctl_dmac,
            "I", "DMA Coalesce");

    /* for WoL-capable devices */
    if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
            CTLTYPE_INT | CTLFLAG_RW, sc, 0,
            ixgbe_sysctl_wol_enable, "I",
            "Enable/Disable Wake on LAN");

        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
            CTLTYPE_U32 | CTLFLAG_RW,
            sc, 0, ixgbe_sysctl_wufc,
            "I", "Enable/Disable Wake Up Filters");
    }

    /* for X552/X557-AT devices */
    if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
        struct sysctl_oid *phy_node;
        struct sysctl_oid_list *phy_list;

        phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
            "External PHY sysctls");
        phy_list = SYSCTL_CHILDREN(phy_node);

        SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
            CTLTYPE_U16 | CTLFLAG_RD,
            sc, 0, ixgbe_sysctl_phy_temp,
            "I", "Current External PHY Temperature (Celsius)");

        SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
            "overtemp_occurred",
            CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
            ixgbe_sysctl_phy_overtemp_occurred, "I",
            "External PHY High Temperature Event Occurred");
    }

    if (sc->feat_cap & IXGBE_FEATURE_EEE) {
        SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
            CTLTYPE_INT | CTLFLAG_RW, sc, 0,
            ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
    }
} /* ixgbe_add_device_sysctls */

/************************************************************************
 * ixgbe_allocate_pci_resources
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    int rid;

    rid = PCIR_BAR(0);
    sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);

    if (!(sc->pci_mem)) {
        device_printf(dev,
            "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    /* Save bus_space values for READ/WRITE_REG macros */
    sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
    sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
    /* Set hw values for shared code */
    sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

    return (0);
} /* ixgbe_allocate_pci_resources */

/************************************************************************
 * ixgbe_detach - Device removal routine
 *
 * Called when the driver is being removed.
 * Stops the adapter and deallocates all the resources
 * that were allocated for driver operation.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_if_detach(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    u32 ctrl_ext;

    INIT_DEBUGOUT("ixgbe_detach: begin");

    if (ixgbe_pci_iov_detach(dev) != 0) {
        device_printf(dev, "SR-IOV in use; detach first.\n");
        return (EBUSY);
    }

    ixgbe_setup_low_power_mode(ctx);

    /* let hardware know driver is unloading */
    ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
    ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);

    callout_drain(&sc->fw_mode_timer);

    ixgbe_free_pci_resources(ctx);
    free(sc->mta, M_IXGBE);

    return (0);
} /* ixgbe_if_detach */

/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 * Prepare the adapter/port for LPLU and/or WoL
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    device_t dev = iflib_get_dev(ctx);
    s32 error = 0;

    if (!hw->wol_enabled)
        ixgbe_set_phy_power(hw, false);

    /* Limit power management flow to X550EM baseT */
    if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
        hw->phy.ops.enter_lplu) {
        /* Turn off support for APM wakeup. (Using ACPI instead) */
        IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
            IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

        /*
         * Clear Wake Up Status register to prevent any previous
         * wakeup events from waking us up immediately after we
         * suspend.
         */
        IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

        /*
         * Program the Wakeup Filter Control register with user filter
         * settings
         */
        IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);

        /* Enable wakeups and power management in Wakeup Control */
        IXGBE_WRITE_REG(hw, IXGBE_WUC,
            IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

        /* X550EM baseT adapters need a special LPLU flow */
        hw->phy.reset_disable = true;
        ixgbe_if_stop(ctx);
        error = hw->phy.ops.enter_lplu(hw);
        if (error)
            device_printf(dev, "Error entering LPLU: %d\n",
                error);
        hw->phy.reset_disable = false;
    } else {
        /* Just stop for other adapters */
        ixgbe_if_stop(ctx);
    }

    return (error);
} /* ixgbe_setup_low_power_mode */

/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 ************************************************************************/
static int
ixgbe_if_shutdown(if_ctx_t ctx)
{
    int error = 0;

    INIT_DEBUGOUT("ixgbe_shutdown: begin");

    error = ixgbe_setup_low_power_mode(ctx);

    return (error);
} /* ixgbe_if_shutdown */

/************************************************************************
 * ixgbe_suspend
 *
 * From D0 to D3
 ************************************************************************/
static int
ixgbe_if_suspend(if_ctx_t ctx)
{
    int error = 0;

    INIT_DEBUGOUT("ixgbe_suspend: begin");

    error = ixgbe_setup_low_power_mode(ctx);

    return (error);
} /* ixgbe_if_suspend */

/************************************************************************
 * ixgbe_resume
 *
 * From D3 to D0
 ************************************************************************/
static int
ixgbe_if_resume(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    u32 wus;

    INIT_DEBUGOUT("ixgbe_resume: begin");

    /* Read & clear WUS register */
    wus = IXGBE_READ_REG(hw, IXGBE_WUS);
    if (wus)
        device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
    IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
    /* And clear WUFC until next low-power transition */
    IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

    /*
     * Required after D3->D0 transition;
     * will re-advertise all previously advertised speeds
     */
    if (if_getflags(ifp) & IFF_UP)
        ixgbe_if_init(ctx);

    return (0);
} /* ixgbe_if_resume */

/************************************************************************
 * ixgbe_if_mtu_set - Ioctl mtu entry point
 *
 * Return 0 on success, EINVAL on failure
 ************************************************************************/
static int
ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    int error = 0;

    IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");

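    /*
     * max_frame_size is the MTU plus IXGBE_MTU_HDR, which covers
     * the link-level framing bytes the hardware sees on top of the
     * L3 MTU (nominally the Ethernet header and CRC).
     */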
    if (mtu > IXGBE_MAX_MTU) {
        error = EINVAL;
    } else {
        sc->max_frame_size = mtu + IXGBE_MTU_HDR;
    }

    return (error);
} /* ixgbe_if_mtu_set */

/************************************************************************
 * ixgbe_if_crcstrip_set
 ************************************************************************/
static void
ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    /*
     * CRC stripping is set in two places:
     *   IXGBE_HLREG0 (modified on init_locked and hw reset)
     *   IXGBE_RDRXCTL (set by the original driver in
     *     ixgbe_setup_hw_rsc(), called in init_locked;
     *     we disable the setting when netmap is compiled in)
     * We update the values here, but also in ixgbe.c because
     * init_locked sometimes is called outside our control.
     */
    uint32_t hl, rxc;

    hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
#ifdef NETMAP
    if (netmap_verbose)
        D("%s read HLREG 0x%x rxc 0x%x",
            onoff ? "enter" : "exit", hl, rxc);
#endif
    /* hw requirements ... */
    rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
    rxc |= IXGBE_RDRXCTL_RSCACKC;
    if (onoff && !crcstrip) {
        /* keep the CRC. Fast rx */
        hl &= ~IXGBE_HLREG0_RXCRCSTRP;
        rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
    } else {
        /* reset default mode */
        hl |= IXGBE_HLREG0_RXCRCSTRP;
        rxc |= IXGBE_RDRXCTL_CRCSTRIP;
    }
#ifdef NETMAP
    if (netmap_verbose)
        D("%s write HLREG 0x%x rxc 0x%x",
            onoff ? "enter" : "exit", hl, rxc);
#endif
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
    IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
} /* ixgbe_if_crcstrip_set */
3184
3185 /*********************************************************************
3186 * ixgbe_if_init - Init entry point
3187 *
3188  *   Used in two ways: it is used by the stack as an init
3189  *   entry point in the network interface structure, and also
3190  *   by the driver as a hw/sw initialization routine to get
3191  *   to a consistent state.
3192  *
3193  *   No return value; the function is void and failures are logged.
3194 **********************************************************************/
3195 void
3196 ixgbe_if_init(if_ctx_t ctx)
3197 {
3198 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3199 if_t ifp = iflib_get_ifp(ctx);
3200 device_t dev = iflib_get_dev(ctx);
3201 struct ixgbe_hw *hw = &sc->hw;
3202 struct ix_rx_queue *rx_que;
3203 struct ix_tx_queue *tx_que;
3204 u32 txdctl, mhadd;
3205 u32 rxdctl, rxctrl;
3206 u32 ctrl_ext;
3207
3208 int i, j, err;
3209
3210 INIT_DEBUGOUT("ixgbe_if_init: begin");
3211
3212 /* Queue indices may change with IOV mode */
3213 ixgbe_align_all_queue_indices(sc);
3214
3215 /* reprogram the RAR[0] in case user changed it. */
3216 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
3217
3218 /* Get the latest mac address, User can use a LAA */
3219 bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3220 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
3221 hw->addr_ctrl.rar_used_count = 1;
3222
3223 ixgbe_init_hw(hw);
3224
3225 ixgbe_initialize_iov(sc);
3226
3227 ixgbe_initialize_transmit_units(ctx);
3228
3229 /* Setup Multicast table */
3230 ixgbe_if_multi_set(ctx);
3231
3232 /* Determine the correct mbuf pool, based on frame size */
3233 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
3234
3235 /* Configure RX settings */
3236 ixgbe_initialize_receive_units(ctx);
3237
3238 /*
3239 * Initialize variable holding task enqueue requests
3240 * from MSI-X interrupts
3241 */
3242 sc->task_requests = 0;
3243
3244 /* Enable SDP & MSI-X interrupts based on adapter */
3245 ixgbe_config_gpie(sc);
3246
3247 /* Set MTU size */
3248 if (if_getmtu(ifp) > ETHERMTU) {
3249 /* aka IXGBE_MAXFRS on 82599 and newer */
3250 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3251 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3252 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3253 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3254 }
3255
3256 /* Now enable all the queues */
3257 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
3258 i++, tx_que++) {
3259 struct tx_ring *txr = &tx_que->txr;
3260
3261 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3262 txdctl |= IXGBE_TXDCTL_ENABLE;
3263 /* Set WTHRESH to 8, burst writeback */
3264 txdctl |= (8 << 16);
3265 /*
3266 * When the internal queue falls below PTHRESH (32),
3267 * start prefetching as long as there are at least
3268 * HTHRESH (1) buffers ready. The values are taken
3269 * from the Intel linux driver 3.8.21.
3270 * Prefetching enables tx line rate even with 1 queue.
3271 */
3272 txdctl |= (32 << 0) | (1 << 8);
3273 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3274 }
3275
3276 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
3277 i++, rx_que++) {
3278 struct rx_ring *rxr = &rx_que->rxr;
3279
3280 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3281 if (hw->mac.type == ixgbe_mac_82598EB) {
3282 /*
3283 			 * PTHRESH = 32 (0x20, the low bits of 0x080420)
3284 * HTHRESH = 4
3285 * WTHRESH = 8
3286 */
3287 rxdctl &= ~0x3FFFFF;
3288 rxdctl |= 0x080420;
3289 }
3290 rxdctl |= IXGBE_RXDCTL_ENABLE;
3291 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
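		/* Poll up to ~10 ms for RXDCTL.ENABLE to latch */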
3292 for (j = 0; j < 10; j++) {
3293 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3294 IXGBE_RXDCTL_ENABLE)
3295 break;
3296 else
3297 msec_delay(1);
3298 }
3299 wmb();
3300 }
3301
3302 /* Enable Receive engine */
3303 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3304 if (hw->mac.type == ixgbe_mac_82598EB)
3305 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3306 rxctrl |= IXGBE_RXCTRL_RXEN;
3307 ixgbe_enable_rx_dma(hw, rxctrl);
3308
3309 /* Set up MSI/MSI-X routing */
3310 if (ixgbe_enable_msix) {
3311 ixgbe_configure_ivars(sc);
3312 /* Set up auto-mask */
3313 if (hw->mac.type == ixgbe_mac_82598EB)
3314 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3315 else {
3316 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3317 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3318 }
3319 } else { /* Simple settings for Legacy/MSI */
3320 ixgbe_set_ivar(sc, 0, 0, 0);
3321 ixgbe_set_ivar(sc, 0, 0, 1);
3322 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3323 }
3324
3325 ixgbe_init_fdir(sc);
3326
3327 /*
3328 * Check on any SFP devices that
3329 * need to be kick-started
3330 */
3331 if (hw->phy.type == ixgbe_phy_none) {
3332 err = hw->phy.ops.identify(hw);
3333 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3334 device_printf(dev,
3335 "Unsupported SFP+ module type was detected.\n");
3336 return;
3337 }
3338 }
3339
3340 /* Set moderation on the Link interrupt */
3341 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);
3342
3343 /* Enable power to the phy. */
3344 ixgbe_set_phy_power(hw, true);
3345
3346 /* Config/Enable Link */
3347 ixgbe_config_link(ctx);
3348
3349 /* Hardware Packet Buffer & Flow Control setup */
3350 ixgbe_config_delay_values(sc);
3351
3352 /* Initialize the FC settings */
3353 ixgbe_start_hw(hw);
3354
3355 /* Set up VLAN support and filter */
3356 ixgbe_setup_vlan_hw_support(ctx);
3357
3358 /* Setup DMA Coalescing */
3359 ixgbe_config_dmac(sc);
3360
3361 /* And now turn on interrupts */
3362 ixgbe_if_enable_intr(ctx);
3363
3364 	/* Enable use of the mailbox (MBX) by the VFs */
3365 if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
3366 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3367 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3368 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3369 }
3370
3371 } /* ixgbe_if_init */
3372
3373 /************************************************************************
3374 * ixgbe_set_ivar
3375 *
3376 * Setup the correct IVAR register for a particular MSI-X interrupt
3377 * (yes this is all very magic and confusing :)
3378 * - entry is the register array entry
3379 * - vector is the MSI-X vector for this queue
3380 * - type is RX/TX/MISC
3381 ************************************************************************/
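/*
 * Worked example (illustrative, 82599-class MACs): entry 3 with type 1
 * (TX) selects IVAR(3 >> 1) = IVAR(1) and bit offset
 * (16 * (3 & 1)) + (8 * 1) = 24, i.e. the top byte of that register.
 */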
3382 static void
3383 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3384 {
3385 struct ixgbe_hw *hw = &sc->hw;
3386 u32 ivar, index;
3387
3388 vector |= IXGBE_IVAR_ALLOC_VAL;
3389
3390 switch (hw->mac.type) {
3391 case ixgbe_mac_82598EB:
3392 if (type == -1)
3393 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3394 else
3395 entry += (type * 64);
3396 index = (entry >> 2) & 0x1F;
3397 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3398 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3399 ivar |= (vector << (8 * (entry & 0x3)));
3400 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3401 break;
3402 case ixgbe_mac_82599EB:
3403 case ixgbe_mac_X540:
3404 case ixgbe_mac_X550:
3405 case ixgbe_mac_X550EM_x:
3406 case ixgbe_mac_X550EM_a:
3407 if (type == -1) { /* MISC IVAR */
3408 index = (entry & 1) * 8;
3409 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3410 ivar &= ~(0xFF << index);
3411 ivar |= (vector << index);
3412 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3413 } else { /* RX/TX IVARS */
3414 index = (16 * (entry & 1)) + (8 * type);
3415 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3416 ivar &= ~(0xFF << index);
3417 ivar |= (vector << index);
3418 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3419 		}
		break;
3420 default:
3421 break;
3422 }
3423 } /* ixgbe_set_ivar */
3424
3425 /************************************************************************
3426 * ixgbe_configure_ivars
3427 ************************************************************************/
3428 static void
3429 ixgbe_configure_ivars(struct ixgbe_softc *sc)
3430 {
3431 struct ix_rx_queue *rx_que = sc->rx_queues;
3432 struct ix_tx_queue *tx_que = sc->tx_queues;
3433 u32 newitr;
3434
3435 if (ixgbe_max_interrupt_rate > 0)
3436 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3437 else {
3438 /*
3439 * Disable DMA coalescing if interrupt moderation is
3440 * disabled.
3441 */
3442 sc->dmac = 0;
3443 newitr = 0;
3444 }
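	/*
	 * Illustrative math: with ixgbe_max_interrupt_rate = 8000,
	 * newitr = (4000000 / 8000) & 0x0FF8 = 0x1F0; the AND rounds the
	 * value down to the 8-unit alignment the EITR register expects.
	 */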
3445
3446 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
3447 struct rx_ring *rxr = &rx_que->rxr;
3448
3449 /* First the RX queue entry */
3450 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
3451
3452 /* Set an Initial EITR value */
3453 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
3454 }
3455 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
3456 struct tx_ring *txr = &tx_que->txr;
3457
3458 /* ... and the TX */
3459 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
3460 }
3461 /* For the Link interrupt */
3462 ixgbe_set_ivar(sc, 1, sc->vector, -1);
3463 } /* ixgbe_configure_ivars */
3464
3465 /************************************************************************
3466 * ixgbe_config_gpie
3467 ************************************************************************/
3468 static void
3469 ixgbe_config_gpie(struct ixgbe_softc *sc)
3470 {
3471 struct ixgbe_hw *hw = &sc->hw;
3472 u32 gpie;
3473
3474 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3475
3476 if (sc->intr_type == IFLIB_INTR_MSIX) {
3477 /* Enable Enhanced MSI-X mode */
3478 gpie |= IXGBE_GPIE_MSIX_MODE |
3479 IXGBE_GPIE_EIAME |
3480 IXGBE_GPIE_PBA_SUPPORT |
3481 IXGBE_GPIE_OCD;
3482 }
3483
3484 /* Fan Failure Interrupt */
3485 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3486 gpie |= IXGBE_SDP1_GPIEN;
3487
3488 /* Thermal Sensor Interrupt */
3489 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3490 gpie |= IXGBE_SDP0_GPIEN_X540;
3491
3492 /* Link detection */
3493 switch (hw->mac.type) {
3494 case ixgbe_mac_82599EB:
3495 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3496 break;
3497 case ixgbe_mac_X550EM_x:
3498 case ixgbe_mac_X550EM_a:
3499 gpie |= IXGBE_SDP0_GPIEN_X540;
3500 break;
3501 default:
3502 break;
3503 }
3504
3505 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3506
3507 } /* ixgbe_config_gpie */
3508
3509 /************************************************************************
3510 * ixgbe_config_delay_values
3511 *
3512 * Requires sc->max_frame_size to be set.
3513 ************************************************************************/
3514 static void
3515 ixgbe_config_delay_values(struct ixgbe_softc *sc)
3516 {
3517 struct ixgbe_hw *hw = &sc->hw;
3518 u32 rxpb, frame, size, tmp;
3519
3520 frame = sc->max_frame_size;
3521
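	/*
	 * Sketch of the intent (as the macro names suggest): the
	 * IXGBE_DV*() macros compute a worst-case delay value in bits,
	 * IXGBE_BT2KB() converts it to KB, and the XOFF (high) and
	 * XON (low) watermarks are set relative to the RX packet
	 * buffer size read from IXGBE_RXPBSIZE.
	 */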
3522 /* Calculate High Water */
3523 switch (hw->mac.type) {
3524 case ixgbe_mac_X540:
3525 case ixgbe_mac_X550:
3526 case ixgbe_mac_X550EM_x:
3527 case ixgbe_mac_X550EM_a:
3528 tmp = IXGBE_DV_X540(frame, frame);
3529 break;
3530 default:
3531 tmp = IXGBE_DV(frame, frame);
3532 break;
3533 }
3534 size = IXGBE_BT2KB(tmp);
3535 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3536 hw->fc.high_water[0] = rxpb - size;
3537
3538 /* Now calculate Low Water */
3539 switch (hw->mac.type) {
3540 case ixgbe_mac_X540:
3541 case ixgbe_mac_X550:
3542 case ixgbe_mac_X550EM_x:
3543 case ixgbe_mac_X550EM_a:
3544 tmp = IXGBE_LOW_DV_X540(frame);
3545 break;
3546 default:
3547 tmp = IXGBE_LOW_DV(frame);
3548 break;
3549 }
3550 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3551
3552 hw->fc.pause_time = IXGBE_FC_PAUSE;
3553 hw->fc.send_xon = true;
3554 } /* ixgbe_config_delay_values */
3555
3556 /************************************************************************
3557  * ixgbe_if_multi_set - Multicast Update
3558 *
3559 * Called whenever multicast address list is updated.
3560 ************************************************************************/
3561 static u_int
3562 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
3563 {
3564 struct ixgbe_softc *sc = arg;
3565 struct ixgbe_mc_addr *mta = sc->mta;
3566
3567 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
3568 return (0);
3569 bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3570 mta[idx].vmdq = sc->pool;
3571
3572 return (1);
3573 } /* ixgbe_mc_filter_apply */
3574
3575 static void
3576 ixgbe_if_multi_set(if_ctx_t ctx)
3577 {
3578 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3579 struct ixgbe_mc_addr *mta;
3580 if_t ifp = iflib_get_ifp(ctx);
3581 u8 *update_ptr;
3582 u32 fctrl;
3583 u_int mcnt;
3584
3585 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
3586
3587 mta = sc->mta;
3588 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3589
3590 	mcnt = if_foreach_llmaddr(ifp, ixgbe_mc_filter_apply, sc);
3592
3593 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3594 update_ptr = (u8 *)mta;
3595 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
3596 ixgbe_mc_array_itr, true);
3597 }
3598
3599 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
3600
3601 if (if_getflags(ifp) & IFF_PROMISC)
3602 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3603 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3604 if_getflags(ifp) & IFF_ALLMULTI) {
3605 fctrl |= IXGBE_FCTRL_MPE;
3606 fctrl &= ~IXGBE_FCTRL_UPE;
3607 } else
3608 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3609
3610 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
3611 } /* ixgbe_if_multi_set */
3612
3613 /************************************************************************
3614 * ixgbe_mc_array_itr
3615 *
3616 * An iterator function needed by the multicast shared code.
3617 * It feeds the shared code routine the addresses in the
3618  *   array of ixgbe_if_multi_set() one by one.
3619 ************************************************************************/
3620 static u8 *
3621 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3622 {
3623 struct ixgbe_mc_addr *mta;
3624
3625 mta = (struct ixgbe_mc_addr *)*update_ptr;
3626 *vmdq = mta->vmdq;
3627
3628 *update_ptr = (u8*)(mta + 1);
3629
3630 return (mta->addr);
3631 } /* ixgbe_mc_array_itr */
3632
3633 /************************************************************************
3634  * ixgbe_if_timer - Timer routine
3635 *
3636 * Checks for link status, updates statistics,
3637 * and runs the watchdog check.
3638 ************************************************************************/
3639 static void
3640 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
3641 {
3642 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3643
3644 if (qid != 0)
3645 return;
3646
3647 /* Check for pluggable optics */
3648 if (sc->sfp_probe)
3649 if (!ixgbe_sfp_probe(ctx))
3650 return; /* Nothing to do */
3651
3652 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
3653
3654 /* Fire off the adminq task */
3655 iflib_admin_intr_deferred(ctx);
3656
3657 } /* ixgbe_if_timer */
3658
3659 /************************************************************************
3660 * ixgbe_fw_mode_timer - FW mode timer routine
3661 ************************************************************************/
3662 static void
3663 ixgbe_fw_mode_timer(void *arg)
3664 {
3665 struct ixgbe_softc *sc = arg;
3666 struct ixgbe_hw *hw = &sc->hw;
3667
3668 if (ixgbe_fw_recovery_mode(hw)) {
3669 if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
3670 /* Firmware error detected, entering recovery mode */
3671 device_printf(sc->dev,
3672 "Firmware recovery mode detected. Limiting"
3673 " functionality. Refer to the Intel(R) Ethernet"
3674 " Adapters and Devices User Guide for details on"
3675 " firmware recovery mode.\n");
3676
3677 if (hw->adapter_stopped == FALSE)
3678 ixgbe_if_stop(sc->ctx);
3679 }
3680 } else
3681 atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
3682
3684 	callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
3686 } /* ixgbe_fw_mode_timer */
3687
3688 /************************************************************************
3689 * ixgbe_sfp_probe
3690 *
3691  *   Determine whether a port has optics inserted.
3692 ************************************************************************/
3693 static bool
3694 ixgbe_sfp_probe(if_ctx_t ctx)
3695 {
3696 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3697 struct ixgbe_hw *hw = &sc->hw;
3698 device_t dev = iflib_get_dev(ctx);
3699 bool result = false;
3700
3701 if ((hw->phy.type == ixgbe_phy_nl) &&
3702 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3703 s32 ret = hw->phy.ops.identify_sfp(hw);
3704 if (ret)
3705 goto out;
3706 ret = hw->phy.ops.reset(hw);
3707 sc->sfp_probe = false;
3708 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3709 device_printf(dev,
3710 "Unsupported SFP+ module detected!");
3711 device_printf(dev,
3712 "Reload driver with supported module.\n");
3713 goto out;
3714 } else
3715 device_printf(dev, "SFP+ module detected!\n");
3716 /* We now have supported optics */
3717 result = true;
3718 }
3719 out:
3720
3721 return (result);
3722 } /* ixgbe_sfp_probe */
3723
3724 /************************************************************************
3725 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3726 ************************************************************************/
3727 static void
3728 ixgbe_handle_mod(void *context)
3729 {
3730 if_ctx_t ctx = context;
3731 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3732 struct ixgbe_hw *hw = &sc->hw;
3733 device_t dev = iflib_get_dev(ctx);
3734 u32 err, cage_full = 0;
3735
3736 if (sc->hw.need_crosstalk_fix) {
3737 switch (hw->mac.type) {
3738 case ixgbe_mac_82599EB:
3739 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3740 IXGBE_ESDP_SDP2;
3741 break;
3742 case ixgbe_mac_X550EM_x:
3743 case ixgbe_mac_X550EM_a:
3744 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3745 IXGBE_ESDP_SDP0;
3746 break;
3747 default:
3748 break;
3749 }
3750
3751 if (!cage_full)
3752 goto handle_mod_out;
3753 }
3754
3755 err = hw->phy.ops.identify_sfp(hw);
3756 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3757 device_printf(dev,
3758 "Unsupported SFP+ module type was detected.\n");
3759 goto handle_mod_out;
3760 }
3761
3762 if (hw->mac.type == ixgbe_mac_82598EB)
3763 err = hw->phy.ops.reset(hw);
3764 else
3765 err = hw->mac.ops.setup_sfp(hw);
3766
3767 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3768 device_printf(dev,
3769 "Setup failure - unsupported SFP+ module type.\n");
3770 goto handle_mod_out;
3771 }
3772 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3773 return;
3774
3775 handle_mod_out:
3776 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3777 } /* ixgbe_handle_mod */
3778
3779
3780 /************************************************************************
3781 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3782 ************************************************************************/
3783 static void
3784 ixgbe_handle_msf(void *context)
3785 {
3786 if_ctx_t ctx = context;
3787 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3788 struct ixgbe_hw *hw = &sc->hw;
3789 u32 autoneg;
3790 bool negotiate;
3791
3792 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3793 sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
3794
3795 autoneg = hw->phy.autoneg_advertised;
3796 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3797 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3798 if (hw->mac.ops.setup_link)
3799 hw->mac.ops.setup_link(hw, autoneg, true);
3800
3801 /* Adjust media types shown in ifconfig */
3802 ifmedia_removeall(sc->media);
3803 ixgbe_add_media_types(sc->ctx);
3804 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3805 } /* ixgbe_handle_msf */
3806
3807 /************************************************************************
3808 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3809 ************************************************************************/
3810 static void
3811 ixgbe_handle_phy(void *context)
3812 {
3813 if_ctx_t ctx = context;
3814 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3815 struct ixgbe_hw *hw = &sc->hw;
3816 int error;
3817
3818 error = hw->phy.ops.handle_lasi(hw);
3819 if (error == IXGBE_ERR_OVERTEMP)
3820 device_printf(sc->dev,
3821 "CRITICAL: EXTERNAL PHY OVER TEMP!!"
3822 " PHY will downshift to lower power state!\n");
3823 else if (error)
3824 device_printf(sc->dev,
3825 "Error handling LASI interrupt: %d\n", error);
3826 } /* ixgbe_handle_phy */
3827
3828 /************************************************************************
3829 * ixgbe_if_stop - Stop the hardware
3830 *
3831 * Disables all traffic on the adapter by issuing a
3832 * global reset on the MAC and deallocates TX/RX buffers.
3833 ************************************************************************/
3834 static void
3835 ixgbe_if_stop(if_ctx_t ctx)
3836 {
3837 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3838 struct ixgbe_hw *hw = &sc->hw;
3839
3840 INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
3841
3842 ixgbe_reset_hw(hw);
3843 hw->adapter_stopped = false;
3844 ixgbe_stop_adapter(hw);
3845 if (hw->mac.type == ixgbe_mac_82599EB)
3846 ixgbe_stop_mac_link_on_d3_82599(hw);
3847 /* Turn off the laser - noop with no optics */
3848 ixgbe_disable_tx_laser(hw);
3849
3850 /* Update the stack */
3851 sc->link_up = false;
3852 ixgbe_if_update_admin_status(ctx);
3853
3854 /* reprogram the RAR[0] in case user changed it. */
3855 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
3856
3857 return;
3858 } /* ixgbe_if_stop */
3859
3860 /************************************************************************
3861  * ixgbe_if_update_admin_status - Update OS on link state
3862 *
3863 * Note: Only updates the OS on the cached link state.
3864 * The real check of the hardware only happens with
3865 * a link interrupt.
3866 ************************************************************************/
3867 static void
3868 ixgbe_if_update_admin_status(if_ctx_t ctx)
3869 {
3870 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3871 device_t dev = iflib_get_dev(ctx);
3872
3873 if (sc->link_up) {
3874 if (sc->link_active == false) {
3875 if (bootverbose)
3876 				device_printf(dev, "Link is up %d Gbps %s\n",
3877 				    ((sc->link_speed ==
3878 				    IXGBE_LINK_SPEED_10GB_FULL) ? 10 : 1),
				    "Full Duplex");
3879 sc->link_active = true;
3880 /* Update any Flow Control changes */
3881 ixgbe_fc_enable(&sc->hw);
3882 /* Update DMA coalescing config */
3883 ixgbe_config_dmac(sc);
3884 iflib_link_state_change(ctx, LINK_STATE_UP,
3885 ixgbe_link_speed_to_baudrate(sc->link_speed));
3886
3887 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3888 ixgbe_ping_all_vfs(sc);
3889 }
3890 } else { /* Link down */
3891 if (sc->link_active == true) {
3892 if (bootverbose)
3893 device_printf(dev, "Link is Down\n");
3894 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
3895 sc->link_active = false;
3896 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3897 ixgbe_ping_all_vfs(sc);
3898 }
3899 }
3900
3901 /* Handle task requests from msix_link() */
3902 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
3903 ixgbe_handle_mod(ctx);
3904 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
3905 ixgbe_handle_msf(ctx);
3906 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
3907 ixgbe_handle_mbx(ctx);
3908 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
3909 ixgbe_reinit_fdir(ctx);
3910 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
3911 ixgbe_handle_phy(ctx);
3912 sc->task_requests = 0;
3913
3914 ixgbe_update_stats_counters(sc);
3915 } /* ixgbe_if_update_admin_status */
3916
3917 /************************************************************************
3918 * ixgbe_config_dmac - Configure DMA Coalescing
3919 ************************************************************************/
3920 static void
3921 ixgbe_config_dmac(struct ixgbe_softc *sc)
3922 {
3923 struct ixgbe_hw *hw = &sc->hw;
3924 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3925
3926 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3927 return;
3928
3929 if (dcfg->watchdog_timer ^ sc->dmac ||
3930 dcfg->link_speed ^ sc->link_speed) {
3931 dcfg->watchdog_timer = sc->dmac;
3932 dcfg->fcoe_en = false;
3933 dcfg->link_speed = sc->link_speed;
3934 dcfg->num_tcs = 1;
3935
3936 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3937 dcfg->watchdog_timer, dcfg->link_speed);
3938
3939 hw->mac.ops.dmac_config(hw);
3940 }
3941 } /* ixgbe_config_dmac */
3942
3943 /************************************************************************
3944 * ixgbe_if_enable_intr
3945 ************************************************************************/
3946 void
3947 ixgbe_if_enable_intr(if_ctx_t ctx)
3948 {
3949 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3950 struct ixgbe_hw *hw = &sc->hw;
3951 struct ix_rx_queue *que = sc->rx_queues;
3952 u32 mask, fwsm;
3953
3954 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3955
3956 switch (sc->hw.mac.type) {
3957 case ixgbe_mac_82599EB:
3958 mask |= IXGBE_EIMS_ECC;
3959 		/* Temperature sensor on some adapters */
3960 mask |= IXGBE_EIMS_GPI_SDP0;
3961 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3962 mask |= IXGBE_EIMS_GPI_SDP1;
3963 mask |= IXGBE_EIMS_GPI_SDP2;
3964 break;
3965 case ixgbe_mac_X540:
3966 /* Detect if Thermal Sensor is enabled */
3967 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3968 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3969 mask |= IXGBE_EIMS_TS;
3970 mask |= IXGBE_EIMS_ECC;
3971 break;
3972 case ixgbe_mac_X550:
3973 /* MAC thermal sensor is automatically enabled */
3974 mask |= IXGBE_EIMS_TS;
3975 mask |= IXGBE_EIMS_ECC;
3976 break;
3977 case ixgbe_mac_X550EM_x:
3978 case ixgbe_mac_X550EM_a:
3979 /* Some devices use SDP0 for important information */
3980 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3981 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3982 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3983 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3984 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3985 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3986 mask |= IXGBE_EICR_GPI_SDP0_X540;
3987 mask |= IXGBE_EIMS_ECC;
3988 break;
3989 default:
3990 break;
3991 }
3992
3993 /* Enable Fan Failure detection */
3994 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3995 mask |= IXGBE_EIMS_GPI_SDP1;
3996 /* Enable SR-IOV */
3997 if (sc->feat_en & IXGBE_FEATURE_SRIOV)
3998 mask |= IXGBE_EIMS_MAILBOX;
3999 /* Enable Flow Director */
4000 if (sc->feat_en & IXGBE_FEATURE_FDIR)
4001 mask |= IXGBE_EIMS_FLOW_DIR;
4002
4003 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4004
4005 /* With MSI-X we use auto clear */
4006 if (sc->intr_type == IFLIB_INTR_MSIX) {
4007 mask = IXGBE_EIMS_ENABLE_MASK;
4008 /* Don't autoclear Link */
4009 mask &= ~IXGBE_EIMS_OTHER;
4010 mask &= ~IXGBE_EIMS_LSC;
4011 if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
4012 mask &= ~IXGBE_EIMS_MAILBOX;
4013 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4014 }
4015
4016 /*
4017 * Now enable all queues, this is done separately to
4018 * allow for handling the extended (beyond 32) MSI-X
4019 * vectors that can be used by 82599
4020 */
4021 for (int i = 0; i < sc->num_rx_queues; i++, que++)
4022 ixgbe_enable_queue(sc, que->msix);
4023
4024 IXGBE_WRITE_FLUSH(hw);
4025
4026 } /* ixgbe_if_enable_intr */
4027
4028 /************************************************************************
4029  * ixgbe_if_disable_intr
4030 ************************************************************************/
4031 static void
4032 ixgbe_if_disable_intr(if_ctx_t ctx)
4033 {
4034 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4035
4036 if (sc->intr_type == IFLIB_INTR_MSIX)
4037 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
4038 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
4039 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
4040 } else {
4041 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
4042 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
4043 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
4044 }
4045 IXGBE_WRITE_FLUSH(&sc->hw);
4046
4047 } /* ixgbe_if_disable_intr */
4048
4049 /************************************************************************
4050 * ixgbe_link_intr_enable
4051 ************************************************************************/
4052 static void
4053 ixgbe_link_intr_enable(if_ctx_t ctx)
4054 {
4055 struct ixgbe_hw *hw =
4056 &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
4057
4058 /* Re-enable other interrupts */
4059 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
4060 } /* ixgbe_link_intr_enable */
4061
4062 /************************************************************************
4063 * ixgbe_if_rx_queue_intr_enable
4064 ************************************************************************/
4065 static int
4066 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
4067 {
4068 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4069 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
4070
4071 ixgbe_enable_queue(sc, que->msix);
4072
4073 return (0);
4074 } /* ixgbe_if_rx_queue_intr_enable */
4075
4076 /************************************************************************
4077 * ixgbe_enable_queue
4078 ************************************************************************/
4079 static void
4080 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
4081 {
4082 struct ixgbe_hw *hw = &sc->hw;
4083 u64 queue = 1ULL << vector;
4084 u32 mask;
4085
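	/*
	 * Example: vector 35 gives queue = 1ULL << 35; the low 32 bits
	 * are zero, so only EIMS_EX(1) is written, with bit 3 set.
	 */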
4086 if (hw->mac.type == ixgbe_mac_82598EB) {
4087 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4088 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4089 } else {
4090 mask = (queue & 0xFFFFFFFF);
4091 if (mask)
4092 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4093 mask = (queue >> 32);
4094 if (mask)
4095 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4096 }
4097 } /* ixgbe_enable_queue */
4098
4099 /************************************************************************
4100 * ixgbe_disable_queue
4101 ************************************************************************/
4102 static void
4103 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
4104 {
4105 struct ixgbe_hw *hw = &sc->hw;
4106 u64 queue = 1ULL << vector;
4107 u32 mask;
4108
4109 if (hw->mac.type == ixgbe_mac_82598EB) {
4110 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4111 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
4112 } else {
4113 mask = (queue & 0xFFFFFFFF);
4114 if (mask)
4115 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
4116 mask = (queue >> 32);
4117 if (mask)
4118 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
4119 }
4120 } /* ixgbe_disable_queue */
4121
4122 /************************************************************************
4123 * ixgbe_intr - Legacy Interrupt Service Routine
4124 ************************************************************************/
4125 int
4126 ixgbe_intr(void *arg)
4127 {
4128 struct ixgbe_softc *sc = arg;
4129 struct ix_rx_queue *que = sc->rx_queues;
4130 struct ixgbe_hw *hw = &sc->hw;
4131 if_ctx_t ctx = sc->ctx;
4132 u32 eicr, eicr_mask;
4133
4134 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4135
4136 ++que->irqs;
4137 if (eicr == 0) {
4138 ixgbe_if_enable_intr(ctx);
4139 return (FILTER_HANDLED);
4140 }
4141
4142 /* Check for fan failure */
4143 if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
4144 (eicr & IXGBE_EICR_GPI_SDP1)) {
4145 device_printf(sc->dev,
4146 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4147 IXGBE_WRITE_REG(hw, IXGBE_EIMS,
4148 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4149 }
4150
4151 /* Link status change */
4152 if (eicr & IXGBE_EICR_LSC) {
4153 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
4154 iflib_admin_intr_deferred(ctx);
4155 }
4156
4157 if (ixgbe_is_sfp(hw)) {
4158 /* Pluggable optics-related interrupt */
4159 if (hw->mac.type >= ixgbe_mac_X540)
4160 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
4161 else
4162 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4163
4164 if (eicr & eicr_mask) {
4165 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
4166 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
4167 }
4168
4169 if ((hw->mac.type == ixgbe_mac_82599EB) &&
4170 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4171 IXGBE_WRITE_REG(hw, IXGBE_EICR,
4172 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4173 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4174 }
4175 }
4176
4177 /* External PHY interrupt */
4178 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
4179 (eicr & IXGBE_EICR_GPI_SDP0_X540))
4180 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
4181
4182 return (FILTER_SCHEDULE_THREAD);
4183 } /* ixgbe_intr */
4184
4185 /************************************************************************
4186 * ixgbe_free_pci_resources
4187 ************************************************************************/
4188 static void
4189 ixgbe_free_pci_resources(if_ctx_t ctx)
4190 {
4191 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4192 struct ix_rx_queue *que = sc->rx_queues;
4193 device_t dev = iflib_get_dev(ctx);
4194
4195 /* Release all MSI-X queue resources */
4196 if (sc->intr_type == IFLIB_INTR_MSIX)
4197 iflib_irq_free(ctx, &sc->irq);
4198
4199 if (que != NULL) {
4200 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
4201 iflib_irq_free(ctx, &que->que_irq);
4202 }
4203 }
4204
4205 if (sc->pci_mem != NULL)
4206 bus_release_resource(dev, SYS_RES_MEMORY,
4207 rman_get_rid(sc->pci_mem), sc->pci_mem);
4208 } /* ixgbe_free_pci_resources */
4209
4210 /************************************************************************
4211 * ixgbe_sysctl_flowcntl
4212 *
4213 * SYSCTL wrapper around setting Flow Control
4214 ************************************************************************/
4215 static int
4216 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4217 {
4218 struct ixgbe_softc *sc;
4219 int error, fc;
4220
4221 sc = (struct ixgbe_softc *)arg1;
4222 fc = sc->hw.fc.current_mode;
4223
4224 error = sysctl_handle_int(oidp, &fc, 0, req);
4225 if ((error) || (req->newptr == NULL))
4226 return (error);
4227
4228 /* Don't bother if it's not changed */
4229 if (fc == sc->hw.fc.current_mode)
4230 return (0);
4231
4232 return ixgbe_set_flowcntl(sc, fc);
4233 } /* ixgbe_sysctl_flowcntl */
4234
4235 /************************************************************************
4236 * ixgbe_set_flowcntl - Set flow control
4237 *
4238 * Flow control values:
4239 * 0 - off
4240 * 1 - rx pause
4241 * 2 - tx pause
4242 * 3 - full
4243 ************************************************************************/
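/*
 * These values track the ixgbe_fc_* enum ordering
 * (ixgbe_fc_none = 0 ... ixgbe_fc_full = 3), so the sysctl value can be
 * passed through to ixgbe_set_flowcntl() unchanged.
 */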
4244 static int
4245 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4246 {
4247 switch (fc) {
4248 case ixgbe_fc_rx_pause:
4249 case ixgbe_fc_tx_pause:
4250 case ixgbe_fc_full:
4251 sc->hw.fc.requested_mode = fc;
4252 if (sc->num_rx_queues > 1)
4253 ixgbe_disable_rx_drop(sc);
4254 break;
4255 case ixgbe_fc_none:
4256 sc->hw.fc.requested_mode = ixgbe_fc_none;
4257 if (sc->num_rx_queues > 1)
4258 ixgbe_enable_rx_drop(sc);
4259 break;
4260 default:
4261 return (EINVAL);
4262 }
4263
4264 /* Don't autoneg if forcing a value */
4265 sc->hw.fc.disable_fc_autoneg = true;
4266 ixgbe_fc_enable(&sc->hw);
4267
4268 return (0);
4269 } /* ixgbe_set_flowcntl */
4270
4271 /************************************************************************
4272 * ixgbe_enable_rx_drop
4273 *
4274 * Enable the hardware to drop packets when the buffer is
4275 * full. This is useful with multiqueue, so that no single
4276 * queue being full stalls the entire RX engine. We only
4277 * enable this when Multiqueue is enabled AND Flow Control
4278 * is disabled.
4279 ************************************************************************/
4280 static void
4281 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
4282 {
4283 struct ixgbe_hw *hw = &sc->hw;
4284 struct rx_ring *rxr;
4285 u32 srrctl;
4286
4287 for (int i = 0; i < sc->num_rx_queues; i++) {
4288 rxr = &sc->rx_queues[i].rxr;
4289 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4290 srrctl |= IXGBE_SRRCTL_DROP_EN;
4291 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4292 }
4293
4294 /* enable drop for each vf */
4295 for (int i = 0; i < sc->num_vfs; i++) {
4296 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4297 (IXGBE_QDE_WRITE |
4298 (i << IXGBE_QDE_IDX_SHIFT) |
4299 IXGBE_QDE_ENABLE));
4300 }
4301 } /* ixgbe_enable_rx_drop */
4302
4303 /************************************************************************
4304 * ixgbe_disable_rx_drop
4305 ************************************************************************/
4306 static void
4307 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
4308 {
4309 struct ixgbe_hw *hw = &sc->hw;
4310 struct rx_ring *rxr;
4311 u32 srrctl;
4312
4313 for (int i = 0; i < sc->num_rx_queues; i++) {
4314 rxr = &sc->rx_queues[i].rxr;
4315 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4316 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4317 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4318 }
4319
4320 /* disable drop for each vf */
4321 for (int i = 0; i < sc->num_vfs; i++) {
4322 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4323 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4324 }
4325 } /* ixgbe_disable_rx_drop */
4326
4327 /************************************************************************
4328 * ixgbe_sysctl_advertise
4329 *
4330 * SYSCTL wrapper around setting advertised speed
4331 ************************************************************************/
4332 static int
4333 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4334 {
4335 struct ixgbe_softc *sc;
4336 int error, advertise;
4337
4338 sc = (struct ixgbe_softc *)arg1;
4339 if (atomic_load_acq_int(&sc->recovery_mode))
4340 return (EPERM);
4341
4342 advertise = sc->advertise;
4343
4344 error = sysctl_handle_int(oidp, &advertise, 0, req);
4345 if ((error) || (req->newptr == NULL))
4346 return (error);
4347
4348 return ixgbe_set_advertise(sc, advertise);
4349 } /* ixgbe_sysctl_advertise */
4350
4351 /************************************************************************
4352 * ixgbe_set_advertise - Control advertised link speed
4353 *
4354 * Flags:
4355 * 0x1 - advertise 100 Mb
4356 * 0x2 - advertise 1G
4357 * 0x4 - advertise 10G
4358 * 0x8 - advertise 10 Mb (yes, Mb)
4359 * 0x10 - advertise 2.5G (disabled by default)
4360 * 0x20 - advertise 5G (disabled by default)
4361 *
4362 ************************************************************************/
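/*
 * Example: a request of 0x6 (via the advertise sysctl) asks for
 * 1G | 10G per the flags above; values above 0x3F are rejected.
 */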
4363 static int
4364 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
4365 {
4366 device_t dev = iflib_get_dev(sc->ctx);
4367 struct ixgbe_hw *hw;
4368 ixgbe_link_speed speed = 0;
4369 ixgbe_link_speed link_caps = 0;
4370 s32 err = IXGBE_NOT_IMPLEMENTED;
4371 bool negotiate = false;
4372
4373 /* Checks to validate new value */
4374 if (sc->advertise == advertise) /* no change */
4375 return (0);
4376
4377 hw = &sc->hw;
4378
4379 /* No speed changes for backplane media */
4380 if (hw->phy.media_type == ixgbe_media_type_backplane)
4381 return (ENODEV);
4382
4383 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4384 (hw->phy.multispeed_fiber))) {
4385 device_printf(dev,
4386 "Advertised speed can only be set on copper or multispeed"
4387 " fiber media types.\n");
4388 return (EINVAL);
4389 }
4390
4391 if (advertise < 0x1 || advertise > 0x3F) {
4392 device_printf(dev,
4393 "Invalid advertised speed; valid modes are 0x1 through"
4394 " 0x3F\n");
4395 return (EINVAL);
4396 }
4397
4398 if (hw->mac.ops.get_link_capabilities) {
4399 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4400 &negotiate);
4401 if (err != IXGBE_SUCCESS) {
4402 device_printf(dev,
4403 "Unable to determine supported advertise speeds"
4404 "\n");
4405 return (ENODEV);
4406 }
4407 }
4408
4409 /* Set new value and report new advertised mode */
4410 if (advertise & 0x1) {
4411 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4412 device_printf(dev,
4413 "Interface does not support 100Mb advertised"
4414 " speed\n");
4415 return (EINVAL);
4416 }
4417 speed |= IXGBE_LINK_SPEED_100_FULL;
4418 }
4419 if (advertise & 0x2) {
4420 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4421 device_printf(dev,
4422 "Interface does not support 1Gb advertised speed"
4423 "\n");
4424 return (EINVAL);
4425 }
4426 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4427 }
4428 if (advertise & 0x4) {
4429 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4430 device_printf(dev,
4431 "Interface does not support 10Gb advertised speed"
4432 "\n");
4433 return (EINVAL);
4434 }
4435 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4436 }
4437 if (advertise & 0x8) {
4438 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4439 device_printf(dev,
4440 "Interface does not support 10Mb advertised speed"
4441 "\n");
4442 return (EINVAL);
4443 }
4444 speed |= IXGBE_LINK_SPEED_10_FULL;
4445 }
4446 if (advertise & 0x10) {
4447 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
4448 device_printf(dev,
4449 "Interface does not support 2.5G advertised speed"
4450 "\n");
4451 return (EINVAL);
4452 }
4453 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
4454 }
4455 if (advertise & 0x20) {
4456 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
4457 device_printf(dev,
4458 "Interface does not support 5G advertised speed"
4459 "\n");
4460 return (EINVAL);
4461 }
4462 speed |= IXGBE_LINK_SPEED_5GB_FULL;
4463 }
4464
4465 hw->mac.autotry_restart = true;
4466 hw->mac.ops.setup_link(hw, speed, true);
4467 sc->advertise = advertise;
4468
4469 return (0);
4470 } /* ixgbe_set_advertise */
4471
4472 /************************************************************************
4473 * ixgbe_get_default_advertise - Get default advertised speed settings
4474 *
4475 * Formatted for sysctl usage.
4476 * Flags:
4477 * 0x1 - advertise 100 Mb
4478 * 0x2 - advertise 1G
4479 * 0x4 - advertise 10G
4480 * 0x8 - advertise 10 Mb (yes, Mb)
4481 * 0x10 - advertise 2.5G (disabled by default)
4482 * 0x20 - advertise 5G (disabled by default)
4483 ************************************************************************/
4484 static int
4485 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
4486 {
4487 struct ixgbe_hw *hw = &sc->hw;
4488 int speed;
4489 ixgbe_link_speed link_caps = 0;
4490 s32 err;
4491 bool negotiate = false;
4492
4493 /*
4494 * Advertised speed means nothing unless it's copper or
4495 * multi-speed fiber
4496 */
4497 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4498 !(hw->phy.multispeed_fiber))
4499 return (0);
4500
4501 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4502 if (err != IXGBE_SUCCESS)
4503 return (0);
4504
4505 if (hw->mac.type == ixgbe_mac_X550) {
4506 /*
4507 * 2.5G and 5G autonegotiation speeds on X550
4508 * are disabled by default due to reported
4509 * interoperability issues with some switches.
4510 */
4511 link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
4512 IXGBE_LINK_SPEED_5GB_FULL);
4513 }
4514
4515 speed =
4516 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
4517 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
4518 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
4519 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
4520 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
4521 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
4522
4523 	return (speed);
4524 } /* ixgbe_get_default_advertise */
4525
4526 /************************************************************************
4527 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4528 *
4529 * Control values:
4530 * 0/1 - off / on (use default value of 1000)
4531 *
4532 * Legal timer values are:
4533 * 50,100,250,500,1000,2000,5000,10000
4534 *
4535 * Turning off interrupt moderation will also turn this off.
4536 ************************************************************************/
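/*
 * Example: writing 500 programs a 500-unit watchdog; writing 1 selects
 * the 1000 default; any value outside the list above returns EINVAL.
 */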
4537 static int
4538 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4539 {
4540 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4541 if_t ifp = iflib_get_ifp(sc->ctx);
4542 int error;
4543 u16 newval;
4544
4545 newval = sc->dmac;
4546 error = sysctl_handle_16(oidp, &newval, 0, req);
4547 if ((error) || (req->newptr == NULL))
4548 return (error);
4549
4550 switch (newval) {
4551 case 0:
4552 /* Disabled */
4553 sc->dmac = 0;
4554 break;
4555 case 1:
4556 /* Enable and use default */
4557 sc->dmac = 1000;
4558 break;
4559 case 50:
4560 case 100:
4561 case 250:
4562 case 500:
4563 case 1000:
4564 case 2000:
4565 case 5000:
4566 case 10000:
4567 /* Legal values - allow */
4568 sc->dmac = newval;
4569 break;
4570 default:
4571 /* Do nothing, illegal value */
4572 return (EINVAL);
4573 }
4574
4575 /* Re-initialize hardware if it's already running */
4576 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
4577 if_init(ifp, ifp);
4578
4579 return (0);
4580 } /* ixgbe_sysctl_dmac */
4581
4582 #ifdef IXGBE_DEBUG
4583 /************************************************************************
4584 * ixgbe_sysctl_power_state
4585 *
4586 * Sysctl to test power states
4587 * Values:
4588 * 0 - set device to D0
4589 * 3 - set device to D3
4590 * (none) - get current device power state
4591 ************************************************************************/
4592 static int
4593 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4594 {
4595 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4596 device_t dev = sc->dev;
4597 int curr_ps, new_ps, error = 0;
4598
4599 curr_ps = new_ps = pci_get_powerstate(dev);
4600
4601 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4602 if ((error) || (req->newptr == NULL))
4603 return (error);
4604
4605 if (new_ps == curr_ps)
4606 return (0);
4607
4608 if (new_ps == 3 && curr_ps == 0)
4609 error = DEVICE_SUSPEND(dev);
4610 else if (new_ps == 0 && curr_ps == 3)
4611 error = DEVICE_RESUME(dev);
4612 else
4613 return (EINVAL);
4614
4615 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4616
4617 return (error);
4618 } /* ixgbe_sysctl_power_state */
4619 #endif
4620
4621 /************************************************************************
4622 * ixgbe_sysctl_wol_enable
4623 *
4624 * Sysctl to enable/disable the WoL capability,
4625 * if supported by the adapter.
4626 *
4627 * Values:
4628 * 0 - disabled
4629 * 1 - enabled
4630 ************************************************************************/
4631 static int
4632 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4633 {
4634 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4635 struct ixgbe_hw *hw = &sc->hw;
4636 int new_wol_enabled;
4637 int error = 0;
4638
4639 new_wol_enabled = hw->wol_enabled;
4640 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4641 if ((error) || (req->newptr == NULL))
4642 return (error);
4643 new_wol_enabled = !!(new_wol_enabled);
4644 if (new_wol_enabled == hw->wol_enabled)
4645 return (0);
4646
4647 if (new_wol_enabled > 0 && !sc->wol_support)
4648 return (ENODEV);
4649 else
4650 hw->wol_enabled = new_wol_enabled;
4651
4652 return (0);
4653 } /* ixgbe_sysctl_wol_enable */
4654
4655 /************************************************************************
4656 * ixgbe_sysctl_wufc - Wake Up Filter Control
4657 *
4658 * Sysctl to enable/disable the types of packets that the
4659 * adapter will wake up on upon receipt.
4660 * Flags:
4661 * 0x1 - Link Status Change
4662 * 0x2 - Magic Packet
4663 * 0x4 - Direct Exact
4664 * 0x8 - Directed Multicast
4665 * 0x10 - Broadcast
4666 * 0x20 - ARP/IPv4 Request Packet
4667 * 0x40 - Direct IPv4 Packet
4668 * 0x80 - Direct IPv6 Packet
4669 *
4670 * Settings not listed above will cause the sysctl to return an error.
4671 ************************************************************************/
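/*
 * Example: writing 0x12 arms wake on Magic Packet (0x2) plus Broadcast
 * (0x10); a value with any bit above 0xff set is rejected with EINVAL.
 */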
4672 static int
4673 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4674 {
4675 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4676 int error = 0;
4677 u32 new_wufc;
4678
4679 new_wufc = sc->wufc;
4680
4681 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
4682 if ((error) || (req->newptr == NULL))
4683 return (error);
4684 if (new_wufc == sc->wufc)
4685 return (0);
4686
4687 if (new_wufc & 0xffffff00)
4688 return (EINVAL);
4689
4690 new_wufc &= 0xff;
4691 new_wufc |= (0xffffff & sc->wufc);
4692 sc->wufc = new_wufc;
4693
4694 return (0);
4695 } /* ixgbe_sysctl_wufc */
4696
4697 #ifdef IXGBE_DEBUG
4698 /************************************************************************
4699 * ixgbe_sysctl_print_rss_config
4700 ************************************************************************/
4701 static int
4702 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4703 {
4704 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4705 struct ixgbe_hw *hw = &sc->hw;
4706 device_t dev = sc->dev;
4707 struct sbuf *buf;
4708 int error = 0, reta_size;
4709 u32 reg;
4710
4711 if (atomic_load_acq_int(&sc->recovery_mode))
4712 return (EPERM);
4713
4714 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4715 if (!buf) {
4716 device_printf(dev, "Could not allocate sbuf for output.\n");
4717 return (ENOMEM);
4718 }
4719
4720 // TODO: use sbufs to make a string to print out
4721 /* Set multiplier for RETA setup and table size based on MAC */
4722 switch (sc->hw.mac.type) {
4723 case ixgbe_mac_X550:
4724 case ixgbe_mac_X550EM_x:
4725 case ixgbe_mac_X550EM_a:
4726 reta_size = 128;
4727 break;
4728 default:
4729 reta_size = 32;
4730 break;
4731 }
4732
4733 /* Print out the redirection table */
4734 sbuf_cat(buf, "\n");
4735 for (int i = 0; i < reta_size; i++) {
4736 if (i < 32) {
4737 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4738 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4739 } else {
4740 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4741 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4742 }
4743 }
4744
4745 // TODO: print more config
4746
4747 error = sbuf_finish(buf);
4748 if (error)
4749 device_printf(dev, "Error finishing sbuf: %d\n", error);
4750
4751 sbuf_delete(buf);
4752
4753 return (0);
4754 } /* ixgbe_sysctl_print_rss_config */
4755 #endif /* IXGBE_DEBUG */
4756
4757 /************************************************************************
4758 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4759 *
4760 * For X552/X557-AT devices using an external PHY
4761 ************************************************************************/
4762 static int
4763 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4764 {
4765 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4766 struct ixgbe_hw *hw = &sc->hw;
4767 u16 reg;
4768
4769 if (atomic_load_acq_int(&sc->recovery_mode))
4770 return (EPERM);
4771
4772 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4773 device_printf(iflib_get_dev(sc->ctx),
4774 "Device has no supported external thermal sensor.\n");
4775 return (ENODEV);
4776 }
4777
4778 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4779 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4780 device_printf(iflib_get_dev(sc->ctx),
4781 "Error reading from PHY's current temperature register"
4782 "\n");
4783 return (EAGAIN);
4784 }
4785
4786 /* Shift temp for output */
4787 reg = reg >> 8;
4788
4789 return (sysctl_handle_16(oidp, NULL, reg, req));
4790 } /* ixgbe_sysctl_phy_temp */
4791
4792 /************************************************************************
4793 * ixgbe_sysctl_phy_overtemp_occurred
4794 *
4795 * Reports (directly from the PHY) whether the current PHY
4796 * temperature is over the overtemp threshold.
4797 ************************************************************************/
4798 static int
4799 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4800 {
4801 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4802 struct ixgbe_hw *hw = &sc->hw;
4803 u16 reg;
4804
4805 if (atomic_load_acq_int(&sc->recovery_mode))
4806 return (EPERM);
4807
4808 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4809 device_printf(iflib_get_dev(sc->ctx),
4810 "Device has no supported external thermal sensor.\n");
4811 return (ENODEV);
4812 }
4813
4814 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4815 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4816 device_printf(iflib_get_dev(sc->ctx),
4817 "Error reading from PHY's temperature status register\n");
4818 return (EAGAIN);
4819 }
4820
4821 /* Get occurrence bit */
4822 reg = !!(reg & 0x4000);
4823
4824 	return (sysctl_handle_16(oidp, NULL, reg, req));
4825 } /* ixgbe_sysctl_phy_overtemp_occurred */
4826
4827 /************************************************************************
4828 * ixgbe_sysctl_eee_state
4829 *
4830 * Sysctl to set EEE power saving feature
4831 * Values:
4832 * 0 - disable EEE
4833 * 1 - enable EEE
4834 * (none) - get current device EEE state
4835 ************************************************************************/
4836 static int
4837 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4838 {
4839 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
4840 device_t dev = sc->dev;
4841 if_t ifp = iflib_get_ifp(sc->ctx);
4842 int curr_eee, new_eee, error = 0;
4843 s32 retval;
4844
4845 if (atomic_load_acq_int(&sc->recovery_mode))
4846 return (EPERM);
4847
4848 curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
4849
4850 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4851 if ((error) || (req->newptr == NULL))
4852 return (error);
4853
4854 /* Nothing to do */
4855 if (new_eee == curr_eee)
4856 return (0);
4857
4858 /* Not supported */
4859 if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
4860 return (EINVAL);
4861
4862 /* Bounds checking */
4863 if ((new_eee < 0) || (new_eee > 1))
4864 return (EINVAL);
4865
4866 retval = ixgbe_setup_eee(&sc->hw, new_eee);
4867 if (retval) {
4868 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4869 return (EINVAL);
4870 }
4871
4872 /* Restart auto-neg */
4873 if_init(ifp, ifp);
4874
4875 device_printf(dev, "New EEE state: %d\n", new_eee);
4876
4877 /* Cache new value */
4878 if (new_eee)
4879 sc->feat_en |= IXGBE_FEATURE_EEE;
4880 else
4881 sc->feat_en &= ~IXGBE_FEATURE_EEE;
4882
4883 return (error);
4884 } /* ixgbe_sysctl_eee_state */
4885
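/************************************************************************
 * ixgbe_sysctl_tso_tcp_flags_mask
 *
 * Sysctl to adjust the 12-bit TCP flag masks the hardware applies to
 * TSO segments (DTXTCPFLGL/DTXTCPFLGH); oid_arg2 selects the field:
 * 0 and 1 are the low and high halves of DTXTCPFLGL, 2 is DTXTCPFLGH.
 ************************************************************************/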
4886 static int
4887 ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
4888 {
4889 struct ixgbe_softc *sc;
4890 u32 reg, val, shift;
4891 int error, mask;
4892
4893 sc = oidp->oid_arg1;
4894 switch (oidp->oid_arg2) {
4895 case 0:
4896 reg = IXGBE_DTXTCPFLGL;
4897 shift = 0;
4898 break;
4899 case 1:
4900 reg = IXGBE_DTXTCPFLGL;
4901 shift = 16;
4902 break;
4903 case 2:
4904 reg = IXGBE_DTXTCPFLGH;
4905 shift = 0;
4906 break;
4907 default:
4908 return (EINVAL);
4910 }
4911 val = IXGBE_READ_REG(&sc->hw, reg);
4912 mask = (val >> shift) & 0xfff;
4913 error = sysctl_handle_int(oidp, &mask, 0, req);
4914 if (error != 0 || req->newptr == NULL)
4915 return (error);
4916 if (mask < 0 || mask > 0xfff)
4917 return (EINVAL);
4918 val = (val & ~(0xfff << shift)) | (mask << shift);
4919 IXGBE_WRITE_REG(&sc->hw, reg, val);
4920 return (0);
4921 } /* ixgbe_sysctl_tso_tcp_flags_mask */
4922
4923 /************************************************************************
4924 * ixgbe_init_device_features
4925 ************************************************************************/
4926 static void
4927 ixgbe_init_device_features(struct ixgbe_softc *sc)
4928 {
4929 sc->feat_cap = IXGBE_FEATURE_NETMAP |
4930 IXGBE_FEATURE_RSS |
4931 IXGBE_FEATURE_MSI |
4932 IXGBE_FEATURE_MSIX |
4933 IXGBE_FEATURE_LEGACY_IRQ;
4934
4935 /* Set capabilities first... */
4936 switch (sc->hw.mac.type) {
4937 case ixgbe_mac_82598EB:
4938 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
4939 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4940 break;
4941 case ixgbe_mac_X540:
4942 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4943 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4944 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4945 (sc->hw.bus.func == 0))
4946 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4947 break;
4948 case ixgbe_mac_X550:
4949 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4950 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4951 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4952 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4953 break;
4954 case ixgbe_mac_X550EM_x:
4955 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4956 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4957 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4958 if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4959 sc->feat_cap |= IXGBE_FEATURE_EEE;
4960 break;
4961 case ixgbe_mac_X550EM_a:
4962 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
4963 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4964 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4965 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4966 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4967 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4968 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4969 sc->feat_cap |= IXGBE_FEATURE_EEE;
4970 }
4971 break;
4972 case ixgbe_mac_82599EB:
4973 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
4974 sc->feat_cap |= IXGBE_FEATURE_FDIR;
4975 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4976 (sc->hw.bus.func == 0))
4977 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
4978 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4979 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4980 break;
4981 default:
4982 break;
4983 }
4984
4985 /* Enabled by default... */
4986 /* Fan failure detection */
4987 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4988 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4989 /* Netmap */
4990 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
4991 sc->feat_en |= IXGBE_FEATURE_NETMAP;
4992 /* EEE */
4993 if (sc->feat_cap & IXGBE_FEATURE_EEE)
4994 sc->feat_en |= IXGBE_FEATURE_EEE;
4995 /* Thermal Sensor */
4996 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4997 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4998 /* Recovery mode */
4999 if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
5000 sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
5001

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (sc->feat_cap & IXGBE_FEATURE_FDIR)
			sc->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(sc->dev,
			    "Device does not support Flow Director."
			    " Leaving disabled.\n");
	}
	/*
	 * Message Signaled Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		sc->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
		sc->feat_cap &= ~IXGBE_FEATURE_RSS;
		sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		sc->feat_en &= ~IXGBE_FEATURE_RSS;
		sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
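
/*
 * Worked example of the resolution above (illustrative, not exhaustive):
 * for an X550EM_x KR device (IXGBE_DEV_ID_X550EM_X_KR) with MSI-X usable,
 * feat_cap ends up as NETMAP | RSS | MSI | MSIX | LEGACY_IRQ |
 * RECOVERY_MODE | SRIOV | FDIR | EEE, while feat_en starts with the
 * defaults NETMAP | EEE | RECOVERY_MODE and additionally gains RSS and/or
 * FDIR only when the corresponding ixgbe_enable_* tunables are set.
 */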

/************************************************************************
 * ixgbe_check_fan_failure
 ************************************************************************/
static void
ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
{
	u32 mask;

	/*
	 * 'reg' holds the EICR value when we are called from the interrupt
	 * path and the ESDP value otherwise; the SDP1 (fan fail) indication
	 * sits at a different bit position in each register, so pick the
	 * mask that matches what the caller read.
	 */
	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
	    IXGBE_ESDP_SDP1;

	if (reg & mask)
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */

/************************************************************************
 * ixgbe_sbuf_fw_version
 ************************************************************************/
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	const char *space = "";

	ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack, a build ID in Intel's SCM */
	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */

	/* FW version */
	if ((nvm_ver.phy_fw_maj == 0x0 &&
	    nvm_ver.phy_fw_min == 0x0 &&
	    nvm_ver.phy_fw_id == 0x0) ||
	    (nvm_ver.phy_fw_maj == 0xF &&
	    nvm_ver.phy_fw_min == 0xFF &&
	    nvm_ver.phy_fw_id == 0xF)) {
		/* If major, minor and id numbers are set to 0,
		 * reading FW version is unsupported. If major number
		 * is set to 0xF, minor is set to 0xFF and id is set
		 * to 0xF, this means that number read is invalid. */
	} else
		sbuf_printf(buf, "fw %d.%d.%d ",
		    nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
		    nvm_ver.phy_fw_id);

	/* NVM version */
	if ((nvm_ver.nvm_major == 0x0 &&
	    nvm_ver.nvm_minor == 0x0 &&
	    nvm_ver.nvm_id == 0x0) ||
	    (nvm_ver.nvm_major == 0xF &&
	    nvm_ver.nvm_minor == 0xFF &&
	    nvm_ver.nvm_id == 0xF)) {
		/* If major, minor and id numbers are set to 0,
		 * reading NVM version is unsupported. If major number
		 * is set to 0xF, minor is set to 0xFF and id is set
		 * to 0xF, this means that number read is invalid. */
	} else
		sbuf_printf(buf, "nvm %x.%02x.%x ",
		    nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);

	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, nvm_ver.or_major, nvm_ver.or_build,
		    nvm_ver.or_patch);
		space = " ";
	}

	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID | 0xFFFFFFFF)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
	}
} /* ixgbe_sbuf_fw_version */
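
/*
 * With every component valid, the buffer assembled above takes this shape
 * (the version values here are made up for illustration):
 *
 *   fw 1.2.3 nvm 3.50.2 NVM OEM V8.10 R0 Option ROM V1-b2001-p3 eTrack 0x80000d3d
 */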

/************************************************************************
 * ixgbe_print_fw_version
 ************************************************************************/
static void
ixgbe_print_fw_version(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_auto();
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return;
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	else if (sbuf_len(buf))
		device_printf(dev, "%s\n", sbuf_data(buf));

	sbuf_delete(buf);
} /* ixgbe_print_fw_version */
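
/*
 * device_printf() above prefixes the device name, so the resulting
 * console/dmesg line looks like (device unit and versions illustrative):
 *
 *   ix0: fw 1.2.3 nvm 3.50.2 eTrack 0x80000d3d
 */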

/************************************************************************
 * ixgbe_sysctl_print_fw_version
 ************************************************************************/
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	ixgbe_sbuf_fw_version(hw, buf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_fw_version */
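
/*
 * Usage sketch for the handler above (the OID name is an assumption; it is
 * registered with the rest of the per-device sysctls elsewhere in this
 * file, and the version string shown is illustrative):
 *
 *   # sysctl dev.ix.0.print_fw_version
 *   dev.ix.0.print_fw_version: fw 1.2.3 nvm 3.50.2 eTrack 0x80000d3d
 */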