/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	3
#define IXL_DRIVER_VERSION_BUILD	3

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
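
/* With the version macros above, IXL_DRIVER_VERSION_STRING expands to "2.3.3-k". */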

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select the devices to attach to
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static const pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC, "Intel(R) Ethernet Controller I710 for 1GBASE-T"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void	*ixl_register(device_t dev);
static int	 ixl_if_attach_pre(if_ctx_t ctx);
static int	 ixl_if_attach_post(if_ctx_t ctx);
static int	 ixl_if_detach(if_ctx_t ctx);
static int	 ixl_if_shutdown(if_ctx_t ctx);
static int	 ixl_if_suspend(if_ctx_t ctx);
static int	 ixl_if_resume(if_ctx_t ctx);
static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 ixl_if_enable_intr(if_ctx_t ctx);
static void	 ixl_if_disable_intr(if_ctx_t ctx);
static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 ixl_if_queues_free(if_ctx_t ctx);
static void	 ixl_if_update_admin_status(if_ctx_t ctx);
static void	 ixl_if_multi_set(if_ctx_t ctx);
static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 ixl_if_media_change(if_ctx_t ctx);
static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void	 ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static void	 ixl_save_pf_tunables(struct ixl_pf *);
static int	 ixl_allocate_pci_resources(struct ixl_pf *);
static void	 ixl_setup_ssctx(struct ixl_pf *pf);
static void	 ixl_admin_timer(void *arg);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

DRIVER_MODULE(ixl, pci, ixl_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);
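
/*
 * Illustrative note (not from the original sources): with the module
 * metadata above, the driver is typically loaded at boot from
 * /boot/loader.conf (if_ixl_load="YES") or at runtime via kldload(8).
 */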

static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act as if FW entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

static int ixl_flow_control = -1;
SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixl_flow_control, 0, "Initial Flow Control setting");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

/*** Functions ***/
static void *
ixl_register(device_t dev)
{
	return (&ixl_sctx_init);
}

static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}

static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

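	/*
	 * With head writeback the HW reports TX completion by writing the
	 * last completed descriptor index into a u32 located just past the
	 * end of the descriptor ring, hence the extra sizeof(u32) in the
	 * TX queue size below.
	 */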
	if (pf->vsi.enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}

static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

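	/*
	 * While IXL_STATE_LINK_POLLING is set (e.g. FW had not finished
	 * configuring the PHY by attach time), poll the link status here
	 * until it succeeds or IXL_PF_MAX_LINK_POLL elapses.
	 */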
	if (ixl_test_state(&pf->state, IXL_STATE_LINK_POLLING)) {
		struct i40e_hw *hw = &pf->hw;
		sbintime_t stime;
		enum i40e_status_code status;

		hw->phy.get_link_info = TRUE;
		status = i40e_get_link_status(hw, &pf->link_up);
		if (status == I40E_SUCCESS) {
			ixl_clear_state(&pf->state, IXL_STATE_LINK_POLLING);
			/* OS link info is updated in the admin task */
		} else {
			device_printf(pf->dev,
			    "%s: i40e_get_link_status status %s, aq error %s\n",
			    __func__, i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			stime = getsbinuptime();
			if (stime - pf->link_poll_start > IXL_PF_MAX_LINK_POLL) {
				device_printf(pf->dev, "Polling link status failed\n");
				ixl_clear_state(&pf->state, IXL_STATE_LINK_POLLING);
			}
		}
	}

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}

static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}

static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
	else
		ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			ixl_set_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		} else {
			ixl_clear_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}

static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		    error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Determine link state */
	error = ixl_attach_get_link_status(pf);
	if (error == EINVAL)
		goto err;

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/*
	 * Driver may have been reloaded. Ensure that the link state
	 * is consistent with current settings.
	 */
	ixl_set_link(pf, ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN));

	hw->phy.get_link_info = true;
	status = i40e_get_link_status(hw, &pf->link_up);
	if (status != I40E_SUCCESS) {
		device_printf(dev,
		    "%s get link status, status: %s aq_err=%s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		/*
		 * Most probably FW has not finished configuring PHY.
		 * Retry periodically in a timer callback.
		 */
		ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING);
		pf->link_poll_start = getsbinuptime();
	} else
		ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach(),
 * so this function is not allowed to fail.
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
#ifdef IXL_IW
	int error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_filters(&vsi->ftl);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (if_getflags(ifp) & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	if_t ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	int ret;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the admin queue is dead here, something outside of the driver
	 * (e.g. a PF reset) has likely reset the adapter.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(if_getlladdr(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		/*
		 * New filters are configured by ixl_reconfigure_filters
		 * at the end of ixl_init_locked.
		 */
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	ixl_set_link(pf, true);

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configure filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);

	/*
	 * Don't set link state if only reconfiguring
	 * e.g. on MTU change.
	 */
	if ((if_getflags(ifp) & IFF_UP) == 0 &&
	    !ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN))
		ixl_set_link(pf, false);
}

static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}

#ifdef PCI_IOV
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
#endif

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

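	/*
	 * TX queues get no MSI-X vectors of their own; each one is serviced
	 * as a softirq from the interrupt of the RX queue with the same
	 * index modulo isc_nrxqsets (see the msix assignment below).
	 */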
	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		/* TODO: Queue index parameter is probably wrong */
		ixl_enable_queue(hw, que->rxr.me);
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->msix - 1);
	} else {
		/*
		 * Setting PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
		 * stops queues from triggering interrupts.
		 */
		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	}
}

static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}

	if (!IXL_PF_IN_RECOVERY_MODE(pf))
		sysctl_ctx_free(&vsi->sysctl_ctx);
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;
	u16 pending;

	if (IXL_PF_IS_RESETTING(pf))
		ixl_handle_empr_reset(pf);

	/*
	 * The Admin Queue is shut down while handling a reset.
	 * Don't proceed if it hasn't been re-initialized,
	 * e.g. due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (ixl_test_state(&pf->state, IXL_STATE_MDD_PENDING))
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	/* Delete filters for removed multicast addresses */
	ixl_del_multi(vsi, false);

	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		ixl_del_multi(vsi, true);
		return;
	}

	ixl_add_multi(vsi);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
	    ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	return (0);
}

static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
	/* 100 M */
	case I40E_PHY_TYPE_100BASE_TX:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	/* 1 G */
	case I40E_PHY_TYPE_1000BASE_T:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case I40E_PHY_TYPE_1000BASE_SX:
		ifmr->ifm_active |= IFM_1000_SX;
		break;
	case I40E_PHY_TYPE_1000BASE_LX:
		ifmr->ifm_active |= IFM_1000_LX;
		break;
	case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	/* 2.5 G */
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
		ifmr->ifm_active |= IFM_2500_T;
		break;
	/* 5 G */
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
		ifmr->ifm_active |= IFM_5000_T;
		break;
	/* 10 G */
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_SR:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case I40E_PHY_TYPE_10GBASE_LR:
		ifmr->ifm_active |= IFM_10G_LR;
		break;
	case I40E_PHY_TYPE_10GBASE_T:
		ifmr->ifm_active |= IFM_10G_T;
		break;
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_AOC:
		ifmr->ifm_active |= IFM_10G_AOC;
		break;
	/* 25 G */
	case I40E_PHY_TYPE_25GBASE_KR:
		ifmr->ifm_active |= IFM_25G_KR;
		break;
	case I40E_PHY_TYPE_25GBASE_CR:
		ifmr->ifm_active |= IFM_25G_CR;
		break;
	case I40E_PHY_TYPE_25GBASE_SR:
		ifmr->ifm_active |= IFM_25G_SR;
		break;
	case I40E_PHY_TYPE_25GBASE_LR:
		ifmr->ifm_active |= IFM_25G_LR;
		break;
	case I40E_PHY_TYPE_25GBASE_AOC:
		ifmr->ifm_active |= IFM_25G_AOC;
		break;
	case I40E_PHY_TYPE_25GBASE_ACC:
		ifmr->ifm_active |= IFM_25G_ACC;
		break;
	/* 40 G */
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
		ifmr->ifm_active |= IFM_40G_SR4;
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ifmr->ifm_active |= IFM_40G_LR4;
		break;
	case I40E_PHY_TYPE_XLAUI:
		ifmr->ifm_active |= IFM_OTHER;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
		ifmr->ifm_active |= IFM_1000_KX;
		break;
	case I40E_PHY_TYPE_SGMII:
		ifmr->ifm_active |= IFM_1000_SGMII;
		break;
	/* ERJ: What's the difference between these? */
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
		ifmr->ifm_active |= IFM_10G_CR1;
		break;
	case I40E_PHY_TYPE_10GBASE_KX4:
		ifmr->ifm_active |= IFM_10G_KX4;
		break;
	case I40E_PHY_TYPE_10GBASE_KR:
		ifmr->ifm_active |= IFM_10G_KR;
		break;
	case I40E_PHY_TYPE_SFI:
		ifmr->ifm_active |= IFM_10G_SFI;
		break;
	/* Our single 20G media type */
	case I40E_PHY_TYPE_20GBASE_KR2:
		ifmr->ifm_active |= IFM_20G_KR2;
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
		ifmr->ifm_active |= IFM_40G_KR4;
		break;
	case I40E_PHY_TYPE_XLPPI:
	case I40E_PHY_TYPE_40GBASE_AOC:
		ifmr->ifm_active |= IFM_40G_XLPPI;
		break;
	/* Unknown to driver */
	default:
		ifmr->ifm_active |= IFM_UNKNOWN;
		break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);
	struct i40e_hw *hw = vsi->hw;
	int err;
	bool uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}

static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	ixl_update_stats_counters(pf);
}

static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/*
	 * Keep track of registered VLANS to know what
	 * filters have to be configured when VLAN_HWFILTER
	 * capability is enabled.
	 */
	++vsi->num_vlans;
	bit_set(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
		ixl_add_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
		/*
		 * There are not enough HW resources to add filters
		 * for all registered VLANs. Re-configure filtering
		 * to allow reception of all expected traffic.
		 */
		device_printf(vsi->dev,
		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled");
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}
}

static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	bit_clear(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/* One filter is used for untagged frames */
	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS - 1)
		ixl_del_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS - 1) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		ixl_add_vlan_filters(vsi, hw->mac.addr);
	}
}

static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif

static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}

static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/*
	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOGPRIVATE_0 without
	 * performing privilege checks. It is important that this function
	 * perform the necessary checks for commands which should only be
	 * executed by privileged threads.
	 */

	switch (command) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
			error = priv_check(curthread, PRIV_DRIVER);
			if (error)
				break;
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}

/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
static bool
ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
#ifdef IXL_DEBUG_FC
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
	pf->recovery_mode = ixl_debug_recovery_mode;
#endif
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;

	pf->fc = -1;
	if (ixl_flow_control != -1) {
		if (ixl_flow_control < 0 || ixl_flow_control > 3) {
			device_printf(dev,
			    "Invalid flow_control value of %d set!\n",
			    ixl_flow_control);
			device_printf(dev,
			    "flow_control must be between %d and %d, "
			    "inclusive\n", 0, 3);
			device_printf(dev,
			    "Using default configuration instead\n");
		} else
			pf->fc = ixl_flow_control;
	}
}