xref: /freebsd/sys/dev/ixl/if_ixl.c (revision 46a8a1f08f88c278e60ebb6daa7a551eb641c67b)
/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	3
#define IXL_DRIVER_VERSION_BUILD	3

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static const pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC, "Intel(R) Ethernet Controller I710 for 1GBASE-T"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void	*ixl_register(device_t dev);
static int	 ixl_if_attach_pre(if_ctx_t ctx);
static int	 ixl_if_attach_post(if_ctx_t ctx);
static int	 ixl_if_detach(if_ctx_t ctx);
static int	 ixl_if_shutdown(if_ctx_t ctx);
static int	 ixl_if_suspend(if_ctx_t ctx);
static int	 ixl_if_resume(if_ctx_t ctx);
static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 ixl_if_enable_intr(if_ctx_t ctx);
static void	 ixl_if_disable_intr(if_ctx_t ctx);
static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 ixl_if_queues_free(if_ctx_t ctx);
static void	 ixl_if_update_admin_status(if_ctx_t ctx);
static void	 ixl_if_multi_set(if_ctx_t ctx);
static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 ixl_if_media_change(if_ctx_t ctx);
static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void	 ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static void	 ixl_save_pf_tunables(struct ixl_pf *);
static int	 ixl_allocate_pci_resources(struct ixl_pf *);
static void	 ixl_setup_ssctx(struct ixl_pf *pf);
static void	 ixl_admin_timer(void *arg);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

DRIVER_MODULE(ixl, pci, ixl_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);

static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act as if FW had entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "Detect the last completed TX descriptor from the head value written back by HW instead of scanning the descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

static int ixl_flow_control = -1;
SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixl_flow_control, 0, "Initial Flow Control setting");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

/*** Functions ***/
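/*
 * Return the driver's static iflib shared context template;
 * iflib consults this during device probe and attach.
 */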
static void *
ixl_register(device_t dev)
{
	return (&ixl_sctx_init);
}

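/*
 * Map BAR0 and save the PCI identity and register access
 * information needed by the shared code into pf->hw and pf->osdep.
 */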
static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int             rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}

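/*
 * Fill out the iflib shared softc context: queue set limits (one in
 * recovery mode, more on X722), ring sizes for the selected TX
 * completion method (head writeback vs. descriptor writeback), and
 * the TSO/RSS/checksum parameters.
 */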
static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}

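/*
 * Periodic callout: polls link status while IXL_STATE_LINK_POLLING
 * is set (giving up after IXL_PF_MAX_LINK_POLL), defers the admin
 * task, and reschedules itself every hz/2 ticks.
 */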
static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

	if (ixl_test_state(&pf->state, IXL_STATE_LINK_POLLING)) {
		struct i40e_hw *hw = &pf->hw;
		sbintime_t stime;
		enum i40e_status_code status;

		hw->phy.get_link_info = TRUE;
		status = i40e_get_link_status(hw, &pf->link_up);
		if (status == I40E_SUCCESS) {
			ixl_clear_state(&pf->state, IXL_STATE_LINK_POLLING);
			/* OS link info is updated in the admin task */
		} else {
			device_printf(pf->dev,
			    "%s: i40e_get_link_status status %s, aq error %s\n",
			    __func__, i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			stime = getsbinuptime();
			if (stime - pf->link_poll_start > IXL_PF_MAX_LINK_POLL) {
				device_printf(pf->dev, "Polling link status failed\n");
				ixl_clear_state(&pf->state, IXL_STATE_LINK_POLLING);
			}
		}
	}

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}

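/*
 * Reduced attach path used when the firmware is in recovery mode;
 * only the MAC address, the admin queue interrupt, and the iflib
 * context are set up.
 */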
static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}

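/*
 * First stage of attach, called before iflib sets up queues and
 * interrupts: map PCI resources, reset the PF, bring up the admin
 * queue, sanity-check the NVM/firmware API version, read HW
 * capabilities and the MAC address, and fill out the iflib context.
 */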
static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
	else
		ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			ixl_set_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		} else {
			ixl_clear_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}

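/*
 * Second stage of attach, called after iflib has set up queues and
 * interrupts: create the ifnet, configure the switch and filters,
 * reserve PF queues, determine link state, and start the admin timer.
 */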
static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		    error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Determine link state */
	error = ixl_attach_get_link_status(pf);
	if (error == EINVAL)
		goto err;

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/*
	 * Driver may have been reloaded. Ensure that the link state
	 * is consistent with current settings.
	 */
	ixl_set_link(pf, ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN));

	hw->phy.get_link_info = true;
	status = i40e_get_link_status(hw, &pf->link_up);
	if (status != I40E_SUCCESS) {
		device_printf(dev,
		    "%s: i40e_get_link_status failed, status: %s aq_err=%s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		/*
		 * Most probably FW has not finished configuring PHY.
		 * Retry periodically in a timer callback.
		 */
		ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING);
		pf->link_poll_start = getsbinuptime();
	} else
		ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach(),
 * so this routine is not allowed to fail.
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_filters(&vsi->ftl);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (if_getflags(ifp) & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

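/*
 * Bring the interface up: rebuild the HW structs if the admin queue
 * died, program a possibly changed MAC address, initialize the VSI
 * rings and RSS, configure queue interrupts and ITR, and enable the
 * rings.
 */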
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	if_t ifp = iflib_get_ifp(ctx);
	device_t	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the admin queue is dead here, something outside of the driver
	 * probably did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest MAC address... the user might have set a LAA */
	bcopy(if_getlladdr(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		/*
		 * New filters are configured by ixl_reconfigure_filters
		 * at the end of ixl_init_locked.
		 */
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	ixl_set_link(pf, true);

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configured filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

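/*
 * Stop the interface: disable queue interrupts and rings. The link
 * state is left alone when this is just a reconfiguration, e.g. on
 * an MTU change.
 */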
void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);

	/*
	 * Don't set link state if only reconfiguring
	 * e.g. on MTU change.
	 */
	if ((if_getflags(ifp) & IFF_UP) == 0 &&
	    !ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN))
		ixl_set_link(pf, false);
}

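/*
 * Assign MSI-X vectors to interrupts: vector 0 is reserved for the
 * admin queue, each RX queue gets its own vector, and TX queues are
 * mapped onto the RX vectors round-robin.
 */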
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}

#ifdef PCI_IOV
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
#endif

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_enable_queue(hw, rx_que->rxr.me);
	} else {
		/*
		 * Set PFINT_LNKLST0 FIRSTQ_INDX to 0x0 to enable
		 * triggering interrupts by queues.
		 */
		wr32(hw, I40E_PFINT_LNKLST0, 0x0);
	}
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->rxr.me);
	} else {
		/*
		 * Set PFINT_LNKLST0 FIRSTQ_INDX to End of List (0x7FF)
		 * to stop queues from triggering interrupts.
		 */
		wr32(hw, I40E_PFINT_LNKLST0, IXL_QUEUE_EOL);
	}
}

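/*
 * Enable the interrupt for a single RX queue. Queue interrupt
 * registers are indexed from 0 while MSI-X vector 0 belongs to the
 * admin queue, hence the "msix - 1".
 */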
static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);
	return (0);
}

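/*
 * Allocate per-TX-queue software state and record the ring addresses
 * provided by iflib; the report-status array is only needed for the
 * descriptor writeback completion method.
 */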
static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

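/*
 * Allocate per-RX-queue software state and record the ring addresses
 * provided by iflib.
 */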
static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

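/*
 * Free the queue state allocated by the *_queues_alloc routines;
 * also used on their error paths, so it tolerates a partially
 * constructed queue set.
 */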
static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}

	if (!IXL_PF_IN_RECOVERY_MODE(pf))
		sysctl_ctx_free(&vsi->sysctl_ctx);
}

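/*
 * Propagate the cached pf->link_up state to the OS (and, with SR-IOV,
 * to the VFs) whenever it differs from the VSI's recorded link state.
 */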
void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

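/*
 * Drain pending admin queue events and dispatch them (up to
 * IXL_ADM_LIMIT per call), then re-enable the admin queue interrupt
 * cause. Returns the status of the last ARQ read; *pending holds the
 * number of events still queued.
 */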
static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

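/*
 * Admin task body: finish a pending EMP reset if there is one, handle
 * MDD events, process the admin queue, and update link status. If
 * events remain, the task is rescheduled; otherwise the admin
 * interrupt is re-enabled.
 */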
static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf	*pf = iflib_get_softc(ctx);
	struct i40e_hw	*hw = &pf->hw;
	u16		pending;

	if (IXL_PF_IS_RESETTING(pf))
		ixl_handle_empr_reset(pf);

	/*
	 * The Admin Queue is shut down while handling reset.
	 * Don't proceed if it hasn't been re-initialized,
	 * e.g. due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (ixl_test_state(&pf->state, IXL_STATE_MDD_PENDING))
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

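/*
 * Sync the HW multicast filter list with the ifnet. If more than
 * MAX_MULTICAST_ADDR addresses are in use, fall back to multicast
 * promiscuous mode instead of individual filters.
 */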
static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	enum i40e_status_code status;
	int mcnt;
	if_t ifp = iflib_get_ifp(ctx);

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	/* Delete filters for removed multicast addresses */
	ixl_del_multi(vsi, false);

	mcnt = min(if_llmaddr_count(ifp), MAX_MULTICAST_ADDR);
	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		/* If promisc mode is already enabled, just return */
		if (vsi->flags & IXL_FLAGS_MC_PROMISC)
			return;

		status = i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		if (status != I40E_SUCCESS)
			if_printf(ifp, "Failed to enable multicast promiscuous "
			    "mode, status: %s\n", i40e_stat_str(hw, status));
		else {
			if_printf(ifp, "Enabled multicast promiscuous mode\n");

			/* Set the flag to track promiscuous mode */
			vsi->flags |= IXL_FLAGS_MC_PROMISC;
		}
		/* Delete all existing MC filters */
		ixl_del_multi(vsi, true);
		return;
	}

	ixl_add_multi(vsi);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

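/*
 * Validate the requested MTU against IXL_MAX_FRAME and record the
 * resulting maximum frame size in the shared context.
 */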
static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
		ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		ETHER_VLAN_ENCAP_LEN;

	return (0);
}

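/*
 * Report current media to ifconfig: map the PHY type reported by
 * firmware to an ifmedia subtype and append flow control status.
 */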
1535 static void
ixl_if_media_status(if_ctx_t ctx,struct ifmediareq * ifmr)1536 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1537 {
1538 	struct ixl_pf *pf = iflib_get_softc(ctx);
1539 	struct i40e_hw  *hw = &pf->hw;
1540 
1541 	INIT_DEBUGOUT("ixl_media_status: begin");
1542 
1543 	ifmr->ifm_status = IFM_AVALID;
1544 	ifmr->ifm_active = IFM_ETHER;
1545 
1546 	if (!pf->link_up) {
1547 		return;
1548 	}
1549 
1550 	ifmr->ifm_status |= IFM_ACTIVE;
1551 	/* Hardware is always full-duplex */
1552 	ifmr->ifm_active |= IFM_FDX;
1553 
1554 	switch (hw->phy.link_info.phy_type) {
1555 		/* 100 M */
1556 		case I40E_PHY_TYPE_100BASE_TX:
1557 			ifmr->ifm_active |= IFM_100_TX;
1558 			break;
1559 		/* 1 G */
1560 		case I40E_PHY_TYPE_1000BASE_T:
1561 			ifmr->ifm_active |= IFM_1000_T;
1562 			break;
1563 		case I40E_PHY_TYPE_1000BASE_SX:
1564 			ifmr->ifm_active |= IFM_1000_SX;
1565 			break;
1566 		case I40E_PHY_TYPE_1000BASE_LX:
1567 			ifmr->ifm_active |= IFM_1000_LX;
1568 			break;
1569 		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1570 			ifmr->ifm_active |= IFM_1000_T;
1571 			break;
1572 		/* 2.5 G */
1573 		case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1574 			ifmr->ifm_active |= IFM_2500_T;
1575 			break;
1576 		/* 5 G */
1577 		case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1578 			ifmr->ifm_active |= IFM_5000_T;
1579 			break;
1580 		/* 10 G */
1581 		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1582 			ifmr->ifm_active |= IFM_10G_TWINAX;
1583 			break;
1584 		case I40E_PHY_TYPE_10GBASE_SR:
1585 			ifmr->ifm_active |= IFM_10G_SR;
1586 			break;
1587 		case I40E_PHY_TYPE_10GBASE_LR:
1588 			ifmr->ifm_active |= IFM_10G_LR;
1589 			break;
1590 		case I40E_PHY_TYPE_10GBASE_T:
1591 			ifmr->ifm_active |= IFM_10G_T;
1592 			break;
1593 		case I40E_PHY_TYPE_XAUI:
1594 		case I40E_PHY_TYPE_XFI:
1595 			ifmr->ifm_active |= IFM_10G_TWINAX;
1596 			break;
1597 		case I40E_PHY_TYPE_10GBASE_AOC:
1598 			ifmr->ifm_active |= IFM_10G_AOC;
1599 			break;
1600 		/* 25 G */
1601 		case I40E_PHY_TYPE_25GBASE_KR:
1602 			ifmr->ifm_active |= IFM_25G_KR;
1603 			break;
1604 		case I40E_PHY_TYPE_25GBASE_CR:
1605 			ifmr->ifm_active |= IFM_25G_CR;
1606 			break;
1607 		case I40E_PHY_TYPE_25GBASE_SR:
1608 			ifmr->ifm_active |= IFM_25G_SR;
1609 			break;
1610 		case I40E_PHY_TYPE_25GBASE_LR:
1611 			ifmr->ifm_active |= IFM_25G_LR;
1612 			break;
1613 		case I40E_PHY_TYPE_25GBASE_AOC:
1614 			ifmr->ifm_active |= IFM_25G_AOC;
1615 			break;
1616 		case I40E_PHY_TYPE_25GBASE_ACC:
1617 			ifmr->ifm_active |= IFM_25G_ACC;
1618 			break;
1619 		/* 40 G */
1620 		case I40E_PHY_TYPE_40GBASE_CR4:
1621 		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1622 			ifmr->ifm_active |= IFM_40G_CR4;
1623 			break;
1624 		case I40E_PHY_TYPE_40GBASE_SR4:
1625 			ifmr->ifm_active |= IFM_40G_SR4;
1626 			break;
1627 		case I40E_PHY_TYPE_40GBASE_LR4:
1628 			ifmr->ifm_active |= IFM_40G_LR4;
1629 			break;
1630 		case I40E_PHY_TYPE_XLAUI:
1631 			ifmr->ifm_active |= IFM_OTHER;
1632 			break;
1633 		case I40E_PHY_TYPE_1000BASE_KX:
1634 			ifmr->ifm_active |= IFM_1000_KX;
1635 			break;
1636 		case I40E_PHY_TYPE_SGMII:
1637 			ifmr->ifm_active |= IFM_1000_SGMII;
1638 			break;
1639 		/* ERJ: What's the difference between these? */
1640 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1641 		case I40E_PHY_TYPE_10GBASE_CR1:
1642 			ifmr->ifm_active |= IFM_10G_CR1;
1643 			break;
1644 		case I40E_PHY_TYPE_10GBASE_KX4:
1645 			ifmr->ifm_active |= IFM_10G_KX4;
1646 			break;
1647 		case I40E_PHY_TYPE_10GBASE_KR:
1648 			ifmr->ifm_active |= IFM_10G_KR;
1649 			break;
1650 		case I40E_PHY_TYPE_SFI:
1651 			ifmr->ifm_active |= IFM_10G_SFI;
1652 			break;
1653 		/* Our single 20G media type */
1654 		case I40E_PHY_TYPE_20GBASE_KR2:
1655 			ifmr->ifm_active |= IFM_20G_KR2;
1656 			break;
1657 		case I40E_PHY_TYPE_40GBASE_KR4:
1658 			ifmr->ifm_active |= IFM_40G_KR4;
1659 			break;
1660 		case I40E_PHY_TYPE_XLPPI:
1661 		case I40E_PHY_TYPE_40GBASE_AOC:
1662 			ifmr->ifm_active |= IFM_40G_XLPPI;
1663 			break;
1664 		/* Unknown to driver */
1665 		default:
1666 			ifmr->ifm_active |= IFM_UNKNOWN;
1667 			break;
1668 	}
1669 	/* Report flow control status as well */
1670 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1671 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1672 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1673 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1674 }
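/*
 * Illustrative only, not compiled: a minimal userland sketch of how the
 * media word assembled above is consumed via SIOCGIFMEDIA.  The "ixl0"
 * interface name is a placeholder.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/if_media.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifmediareq ifmr;
	int s;

	if ((s = socket(AF_LOCAL, SOCK_DGRAM, 0)) < 0)
		return (1);
	memset(&ifmr, 0, sizeof(ifmr));
	strlcpy(ifmr.ifm_name, "ixl0", sizeof(ifmr.ifm_name));
	if (ioctl(s, SIOCGIFMEDIA, &ifmr) == 0 &&
	    (ifmr.ifm_status & IFM_AVALID) &&
	    (ifmr.ifm_status & IFM_ACTIVE))
		printf("link active, full-duplex: %d\n",
		    (ifmr.ifm_active & IFM_FDX) != 0);
	close(s);
	return (0);
}
#endif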
1675 
1676 static int
1677 ixl_if_media_change(if_ctx_t ctx)
1678 {
1679 	struct ifmedia *ifm = iflib_get_media(ctx);
1680 
1681 	INIT_DEBUGOUT("ixl_media_change: begin");
1682 
1683 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1684 		return (EINVAL);
1685 
1686 	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1687 	return (ENODEV);
1688 }
1689 
1690 static int
1691 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1692 {
1693 	struct ixl_pf *pf = iflib_get_softc(ctx);
1694 	struct ixl_vsi *vsi = &pf->vsi;
1695 	if_t ifp = iflib_get_ifp(ctx);
1696 	struct i40e_hw	*hw = vsi->hw;
1697 	int		err;
1698 	bool		uni = FALSE, multi = FALSE;
1699 
1700 	if (flags & IFF_PROMISC)
1701 		uni = multi = TRUE;
1702 	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1703 	    MAX_MULTICAST_ADDR)
1704 		multi = TRUE;
1705 
1706 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1707 	    vsi->seid, uni, NULL, true);
1708 	if (err)
1709 		return (err);
1710 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1711 	    vsi->seid, multi, NULL);
1712 
1713 	/* Update the multicast promiscuous flag based on the new state */
1714 	if (multi)
1715 		vsi->flags |= IXL_FLAGS_MC_PROMISC;
1716 	else
1717 		vsi->flags &= ~IXL_FLAGS_MC_PROMISC;
1718 
1719 	return (err);
1720 }
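/*
 * Illustrative only, not compiled: the unicast/multicast decision above,
 * restated as a self-contained helper (hypothetical names).  IFF_PROMISC
 * enables both filters; IFF_ALLMULTI, or exhausting the multicast filter
 * slots, enables multicast promiscuity alone.
 */
#if 0
static void
promisc_decision(int if_flags, int n_lladdrs, int max_mcast,
    bool *uni, bool *multi)
{
	*uni = *multi = false;
	if (if_flags & IFF_PROMISC)
		*uni = *multi = true;
	else if ((if_flags & IFF_ALLMULTI) || n_lladdrs >= max_mcast)
		*multi = true;
}
#endif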
1721 
1722 static void
1723 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1724 {
1725 	struct ixl_pf *pf = iflib_get_softc(ctx);
1726 
1727 	if (qid != 0)
1728 		return;
1729 
1730 	ixl_update_stats_counters(pf);
1731 }
1732 
1733 static void
1734 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1735 {
1736 	struct ixl_pf *pf = iflib_get_softc(ctx);
1737 	struct ixl_vsi *vsi = &pf->vsi;
1738 	struct i40e_hw	*hw = vsi->hw;
1739 	if_t ifp = iflib_get_ifp(ctx);
1740 
1741 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1742 		return;
1743 
1744 	/*
1745 	 * Keep track of registered VLANs to know what
1746 	 * filters have to be configured when VLAN_HWFILTER
1747 	 * capability is enabled.
1748 	 */
1749 	++vsi->num_vlans;
1750 	bit_set(vsi->vlans_map, vtag);
1751 
1752 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1753 		return;
1754 
1755 	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1756 		ixl_add_filter(vsi, hw->mac.addr, vtag);
1757 	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1758 		/*
1759 		 * There are not enough HW resources to add filters
1760 		 * for all registered VLANs. Re-configure filtering
1761 		 * to allow reception of all expected traffic.
1762 		 */
1763 		device_printf(vsi->dev,
1764 		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled\n");
1765 		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
1766 		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1767 	}
1768 }
1769 
1770 static void
1771 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1772 {
1773 	struct ixl_pf *pf = iflib_get_softc(ctx);
1774 	struct ixl_vsi *vsi = &pf->vsi;
1775 	struct i40e_hw	*hw = vsi->hw;
1776 	if_t ifp = iflib_get_ifp(ctx);
1777 
1778 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1779 		return;
1780 
1781 	--vsi->num_vlans;
1782 	bit_clear(vsi->vlans_map, vtag);
1783 
1784 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1785 		return;
1786 
1787 	/* One filter is used for untagged frames */
1788 	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS - 1)
1789 		ixl_del_filter(vsi, hw->mac.addr, vtag);
1790 	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS - 1) {
1791 		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1792 		ixl_add_vlan_filters(vsi, hw->mac.addr);
1793 	}
1794 }
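/*
 * Illustrative only, not compiled: vsi->vlans_map is a bitstring(3)
 * bitmap with one bit per possible VLAN ID, which is how the two
 * callbacks above track registered tags.  The same pattern, standalone:
 */
#if 0
#include <bitstring.h>

static void
vlan_map_demo(void)
{
	bitstr_t bit_decl(vlans_map, 4096);	/* VLAN IDs 0..4095 */

	bit_set(vlans_map, 100);		/* register VLAN 100 */
	if (bit_test(vlans_map, 100))
		bit_clear(vlans_map, 100);	/* unregister it again */
}
#endif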
1795 
1796 static uint64_t
1797 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1798 {
1799 	struct ixl_pf *pf = iflib_get_softc(ctx);
1800 	struct ixl_vsi *vsi = &pf->vsi;
1801 	if_t ifp = iflib_get_ifp(ctx);
1802 
1803 	switch (cnt) {
1804 	case IFCOUNTER_IPACKETS:
1805 		return (vsi->ipackets);
1806 	case IFCOUNTER_IERRORS:
1807 		return (vsi->ierrors);
1808 	case IFCOUNTER_OPACKETS:
1809 		return (vsi->opackets);
1810 	case IFCOUNTER_OERRORS:
1811 		return (if_get_counter_default(ifp, cnt) + vsi->oerrors);
1812 	case IFCOUNTER_COLLISIONS:
1813 		/* Collisions are impossible in full-duplex 10G/40G Ethernet */
1814 		return (0);
1815 	case IFCOUNTER_IBYTES:
1816 		return (vsi->ibytes);
1817 	case IFCOUNTER_OBYTES:
1818 		return (vsi->obytes);
1819 	case IFCOUNTER_IMCASTS:
1820 		return (vsi->imcasts);
1821 	case IFCOUNTER_OMCASTS:
1822 		return (vsi->omcasts);
1823 	case IFCOUNTER_IQDROPS:
1824 		return (vsi->iqdrops);
1825 	case IFCOUNTER_OQDROPS:
1826 		return (if_get_counter_default(ifp, cnt) + vsi->oqdrops);
1827 	case IFCOUNTER_NOPROTO:
1828 		return (vsi->noproto);
1829 	default:
1830 		return (if_get_counter_default(ifp, cnt));
1831 	}
1832 }
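/*
 * Illustrative only, not compiled: these counters surface in userland as
 * struct if_data, e.g. via getifaddrs(3).  "ixl0" is a placeholder.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <ifaddrs.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct ifaddrs *ifap, *ifa;

	if (getifaddrs(&ifap) != 0)
		return (1);
	for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next) {
		if (ifa->ifa_addr == NULL ||
		    ifa->ifa_addr->sa_family != AF_LINK ||
		    strcmp(ifa->ifa_name, "ixl0") != 0)
			continue;
		struct if_data *ifd = ifa->ifa_data;
		printf("ipackets %ju opackets %ju\n",
		    (uintmax_t)ifd->ifi_ipackets,
		    (uintmax_t)ifd->ifi_opackets);
	}
	freeifaddrs(ifap);
	return (0);
}
#endif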
1833 
1834 #ifdef PCI_IOV
1835 static void
1836 ixl_if_vflr_handle(if_ctx_t ctx)
1837 {
1838 	struct ixl_pf *pf = iflib_get_softc(ctx);
1839 
1840 	ixl_handle_vflr(pf);
1841 }
1842 #endif
1843 
1844 static int
1845 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1846 {
1847 	struct ixl_pf		*pf = iflib_get_softc(ctx);
1848 
1849 	if (pf->read_i2c_byte == NULL)
1850 		return (EINVAL);
1851 
1852 	for (int i = 0; i < req->len; i++)
1853 		if (pf->read_i2c_byte(pf, req->offset + i,
1854 		    req->dev_addr, &req->data[i]))
1855 			return (EIO);
1856 	return (0);
1857 }
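/*
 * Illustrative only, not compiled: the request handled above arrives from
 * userland via SIOCGI2C (this is how ifconfig(8) dumps SFP data).  The
 * "ixl0" name is a placeholder; 0xA0 is the conventional SFF-8472 module
 * EEPROM address.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <string.h>

static int
read_sfp_id_bytes(int s)
{
	struct ifreq ifr;
	struct ifi2creq req;

	memset(&ifr, 0, sizeof(ifr));
	memset(&req, 0, sizeof(req));
	strlcpy(ifr.ifr_name, "ixl0", sizeof(ifr.ifr_name));
	req.dev_addr = 0xA0;		/* module EEPROM address */
	req.offset = 0;
	req.len = 8;			/* data[] holds at most 8 bytes */
	ifr.ifr_data = (caddr_t)&req;
	return (ioctl(s, SIOCGI2C, &ifr));	/* req.data[] valid on 0 */
}
#endif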
1858 
1859 static int
1860 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1861 {
1862 	struct ixl_pf *pf = iflib_get_softc(ctx);
1863 	struct ifdrv *ifd = (struct ifdrv *)data;
1864 	int error = 0;
1865 
1866 	/*
1867 	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
1868 	 * performing privilege checks. It is important that this function
1869 	 * perform the necessary checks for commands which should only be
1870 	 * executed by privileged threads.
1871 	 */
1872 
1873 	switch (command) {
1874 	case SIOCGDRVSPEC:
1875 	case SIOCSDRVSPEC:
1876 		/* NVM update command */
1877 		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1878 			error = priv_check(curthread, PRIV_DRIVER);
1879 			if (error)
1880 				break;
1881 			error = ixl_handle_nvmupd_cmd(pf, ifd);
1882 		} else {
1883 			error = EINVAL;
1884 		}
1885 		break;
1886 	default:
1887 		error = EOPNOTSUPP;
1888 	}
1889 
1890 	return (error);
1891 }
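/*
 * Illustrative only, not compiled: a privileged tool reaches the NVM
 * update path above through SIOCSDRVSPEC.  The I40E_NVM_ACCESS payload
 * layout is tool/driver private, so only the plumbing is sketched here;
 * names other than the ioctl and struct ifdrv fields are hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <string.h>

static int
send_nvm_cmd(int s, void *buf, size_t len)
{
	struct ifdrv ifd;

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, "ixl0", sizeof(ifd.ifd_name));
	ifd.ifd_cmd = I40E_NVM_ACCESS;	/* driver-private command code */
	ifd.ifd_len = len;
	ifd.ifd_data = buf;
	return (ioctl(s, SIOCSDRVSPEC, &ifd));	/* needs PRIV_DRIVER */
}
#endif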
1892 
1893 /* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1894  * @ctx: iflib context
1895  * @event: event code to check
1896  *
1897  * Defaults to returning false for every event.
1898  *
1899  * @returns true if iflib needs to reinit the interface, false otherwise
1900  */
1901 static bool
1902 ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1903 {
1904 	switch (event) {
1905 	case IFLIB_RESTART_VLAN_CONFIG:
1906 	default:
1907 		return (false);
1908 	}
1909 }
1910 
1911 /*
1912  * Sanity check and save off tunable values.
1913  */
1914 static void
1915 ixl_save_pf_tunables(struct ixl_pf *pf)
1916 {
1917 	device_t dev = pf->dev;
1918 
1919 	/* Save tunable information */
1920 #ifdef IXL_DEBUG_FC
1921 	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1922 #endif
1923 #ifdef IXL_DEBUG
1924 	pf->recovery_mode = ixl_debug_recovery_mode;
1925 #endif
1926 	pf->dbg_mask = ixl_core_debug_mask;
1927 	pf->hw.debug_mask = ixl_shared_debug_mask;
1928 	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1929 	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1930 #if 0
1931 	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1932 	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1933 #endif
1934 
1935 	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1936 		pf->i2c_access_method = 0;
1937 	else
1938 		pf->i2c_access_method = ixl_i2c_access_method;
1939 
1940 	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1941 		device_printf(dev, "Invalid tx_itr value of %d set!\n",
1942 		    ixl_tx_itr);
1943 		device_printf(dev, "tx_itr must be between %d and %d, "
1944 		    "inclusive\n",
1945 		    0, IXL_MAX_ITR);
1946 		device_printf(dev, "Using default value of %d instead\n",
1947 		    IXL_ITR_4K);
1948 		pf->tx_itr = IXL_ITR_4K;
1949 	} else
1950 		pf->tx_itr = ixl_tx_itr;
1951 
1952 	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1953 		device_printf(dev, "Invalid rx_itr value of %d set!\n",
1954 		    ixl_rx_itr);
1955 		device_printf(dev, "rx_itr must be between %d and %d, "
1956 		    "inclusive\n",
1957 		    0, IXL_MAX_ITR);
1958 		device_printf(dev, "Using default value of %d instead\n",
1959 		    IXL_ITR_8K);
1960 		pf->rx_itr = IXL_ITR_8K;
1961 	} else
1962 		pf->rx_itr = ixl_rx_itr;
1963 
1964 	pf->fc = -1;
1965 	if (ixl_flow_control != -1) {
1966 		if (ixl_flow_control < 0 || ixl_flow_control > 3) {
1967 			device_printf(dev,
1968 			    "Invalid flow_control value of %d set!\n",
1969 			    ixl_flow_control);
1970 			device_printf(dev,
1971 			    "flow_control must be between %d and %d, "
1972 			    "inclusive\n", 0, 3);
1973 			device_printf(dev,
1974 			    "Using default configuration instead\n");
1975 		} else
1976 			pf->fc = ixl_flow_control;
1977 	}
1978 }
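/*
 * Illustrative only: the ixl_* globals validated above are set through
 * loader(8) tunables in the hw.ixl tree, e.g. in /boot/loader.conf
 * (values are examples; out-of-range settings fall back to the defaults
 * reported by the messages above):
 *
 *	hw.ixl.tx_itr="122"
 *	hw.ixl.rx_itr="62"
 *	hw.ixl.flow_control="3"
 */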
1979 
1980