xref: /freebsd/sys/dev/ixl/if_ixl.c (revision 4fe1295c964fa712dd763e3852187da8724ef79a)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl.h"
36 #include "ixl_pf.h"
37 
38 #ifdef IXL_IW
39 #include "ixl_iw.h"
40 #include "ixl_iw_int.h"
41 #endif
42 
43 #ifdef PCI_IOV
44 #include "ixl_pf_iov.h"
45 #endif
46 
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50 #define IXL_DRIVER_VERSION_MAJOR	2
51 #define IXL_DRIVER_VERSION_MINOR	3
52 #define IXL_DRIVER_VERSION_BUILD	1
53 
54 #define IXL_DRIVER_VERSION_STRING			\
55     __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
56     __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
57     __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
58 
59 /*********************************************************************
60  *  PCI Device ID Table
61  *
62  *  Used by probe to select devices to load on
63  *
64  *  ( Vendor ID, Device ID, Branding String )
65  *********************************************************************/
66 
/*
 * Each PVIDV() entry is (vendor ID, device ID, branding string); iflib
 * matches attached PCI devices against this table during probe.  The
 * table must be terminated with PVID_END.
 */
static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	/* required last entry */
	PVID_END
};
92 
93 /*********************************************************************
94  *  Function prototypes
95  *********************************************************************/
96 /*** IFLIB interface ***/
97 static void	*ixl_register(device_t dev);
98 static int	 ixl_if_attach_pre(if_ctx_t ctx);
99 static int	 ixl_if_attach_post(if_ctx_t ctx);
100 static int	 ixl_if_detach(if_ctx_t ctx);
101 static int	 ixl_if_shutdown(if_ctx_t ctx);
102 static int	 ixl_if_suspend(if_ctx_t ctx);
103 static int	 ixl_if_resume(if_ctx_t ctx);
104 static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
105 static void	 ixl_if_enable_intr(if_ctx_t ctx);
106 static void	 ixl_if_disable_intr(if_ctx_t ctx);
107 static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
108 static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
109 static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
110 static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
111 static void	 ixl_if_queues_free(if_ctx_t ctx);
112 static void	 ixl_if_update_admin_status(if_ctx_t ctx);
113 static void	 ixl_if_multi_set(if_ctx_t ctx);
114 static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
115 static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
116 static int	 ixl_if_media_change(if_ctx_t ctx);
117 static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
118 static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
119 static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
120 static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
121 static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
122 static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
123 static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
124 static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
125 #ifdef PCI_IOV
126 static void	 ixl_if_vflr_handle(if_ctx_t ctx);
127 #endif
128 
129 /*** Other ***/
130 static void	 ixl_save_pf_tunables(struct ixl_pf *);
131 static int	 ixl_allocate_pci_resources(struct ixl_pf *);
132 static void	 ixl_setup_ssctx(struct ixl_pf *pf);
133 static void	 ixl_admin_timer(void *arg);
134 
135 /*********************************************************************
136  *  FreeBSD Device Interface Entry Points
137  *********************************************************************/
138 
/*
 * newbus device methods: everything except device_register is delegated
 * to the generic iflib handlers; iflib then calls back into this driver
 * through the ixl_if_methods table below.
 */
static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

DRIVER_MODULE(ixl, pci, ixl_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

/* Module load-order dependencies */
MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);
165 
/*
 * iflib driver-interface methods: the callbacks iflib invokes for device
 * lifecycle, queue/interrupt management, and ifnet operations.  This
 * table is referenced from ixl_sctx_init.isc_driver.
 */
static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};
210 
211 /*
212 ** TUNEABLE PARAMETERS:
213 */
214 
/*
 * All tunables below are boot-time loader tunables and read-only sysctls
 * (CTLFLAG_RDTUN) under hw.ixl.*; their values are captured per-PF in
 * ixl_save_pf_tunables() during attach.
 */
static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act like when FW entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

/* -1 means "leave flow control at the NVM/firmware default" */
static int ixl_flow_control = -1;
SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixl_flow_control, 0, "Initial Flow Control setting");

#ifdef IXL_IW
/* Non-static: shared with the iWARP interface code (ixl_iw.c) */
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif

/* TX/RX ops tables: head-writeback and descriptor-writeback variants */
extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;
328 
/*
 * Shared context template handed to iflib from ixl_register().  Static
 * limits (DMA sizes, segment counts, ring-size bounds) live here;
 * runtime-dependent values are filled in later by ixl_setup_ssctx().
 */
static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	/* One interrupt dedicated to the admin queue */
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	/* Per-queue descriptor count bounds and defaults */
	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};
356 
357 /*** Functions ***/
358 static void *
359 ixl_register(device_t dev)
360 {
361 	return (&ixl_sctx_init);
362 }
363 
/*
 * ixl_allocate_pci_resources - map BAR0 and record PCI identity
 *
 * Allocates/activates the device's first memory BAR (register space),
 * copies the PCI vendor/device/subsystem IDs and bus location into the
 * shared-code i40e_hw structure, and saves the bus-space tag/handle the
 * register-access layer (osdep) uses.
 *
 * Returns 0 on success, or ENXIO if the BAR cannot be mapped.
 */
static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int             rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	/* presumably read by osdep to flush posted writes — see ixl osdep layer */
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	/* Shared code accesses registers indirectly through the osdep handle */
	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

 	return (0);
}
407 
/*
 * ixl_setup_ssctx - publish runtime parameters to the iflib softc context
 *
 * Fills in the parts of the shared softc context that depend on the
 * attached hardware and tunables: queue-set limits (1 in FW recovery
 * mode, 128 on X722, 64 otherwise), TX completion method and matching
 * if_txrx ops, descriptor ring byte sizes, MSI-X BAR, TSO/segment
 * limits, RSS table size, and offload capability flags.
 */
static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Recovery mode: a single queue pair only */
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		/* Head writeback: ring carries an extra u32 for the HW head index */
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}
444 
445 static void
446 ixl_admin_timer(void *arg)
447 {
448 	struct ixl_pf *pf = (struct ixl_pf *)arg;
449 
450 	/* Fire off the admin task */
451 	iflib_admin_intr_deferred(pf->vsi.ctx);
452 
453 	/* Reschedule the admin timer */
454 	callout_schedule(&pf->admin_timer, hz/2);
455 }
456 
/*
 * ixl_attach_pre_recovery_mode - reduced attach_pre path for FW recovery
 *
 * When the firmware is in recovery mode the device supports little more
 * than the admin queue: fetch the MAC address, keep interrupt 0 enabled
 * for admin queue events (MSI-X only), and publish the iflib context.
 * Always returns 0.
 */
static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}
477 
/*
 * ixl_if_attach_pre - iflib first-stage attach
 *
 * Runs before iflib allocates queues and interrupts.  Links the softc
 * structures together, creates the admin timer/mutex, maps PCI
 * resources, resets the PF, brings up the shared code and admin queue,
 * validates FW/NVM API versions, configures filtering and LLDP/EEE
 * state, and finally publishes the iflib shared context.  If the
 * firmware is in recovery mode a reduced attach path is taken instead.
 *
 * Returns 0 on success; on failure unwinds partial state via the err_*
 * labels (in reverse acquisition order) and returns an errno value.
 */
static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	/* Cross-link the PF and its (single) LAN VSI */
	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	/* Admin timer runs under its own mutex; started in attach_post */
	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	/*
	 * An FW API version mismatch is not immediately fatal: the AQ is
	 * usable, so print the NVM version first and then bail out below.
	 */
	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	/* Warn (but continue) on minor-version skew in either direction */
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	/* FW recovery mode: take the minimal attach path and stop here */
	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		/*
		 * NOTE(review): plain |= here, while the same flag is set
		 * with atomic_set_32() below — confirm non-atomic update is
		 * intentional (no concurrency yet at this point of attach).
		 */
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
	else
		atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

/* Unwind in reverse order of acquisition */
err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}
665 
666 static int
667 ixl_if_attach_post(if_ctx_t ctx)
668 {
669 	device_t dev;
670 	struct ixl_pf *pf;
671 	struct i40e_hw *hw;
672 	struct ixl_vsi *vsi;
673 	int error = 0;
674 	enum i40e_status_code status;
675 
676 	dev = iflib_get_dev(ctx);
677 	pf = iflib_get_softc(ctx);
678 
679 	INIT_DBG_DEV(dev, "begin");
680 
681 	vsi = &pf->vsi;
682 	vsi->ifp = iflib_get_ifp(ctx);
683 	hw = &pf->hw;
684 
685 	/* Save off determined number of queues for interface */
686 	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
687 	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
688 
689 	/* Setup OS network interface / ifnet */
690 	if (ixl_setup_interface(dev, pf)) {
691 		device_printf(dev, "interface setup failed!\n");
692 		error = EIO;
693 		goto err;
694 	}
695 
696 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
697 		/* Keep admin queue interrupts active while driver is loaded */
698 		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
699 			ixl_configure_intr0_msix(pf);
700 			ixl_enable_intr0(hw);
701 		}
702 
703 		ixl_add_sysctls_recovery_mode(pf);
704 
705 		/* Start the admin timer */
706 		mtx_lock(&pf->admin_mtx);
707 		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
708 		mtx_unlock(&pf->admin_mtx);
709 		return (0);
710 	}
711 
712 	/* Determine link state */
713 	if (ixl_attach_get_link_status(pf)) {
714 		error = EINVAL;
715 		goto err;
716 	}
717 
718 	error = ixl_switch_config(pf);
719 	if (error) {
720 		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
721 		     error);
722 		goto err;
723 	}
724 
725 	/* Add protocol filters to list */
726 	ixl_init_filters(vsi);
727 
728 	/* Init queue allocation manager */
729 	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
730 	if (error) {
731 		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
732 		    error);
733 		goto err;
734 	}
735 	/* reserve a contiguous allocation for the PF's VSI */
736 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
737 	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
738 	if (error) {
739 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
740 		    error);
741 		goto err;
742 	}
743 	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
744 	    pf->qtag.num_allocated, pf->qtag.num_active);
745 
746 	/* Limit PHY interrupts to link, autoneg, and modules failure */
747 	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
748 	    NULL);
749         if (status) {
750 		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
751 		    " aq_err %s\n", i40e_stat_str(hw, status),
752 		    i40e_aq_str(hw, hw->aq.asq_last_status));
753 		goto err;
754 	}
755 
756 	/* Get the bus configuration and set the shared code */
757 	ixl_get_bus_info(pf);
758 
759 	/* Keep admin queue interrupts active while driver is loaded */
760 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
761  		ixl_configure_intr0_msix(pf);
762  		ixl_enable_intr0(hw);
763 	}
764 
765 	/* Set initial advertised speed sysctl value */
766 	ixl_set_initial_advertised_speeds(pf);
767 
768 	/* Initialize statistics & add sysctls */
769 	ixl_add_device_sysctls(pf);
770 	ixl_pf_reset_stats(pf);
771 	ixl_update_stats_counters(pf);
772 	ixl_add_hw_stats(pf);
773 
774 	/*
775 	 * Driver may have been reloaded. Ensure that the link state
776 	 * is consistent with current settings.
777 	 */
778 	ixl_set_link(pf, (pf->state & IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) != 0);
779 
780 	hw->phy.get_link_info = true;
781 	i40e_get_link_status(hw, &pf->link_up);
782 	ixl_update_link_status(pf);
783 
784 #ifdef PCI_IOV
785 	ixl_initialize_sriov(pf);
786 #endif
787 
788 #ifdef IXL_IW
789 	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
790 		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
791 		if (pf->iw_enabled) {
792 			error = ixl_iw_pf_attach(pf);
793 			if (error) {
794 				device_printf(dev,
795 				    "interfacing to iWARP driver failed: %d\n",
796 				    error);
797 				goto err;
798 			} else
799 				device_printf(dev, "iWARP ready\n");
800 		} else
801 			device_printf(dev, "iWARP disabled on this device "
802 			    "(no MSI-X vectors)\n");
803 	} else {
804 		pf->iw_enabled = false;
805 		device_printf(dev, "The device is not iWARP enabled\n");
806 	}
807 #endif
808 	/* Start the admin timer */
809 	mtx_lock(&pf->admin_mtx);
810 	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
811 	mtx_unlock(&pf->admin_mtx);
812 
813 	INIT_DBG_DEV(dev, "end");
814 	return (0);
815 
816 err:
817 	INIT_DEBUGOUT("end: error %d", error);
818 	/* ixl_if_detach() is called on error from this */
819 	return (error);
820 }
821 
/**
 * ixl_if_detach - iflib detach handler
 *
 * Tears down in reverse attach order: stop and destroy the admin
 * timer/mutex, detach iWARP, remove media types, shut down the LAN HMC
 * and admin queue, then release queue-manager, PCI and filter resources.
 *
 * XXX: iflib always ignores the return value of detach()
 * -> This means that this isn't allowed to fail; errors are logged and
 *    teardown continues regardless.
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			/* deliberately not returned: detach cannot fail (see above) */
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_filters(&vsi->ftl);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
875 
876 static int
877 ixl_if_shutdown(if_ctx_t ctx)
878 {
879 	int error = 0;
880 
881 	INIT_DEBUGOUT("ixl_if_shutdown: begin");
882 
883 	/* TODO: Call ixl_if_stop()? */
884 
885 	/* TODO: Then setup low power mode */
886 
887 	return (error);
888 }
889 
890 static int
891 ixl_if_suspend(if_ctx_t ctx)
892 {
893 	int error = 0;
894 
895 	INIT_DEBUGOUT("ixl_if_suspend: begin");
896 
897 	/* TODO: Call ixl_if_stop()? */
898 
899 	/* TODO: Then setup low power mode */
900 
901 	return (error);
902 }
903 
904 static int
905 ixl_if_resume(if_ctx_t ctx)
906 {
907 	struct ifnet *ifp = iflib_get_ifp(ctx);
908 
909 	INIT_DEBUGOUT("ixl_if_resume: begin");
910 
911 	/* Read & clear wake-up registers */
912 
913 	/* Required after D3->D0 transition */
914 	if (ifp->if_flags & IFF_UP)
915 		ixl_if_init(ctx);
916 
917 	return (0);
918 }
919 
/**
 * ixl_if_init - Bring the interface up (iflib init path)
 * @ctx: iflib context
 *
 * Rebuilds HW structures if the admin queue died, programs a possibly
 * changed (LAA) MAC address, initializes the VSI rings/HMC contexts,
 * brings the link up, and re-applies multicast, RSS, interrupt, filter,
 * and promiscuous settings.  Errors abort initialization with a message
 * but cannot be propagated (void return per iflib contract).
 */
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	/* In recovery mode only minimal functionality is available. */
	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		/* Drop filters tied to the old address before switching. */
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		/*
		 * New filters are configured by ixl_reconfigure_filters
		 * at the end of ixl_init_locked.
		 */
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	ixl_set_link(pf, true);

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	/* TX completion reporting mode depends on the head-writeback tunable. */
	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configure filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}
1010 
/**
 * ixl_if_stop - Bring the interface down (iflib stop path)
 * @ctx: iflib context
 *
 * Stops the optional iWARP client, masks queue interrupts, and disables
 * the rings.  The link itself is dropped only on a real "down" (not a
 * reconfiguration such as an MTU change) and only when the
 * LINK_ACTIVE_ON_DOWN state flag is clear.
 */
void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	/* In recovery mode the rings were never brought up. */
	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);

	/*
	 * Don't set link state if only reconfiguring
	 * e.g. on MTU change.
	 */
	if ((if_getflags(ifp) & IFF_UP) == 0 &&
	    (atomic_load_acq_32(&pf->state) &
	    IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) == 0)
		ixl_set_link(pf, false);
}
1042 
/**
 * ixl_if_msix_intr_assign - Assign MSI-X vectors (iflib)
 * @ctx: iflib context
 * @msix: number of vectors granted (unused here beyond iflib's contract)
 *
 * Vector 0 is reserved for the admin queue; RX queues get vectors 1..N
 * (one each), and TX queues are mapped round-robin onto the RX vectors.
 * On RX allocation failure, all previously allocated IRQs are released.
 *
 * @returns 0 on success, or the iflib_irq_alloc_generic() error code
 */
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Que must use vector 0*/
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Que handler");
		return (err);
	}
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		/* Resource IDs are 1-based; vector v uses rid v+1. */
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			/*
			 * NOTE(review): this makes the cleanup loop below
			 * include queue i, whose IRQ allocation just failed;
			 * freeing the never-allocated que_irq appears to be
			 * relied upon to be harmless — confirm against
			 * iflib_irq_free() semantics.
			 */
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	/* TX queues share RX vectors, distributed round-robin. */
	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		* interrupts to map Tx queues to. I don't know if there's an immediately
		* better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}
1109 
1110 /*
1111  * Enable all interrupts
1112  *
1113  * Called in:
1114  * iflib_init_locked, after ixl_if_init()
1115  */
1116 static void
1117 ixl_if_enable_intr(if_ctx_t ctx)
1118 {
1119 	struct ixl_pf *pf = iflib_get_softc(ctx);
1120 	struct ixl_vsi *vsi = &pf->vsi;
1121 	struct i40e_hw		*hw = vsi->hw;
1122 	struct ixl_rx_queue	*que = vsi->rx_queues;
1123 
1124 	ixl_enable_intr0(hw);
1125 	/* Enable queue interrupts */
1126 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1127 		/* TODO: Queue index parameter is probably wrong */
1128 		ixl_enable_queue(hw, que->rxr.me);
1129 }
1130 
1131 /*
1132  * Disable queue interrupts
1133  *
1134  * Other interrupt causes need to remain active.
1135  */
1136 static void
1137 ixl_if_disable_intr(if_ctx_t ctx)
1138 {
1139 	struct ixl_pf *pf = iflib_get_softc(ctx);
1140 	struct ixl_vsi *vsi = &pf->vsi;
1141 	struct i40e_hw		*hw = vsi->hw;
1142 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1143 
1144 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1145 		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1146 			ixl_disable_queue(hw, rx_que->msix - 1);
1147 	} else {
1148 		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1149 		// stops queues from triggering interrupts
1150 		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1151 	}
1152 }
1153 
1154 static int
1155 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1156 {
1157 	struct ixl_pf *pf = iflib_get_softc(ctx);
1158 	struct ixl_vsi *vsi = &pf->vsi;
1159 	struct i40e_hw		*hw = vsi->hw;
1160 	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];
1161 
1162 	ixl_enable_queue(hw, rx_que->msix - 1);
1163 	return (0);
1164 }
1165 
1166 static int
1167 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1168 {
1169 	struct ixl_pf *pf = iflib_get_softc(ctx);
1170 	struct ixl_vsi *vsi = &pf->vsi;
1171 	struct i40e_hw *hw = vsi->hw;
1172 	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1173 
1174 	ixl_enable_queue(hw, tx_que->msix - 1);
1175 	return (0);
1176 }
1177 
1178 static int
1179 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1180 {
1181 	struct ixl_pf *pf = iflib_get_softc(ctx);
1182 	struct ixl_vsi *vsi = &pf->vsi;
1183 	if_softc_ctx_t scctx = vsi->shared;
1184 	struct ixl_tx_queue *que;
1185 	int i, j, error = 0;
1186 
1187 	MPASS(scctx->isc_ntxqsets > 0);
1188 	MPASS(ntxqs == 1);
1189 	MPASS(scctx->isc_ntxqsets == ntxqsets);
1190 
1191 	/* Allocate queue structure memory */
1192 	if (!(vsi->tx_queues =
1193 	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1194 		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1195 		return (ENOMEM);
1196 	}
1197 
1198 	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1199 		struct tx_ring *txr = &que->txr;
1200 
1201 		txr->me = i;
1202 		que->vsi = vsi;
1203 
1204 		if (!vsi->enable_head_writeback) {
1205 			/* Allocate report status array */
1206 			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1207 				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1208 				error = ENOMEM;
1209 				goto fail;
1210 			}
1211 			/* Init report status array */
1212 			for (j = 0; j < scctx->isc_ntxd[0]; j++)
1213 				txr->tx_rsq[j] = QIDX_INVALID;
1214 		}
1215 		/* get the virtual and physical address of the hardware queues */
1216 		txr->tail = I40E_QTX_TAIL(txr->me);
1217 		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1218 		txr->tx_paddr = paddrs[i * ntxqs];
1219 		txr->que = que;
1220 	}
1221 
1222 	return (0);
1223 fail:
1224 	ixl_if_queues_free(ctx);
1225 	return (error);
1226 }
1227 
1228 static int
1229 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1230 {
1231 	struct ixl_pf *pf = iflib_get_softc(ctx);
1232 	struct ixl_vsi *vsi = &pf->vsi;
1233 	struct ixl_rx_queue *que;
1234 	int i, error = 0;
1235 
1236 #ifdef INVARIANTS
1237 	if_softc_ctx_t scctx = vsi->shared;
1238 	MPASS(scctx->isc_nrxqsets > 0);
1239 	MPASS(nrxqs == 1);
1240 	MPASS(scctx->isc_nrxqsets == nrxqsets);
1241 #endif
1242 
1243 	/* Allocate queue structure memory */
1244 	if (!(vsi->rx_queues =
1245 	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1246 	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1247 		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1248 		error = ENOMEM;
1249 		goto fail;
1250 	}
1251 
1252 	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1253 		struct rx_ring *rxr = &que->rxr;
1254 
1255 		rxr->me = i;
1256 		que->vsi = vsi;
1257 
1258 		/* get the virtual and physical address of the hardware queues */
1259 		rxr->tail = I40E_QRX_TAIL(rxr->me);
1260 		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1261 		rxr->rx_paddr = paddrs[i * nrxqs];
1262 		rxr->que = que;
1263 	}
1264 
1265 	return (0);
1266 fail:
1267 	ixl_if_queues_free(ctx);
1268 	return (error);
1269 }
1270 
1271 static void
1272 ixl_if_queues_free(if_ctx_t ctx)
1273 {
1274 	struct ixl_pf *pf = iflib_get_softc(ctx);
1275 	struct ixl_vsi *vsi = &pf->vsi;
1276 
1277 	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
1278 		struct ixl_tx_queue *que;
1279 		int i = 0;
1280 
1281 		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1282 			struct tx_ring *txr = &que->txr;
1283 			if (txr->tx_rsq != NULL) {
1284 				free(txr->tx_rsq, M_IXL);
1285 				txr->tx_rsq = NULL;
1286 			}
1287 		}
1288 	}
1289 
1290 	if (vsi->tx_queues != NULL) {
1291 		free(vsi->tx_queues, M_IXL);
1292 		vsi->tx_queues = NULL;
1293 	}
1294 	if (vsi->rx_queues != NULL) {
1295 		free(vsi->rx_queues, M_IXL);
1296 		vsi->rx_queues = NULL;
1297 	}
1298 
1299 	if (!IXL_PF_IN_RECOVERY_MODE(pf))
1300 		sysctl_ctx_free(&vsi->sysctl_ctx);
1301 }
1302 
1303 void
1304 ixl_update_link_status(struct ixl_pf *pf)
1305 {
1306 	struct ixl_vsi *vsi = &pf->vsi;
1307 	struct i40e_hw *hw = &pf->hw;
1308 	u64 baudrate;
1309 
1310 	if (pf->link_up) {
1311 		if (vsi->link_active == FALSE) {
1312 			vsi->link_active = TRUE;
1313 			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1314 			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1315 			ixl_link_up_msg(pf);
1316 #ifdef PCI_IOV
1317 			ixl_broadcast_link_state(pf);
1318 #endif
1319 		}
1320 	} else { /* Link down */
1321 		if (vsi->link_active == TRUE) {
1322 			vsi->link_active = FALSE;
1323 			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1324 #ifdef PCI_IOV
1325 			ixl_broadcast_link_state(pf);
1326 #endif
1327 		}
1328 	}
1329 }
1330 
1331 static void
1332 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1333 {
1334 	device_t dev = pf->dev;
1335 	u32 rxq_idx, qtx_ctl;
1336 
1337 	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1338 	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1339 	qtx_ctl = e->desc.params.external.param1;
1340 
1341 	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1342 	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
1343 }
1344 
/**
 * ixl_process_adminq - Drain and dispatch admin queue events
 * @pf: PF softc
 * @pending: out parameter; events still queued when the loop exits
 *
 * Pulls up to IXL_ADM_LIMIT events off the ARQ, dispatching link,
 * VF-message (PCI_IOV), and LAN-overflow events, then re-enables the
 * admin queue interrupt cause.
 *
 * @returns ENOMEM if the event buffer can't be allocated, otherwise the
 * last i40e_clean_arq_element() status
 */
static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		/* Non-zero status includes "queue empty"; stop draining. */
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}
1401 
1402 static void
1403 ixl_if_update_admin_status(if_ctx_t ctx)
1404 {
1405 	struct ixl_pf	*pf = iflib_get_softc(ctx);
1406 	struct i40e_hw	*hw = &pf->hw;
1407 	u16		pending;
1408 
1409 	if (IXL_PF_IS_RESETTING(pf))
1410 		ixl_handle_empr_reset(pf);
1411 
1412 	/*
1413 	 * Admin Queue is shut down while handling reset.
1414 	 * Don't proceed if it hasn't been re-initialized
1415 	 * e.g due to an issue with new FW.
1416 	 */
1417 	if (!i40e_check_asq_alive(&pf->hw))
1418 		return;
1419 
1420 	if (pf->state & IXL_PF_STATE_MDD_PENDING)
1421 		ixl_handle_mdd_event(pf);
1422 
1423 	ixl_process_adminq(pf, &pending);
1424 	ixl_update_link_status(pf);
1425 
1426 	/*
1427 	 * If there are still messages to process, reschedule ourselves.
1428 	 * Otherwise, re-enable our interrupt and go to sleep.
1429 	 */
1430 	if (pending > 0)
1431 		iflib_admin_intr_deferred(ctx);
1432 	else
1433 		ixl_enable_intr0(hw);
1434 }
1435 
1436 static void
1437 ixl_if_multi_set(if_ctx_t ctx)
1438 {
1439 	struct ixl_pf *pf = iflib_get_softc(ctx);
1440 	struct ixl_vsi *vsi = &pf->vsi;
1441 	struct i40e_hw *hw = vsi->hw;
1442 	int mcnt;
1443 
1444 	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1445 
1446 	/* Delete filters for removed multicast addresses */
1447 	ixl_del_multi(vsi, false);
1448 
1449 	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
1450 	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1451 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1452 		    vsi->seid, TRUE, NULL);
1453 		ixl_del_multi(vsi, true);
1454 		return;
1455 	}
1456 
1457 	ixl_add_multi(vsi);
1458 	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
1459 }
1460 
1461 static int
1462 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1463 {
1464 	struct ixl_pf *pf = iflib_get_softc(ctx);
1465 	struct ixl_vsi *vsi = &pf->vsi;
1466 
1467 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1468 	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1469 		ETHER_VLAN_ENCAP_LEN)
1470 		return (EINVAL);
1471 
1472 	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1473 		ETHER_VLAN_ENCAP_LEN;
1474 
1475 	return (0);
1476 }
1477 
/**
 * ixl_if_media_status - Report current media and link state (iflib)
 * @ctx: iflib context
 * @ifmr: media request to fill in
 *
 * Marks the media valid, and if link is up, translates the firmware's
 * PHY type into the corresponding ifmedia word (always full-duplex on
 * this hardware) and appends the negotiated pause capabilities.
 */
static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* With link down, only "valid but inactive" is reported. */
	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	/* Map the firmware-reported PHY type to an ifmedia subtype. */
	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 2.5 G */
		case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_2500_T;
			break;
		/* 5 G */
		case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_5000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}
1618 
1619 static int
1620 ixl_if_media_change(if_ctx_t ctx)
1621 {
1622 	struct ifmedia *ifm = iflib_get_media(ctx);
1623 
1624 	INIT_DEBUGOUT("ixl_media_change: begin");
1625 
1626 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1627 		return (EINVAL);
1628 
1629 	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1630 	return (ENODEV);
1631 }
1632 
1633 static int
1634 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1635 {
1636 	struct ixl_pf *pf = iflib_get_softc(ctx);
1637 	struct ixl_vsi *vsi = &pf->vsi;
1638 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1639 	struct i40e_hw	*hw = vsi->hw;
1640 	int		err;
1641 	bool		uni = FALSE, multi = FALSE;
1642 
1643 	if (flags & IFF_PROMISC)
1644 		uni = multi = TRUE;
1645 	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1646 	    MAX_MULTICAST_ADDR)
1647 		multi = TRUE;
1648 
1649 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1650 	    vsi->seid, uni, NULL, true);
1651 	if (err)
1652 		return (err);
1653 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1654 	    vsi->seid, multi, NULL);
1655 	return (err);
1656 }
1657 
1658 static void
1659 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1660 {
1661 	struct ixl_pf *pf = iflib_get_softc(ctx);
1662 
1663 	if (qid != 0)
1664 		return;
1665 
1666 	ixl_update_stats_counters(pf);
1667 }
1668 
1669 static void
1670 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1671 {
1672 	struct ixl_pf *pf = iflib_get_softc(ctx);
1673 	struct ixl_vsi *vsi = &pf->vsi;
1674 	struct i40e_hw	*hw = vsi->hw;
1675 	if_t ifp = iflib_get_ifp(ctx);
1676 
1677 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1678 		return;
1679 
1680 	/*
1681 	 * Keep track of registered VLANS to know what
1682 	 * filters have to be configured when VLAN_HWFILTER
1683 	 * capability is enabled.
1684 	 */
1685 	++vsi->num_vlans;
1686 	bit_set(vsi->vlans_map, vtag);
1687 
1688 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1689 		return;
1690 
1691 	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1692 		ixl_add_filter(vsi, hw->mac.addr, vtag);
1693 	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1694 		/*
1695 		 * There is not enough HW resources to add filters
1696 		 * for all registered VLANs. Re-configure filtering
1697 		 * to allow reception of all expected traffic.
1698 		 */
1699 		device_printf(vsi->dev,
1700 		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled");
1701 		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
1702 		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1703 	}
1704 }
1705 
1706 static void
1707 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1708 {
1709 	struct ixl_pf *pf = iflib_get_softc(ctx);
1710 	struct ixl_vsi *vsi = &pf->vsi;
1711 	struct i40e_hw	*hw = vsi->hw;
1712 	if_t ifp = iflib_get_ifp(ctx);
1713 
1714 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1715 		return;
1716 
1717 	--vsi->num_vlans;
1718 	bit_clear(vsi->vlans_map, vtag);
1719 
1720 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1721 		return;
1722 
1723 	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1724 		ixl_del_filter(vsi, hw->mac.addr, vtag);
1725 	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1726 		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1727 		ixl_add_vlan_filters(vsi, hw->mac.addr);
1728 	}
1729 }
1730 
/**
 * ixl_if_get_counter - Return an interface statistic (iflib)
 * @ctx: iflib context
 * @cnt: which counter is requested
 *
 * Maps each ift_counter onto the corresponding cached VSI statistic;
 * anything not tracked per-VSI falls through to the generic ifnet
 * counters via if_get_counter_default().
 */
static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
1768 
1769 #ifdef PCI_IOV
1770 static void
1771 ixl_if_vflr_handle(if_ctx_t ctx)
1772 {
1773 	struct ixl_pf *pf = iflib_get_softc(ctx);
1774 
1775 	ixl_handle_vflr(pf);
1776 }
1777 #endif
1778 
1779 static int
1780 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1781 {
1782 	struct ixl_pf		*pf = iflib_get_softc(ctx);
1783 
1784 	if (pf->read_i2c_byte == NULL)
1785 		return (EINVAL);
1786 
1787 	for (int i = 0; i < req->len; i++)
1788 		if (pf->read_i2c_byte(pf, req->offset + i,
1789 		    req->dev_addr, &req->data[i]))
1790 			return (EIO);
1791 	return (0);
1792 }
1793 
1794 static int
1795 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1796 {
1797 	struct ixl_pf *pf = iflib_get_softc(ctx);
1798 	struct ifdrv *ifd = (struct ifdrv *)data;
1799 	int error = 0;
1800 
1801 	/*
1802 	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOGPRIVATE_0 without
1803 	 * performing privilege checks. It is important that this function
1804 	 * perform the necessary checks for commands which should only be
1805 	 * executed by privileged threads.
1806 	 */
1807 
1808 	switch(command) {
1809 	case SIOCGDRVSPEC:
1810 	case SIOCSDRVSPEC:
1811 		/* NVM update command */
1812 		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1813 			error = priv_check(curthread, PRIV_DRIVER);
1814 			if (error)
1815 				break;
1816 			error = ixl_handle_nvmupd_cmd(pf, ifd);
1817 		} else {
1818 			error = EINVAL;
1819 		}
1820 		break;
1821 	default:
1822 		error = EOPNOTSUPP;
1823 	}
1824 
1825 	return (error);
1826 }
1827 
1828 /* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1829  * @ctx: iflib context
1830  * @event: event code to check
1831  *
1832  * Defaults to returning false for every event.
1833  *
1834  * @returns true if iflib needs to reinit the interface, false otherwise
1835  */
1836 static bool
1837 ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1838 {
1839 	switch (event) {
1840 	case IFLIB_RESTART_VLAN_CONFIG:
1841 	default:
1842 		return (false);
1843 	}
1844 }
1845 
1846 /*
1847  * Sanity check and save off tunable values.
1848  */
1849 static void
1850 ixl_save_pf_tunables(struct ixl_pf *pf)
1851 {
1852 	device_t dev = pf->dev;
1853 
1854 	/* Save tunable information */
1855 #ifdef IXL_DEBUG_FC
1856 	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1857 #endif
1858 #ifdef IXL_DEBUG
1859 	pf->recovery_mode = ixl_debug_recovery_mode;
1860 #endif
1861 	pf->dbg_mask = ixl_core_debug_mask;
1862 	pf->hw.debug_mask = ixl_shared_debug_mask;
1863 	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1864 	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1865 #if 0
1866 	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1867 	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1868 #endif
1869 
1870 	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1871 		pf->i2c_access_method = 0;
1872 	else
1873 		pf->i2c_access_method = ixl_i2c_access_method;
1874 
1875 	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1876 		device_printf(dev, "Invalid tx_itr value of %d set!\n",
1877 		    ixl_tx_itr);
1878 		device_printf(dev, "tx_itr must be between %d and %d, "
1879 		    "inclusive\n",
1880 		    0, IXL_MAX_ITR);
1881 		device_printf(dev, "Using default value of %d instead\n",
1882 		    IXL_ITR_4K);
1883 		pf->tx_itr = IXL_ITR_4K;
1884 	} else
1885 		pf->tx_itr = ixl_tx_itr;
1886 
1887 	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1888 		device_printf(dev, "Invalid rx_itr value of %d set!\n",
1889 		    ixl_rx_itr);
1890 		device_printf(dev, "rx_itr must be between %d and %d, "
1891 		    "inclusive\n",
1892 		    0, IXL_MAX_ITR);
1893 		device_printf(dev, "Using default value of %d instead\n",
1894 		    IXL_ITR_8K);
1895 		pf->rx_itr = IXL_ITR_8K;
1896 	} else
1897 		pf->rx_itr = ixl_rx_itr;
1898 
1899 	pf->fc = -1;
1900 	if (ixl_flow_control != -1) {
1901 		if (ixl_flow_control < 0 || ixl_flow_control > 3) {
1902 			device_printf(dev,
1903 			    "Invalid flow_control value of %d set!\n",
1904 			    ixl_flow_control);
1905 			device_printf(dev,
1906 			    "flow_control must be between %d and %d, "
1907 			    "inclusive\n", 0, 3);
1908 			device_printf(dev,
1909 			    "Using default configuration instead\n");
1910 		} else
1911 			pf->fc = ixl_flow_control;
1912 	}
1913 }
1914 
1915