1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl.h"
36 #include "ixl_pf.h"
37 
38 #ifdef IXL_IW
39 #include "ixl_iw.h"
40 #include "ixl_iw_int.h"
41 #endif
42 
43 #ifdef PCI_IOV
44 #include "ixl_pf_iov.h"
45 #endif
46 
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50 #define IXL_DRIVER_VERSION_MAJOR	2
51 #define IXL_DRIVER_VERSION_MINOR	3
52 #define IXL_DRIVER_VERSION_BUILD	0
53 
54 #define IXL_DRIVER_VERSION_STRING			\
55     __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
56     __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
57     __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
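/* With the values above, this expands to the string "2.3.0-k" */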
58 
59 /*********************************************************************
60  *  PCI Device ID Table
61  *
62  *  Used by probe to select the devices to attach to
63  *
64  *  ( Vendor ID, Device ID, Branding String )
65  *********************************************************************/
66 
67 static pci_vendor_info_t ixl_vendor_info_array[] =
68 {
69 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
70 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
71 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
72 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
73 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
74 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
75 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
76 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
77 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
78 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
79 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
80 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
81 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
82 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
83 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
84 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
85 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
86 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
87 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
88 	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
89 	/* required last entry */
90 	PVID_END
91 };
92 
93 /*********************************************************************
94  *  Function prototypes
95  *********************************************************************/
96 /*** IFLIB interface ***/
97 static void	*ixl_register(device_t dev);
98 static int	 ixl_if_attach_pre(if_ctx_t ctx);
99 static int	 ixl_if_attach_post(if_ctx_t ctx);
100 static int	 ixl_if_detach(if_ctx_t ctx);
101 static int	 ixl_if_shutdown(if_ctx_t ctx);
102 static int	 ixl_if_suspend(if_ctx_t ctx);
103 static int	 ixl_if_resume(if_ctx_t ctx);
104 static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
105 static void	 ixl_if_enable_intr(if_ctx_t ctx);
106 static void	 ixl_if_disable_intr(if_ctx_t ctx);
107 static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
108 static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
109 static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
110 static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
111 static void	 ixl_if_queues_free(if_ctx_t ctx);
112 static void	 ixl_if_update_admin_status(if_ctx_t ctx);
113 static void	 ixl_if_multi_set(if_ctx_t ctx);
114 static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
115 static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
116 static int	 ixl_if_media_change(if_ctx_t ctx);
117 static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
118 static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
119 static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
120 static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
121 static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
122 static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
123 static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
124 static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
125 #ifdef PCI_IOV
126 static void	 ixl_if_vflr_handle(if_ctx_t ctx);
127 #endif
128 
129 /*** Other ***/
130 static void	 ixl_save_pf_tunables(struct ixl_pf *);
131 static int	 ixl_allocate_pci_resources(struct ixl_pf *);
132 static void	 ixl_setup_ssctx(struct ixl_pf *pf);
133 static void	 ixl_admin_timer(void *arg);
134 
135 /*********************************************************************
136  *  FreeBSD Device Interface Entry Points
137  *********************************************************************/
138 
139 static device_method_t ixl_methods[] = {
140 	/* Device interface */
141 	DEVMETHOD(device_register, ixl_register),
142 	DEVMETHOD(device_probe, iflib_device_probe),
143 	DEVMETHOD(device_attach, iflib_device_attach),
144 	DEVMETHOD(device_detach, iflib_device_detach),
145 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
146 #ifdef PCI_IOV
147 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
148 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
149 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
150 #endif
151 	DEVMETHOD_END
152 };
153 
154 static driver_t ixl_driver = {
155 	"ixl", ixl_methods, sizeof(struct ixl_pf),
156 };
157 
158 devclass_t ixl_devclass;
159 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
160 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
161 MODULE_VERSION(ixl, 3);
162 
163 MODULE_DEPEND(ixl, pci, 1, 1, 1);
164 MODULE_DEPEND(ixl, ether, 1, 1, 1);
165 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
166 
167 static device_method_t ixl_if_methods[] = {
168 	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
169 	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
170 	DEVMETHOD(ifdi_detach, ixl_if_detach),
171 	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
172 	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
173 	DEVMETHOD(ifdi_resume, ixl_if_resume),
174 	DEVMETHOD(ifdi_init, ixl_if_init),
175 	DEVMETHOD(ifdi_stop, ixl_if_stop),
176 	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
177 	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
178 	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
179 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
180 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
181 	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
182 	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
183 	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
184 	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
185 	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
186 	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
187 	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
188 	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
189 	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
190 	DEVMETHOD(ifdi_timer, ixl_if_timer),
191 	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
192 	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
193 	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
194 	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
195 	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
196 	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
197 #ifdef PCI_IOV
198 	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
199 	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
200 	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
201 	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
202 #endif
203 	/* ifdi_led_func */
204 	/* ifdi_debug */
205 	DEVMETHOD_END
206 };
207 
208 static driver_t ixl_if_driver = {
209 	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
210 };
211 
212 /*
213 ** TUNABLE PARAMETERS:
214 */
215 
216 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
217     "ixl driver parameters");
218 
219 #ifdef IXL_DEBUG_FC
220 /*
221  * Leave this on unless you need to send flow control
222  * frames (or other control frames) from software
223  */
224 static int ixl_enable_tx_fc_filter = 1;
225 TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
226     &ixl_enable_tx_fc_filter);
227 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
228     &ixl_enable_tx_fc_filter, 0,
229     "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
230 #endif
231 
232 #ifdef IXL_DEBUG
233 static int ixl_debug_recovery_mode = 0;
234 TUNABLE_INT("hw.ixl.debug_recovery_mode",
235     &ixl_debug_recovery_mode);
236 SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
237     &ixl_debug_recovery_mode, 0,
238     "Act as if FW had entered recovery mode (for debugging)");
239 #endif
240 
241 static int ixl_i2c_access_method = 0;
242 TUNABLE_INT("hw.ixl.i2c_access_method",
243     &ixl_i2c_access_method);
244 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
245     &ixl_i2c_access_method, 0,
246     IXL_SYSCTL_HELP_I2C_METHOD);
247 
248 static int ixl_enable_vf_loopback = 1;
249 TUNABLE_INT("hw.ixl.enable_vf_loopback",
250     &ixl_enable_vf_loopback);
251 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
252     &ixl_enable_vf_loopback, 0,
253     IXL_SYSCTL_HELP_VF_LOOPBACK);
254 
255 /*
256  * Different method for processing TX descriptor
257  * completion.
258  */
259 static int ixl_enable_head_writeback = 1;
260 TUNABLE_INT("hw.ixl.enable_head_writeback",
261     &ixl_enable_head_writeback);
262 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
263     &ixl_enable_head_writeback, 0,
264     "Detect the last completed TX descriptor using the value written back by HW instead of scanning the descriptor ring");
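/*
 * Usage example: since this is a CTLFLAG_RDTUN tunable, head write-back
 * can be disabled at boot via loader.conf:
 *
 *	hw.ixl.enable_head_writeback="0"
 *
 * This makes ixl_setup_ssctx() select the descriptor write-back TX
 * completion path (ixl_txrx_dwb) instead of ixl_txrx_hwb.
 */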
265 
266 static int ixl_core_debug_mask = 0;
267 TUNABLE_INT("hw.ixl.core_debug_mask",
268     &ixl_core_debug_mask);
269 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
270     &ixl_core_debug_mask, 0,
271     "Display debug statements that are printed in non-shared code");
272 
273 static int ixl_shared_debug_mask = 0;
274 TUNABLE_INT("hw.ixl.shared_debug_mask",
275     &ixl_shared_debug_mask);
276 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
277     &ixl_shared_debug_mask, 0,
278     "Display debug statements that are printed in shared code");
279 
280 #if 0
281 /*
282 ** Controls for Interrupt Throttling
283 **	- true/false for dynamic adjustment
284 ** 	- default values for static ITR
285 */
286 static int ixl_dynamic_rx_itr = 0;
287 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
288 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
289     &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
290 
291 static int ixl_dynamic_tx_itr = 0;
292 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
293 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
294     &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
295 #endif
296 
297 static int ixl_rx_itr = IXL_ITR_8K;
298 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
299 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
300     &ixl_rx_itr, 0, "RX Interrupt Rate");
301 
302 static int ixl_tx_itr = IXL_ITR_4K;
303 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
304 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
305     &ixl_tx_itr, 0, "TX Interrupt Rate");
306 
307 #ifdef IXL_IW
308 int ixl_enable_iwarp = 0;
309 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
310 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
311     &ixl_enable_iwarp, 0, "iWARP enabled");
312 
313 #if __FreeBSD_version < 1100000
314 int ixl_limit_iwarp_msix = 1;
315 #else
316 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
317 #endif
318 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
319 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
320     &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
321 #endif
322 
323 extern struct if_txrx ixl_txrx_hwb;
324 extern struct if_txrx ixl_txrx_dwb;
325 
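/*
 * TX/RX routine tables: ixl_txrx_hwb implements the head write-back TX
 * completion scheme, ixl_txrx_dwb the descriptor write-back scheme;
 * ixl_setup_ssctx() picks one based on hw.ixl.enable_head_writeback.
 */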
326 static struct if_shared_ctx ixl_sctx_init = {
327 	.isc_magic = IFLIB_MAGIC,
328 	.isc_q_align = PAGE_SIZE,
329 	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
330 	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
331 	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
332 	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
333 	.isc_rx_maxsize = 16384,
334 	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
335 	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
336 	.isc_nfl = 1,
337 	.isc_ntxqs = 1,
338 	.isc_nrxqs = 1,
339 
340 	.isc_admin_intrcnt = 1,
341 	.isc_vendor_info = ixl_vendor_info_array,
342 	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
343 	.isc_driver = &ixl_if_driver,
344 	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,
345 
346 	.isc_nrxd_min = {IXL_MIN_RING},
347 	.isc_ntxd_min = {IXL_MIN_RING},
348 	.isc_nrxd_max = {IXL_MAX_RING},
349 	.isc_ntxd_max = {IXL_MAX_RING},
350 	.isc_nrxd_default = {IXL_DEFAULT_RING},
351 	.isc_ntxd_default = {IXL_DEFAULT_RING},
352 };
353 
354 if_shared_ctx_t ixl_sctx = &ixl_sctx_init;
355 
356 /*** Functions ***/
357 static void *
358 ixl_register(device_t dev)
359 {
360 	return (ixl_sctx);
361 }
362 
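/*
 * ixl_allocate_pci_resources - map BAR0 and save the PCI IDs and
 * bus-space tag/handle that the shared code uses for register access.
 */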
363 static int
364 ixl_allocate_pci_resources(struct ixl_pf *pf)
365 {
366 	device_t dev = iflib_get_dev(pf->vsi.ctx);
367 	struct i40e_hw *hw = &pf->hw;
368 	int             rid;
369 
370 	/* Map BAR0 */
371 	rid = PCIR_BAR(0);
372 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
373 	    &rid, RF_ACTIVE);
374 
375 	if (!(pf->pci_mem)) {
376 		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
377 		return (ENXIO);
378 	}
379 
380 	/* Save off the PCI information */
381 	hw->vendor_id = pci_get_vendor(dev);
382 	hw->device_id = pci_get_device(dev);
383 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
384 	hw->subsystem_vendor_id =
385 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
386 	hw->subsystem_device_id =
387 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
388 
389 	hw->bus.device = pci_get_slot(dev);
390 	hw->bus.func = pci_get_function(dev);
391 
392 	/* Save off register access information */
393 	pf->osdep.mem_bus_space_tag =
394 		rman_get_bustag(pf->pci_mem);
395 	pf->osdep.mem_bus_space_handle =
396 		rman_get_bushandle(pf->pci_mem);
397 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
398 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
399 	pf->osdep.dev = dev;
400 
401 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
402 	pf->hw.back = &pf->osdep;
403 
404 	return (0);
405 }
406 
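/*
 * ixl_setup_ssctx - fill in the iflib softc context: queue set limits,
 * ring sizes, TSO/checksum limits, and the TX/RX routine table to use.
 */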
407 static void
408 ixl_setup_ssctx(struct ixl_pf *pf)
409 {
410 	if_softc_ctx_t scctx = pf->vsi.shared;
411 	struct i40e_hw *hw = &pf->hw;
412 
413 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
414 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
415 		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
416 	} else if (hw->mac.type == I40E_MAC_X722)
417 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
418 	else
419 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
420 
421 	if (pf->vsi.enable_head_writeback) {
422 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
423 		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
424 		scctx->isc_txrx = &ixl_txrx_hwb;
425 	} else {
426 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
427 		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
428 		scctx->isc_txrx = &ixl_txrx_dwb;
429 	}
430 
431 	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
432 	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
433 	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
434 	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
435 	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
436 	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
437 	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
438 	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
439 	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
440 	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
441 	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
442 }
443 
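/*
 * ixl_admin_timer - periodic callout, fired every hz/2 ticks (~500 ms),
 * that defers admin work to the iflib admin task and reschedules itself.
 */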
444 static void
445 ixl_admin_timer(void *arg)
446 {
447 	struct ixl_pf *pf = (struct ixl_pf *)arg;
448 
449 	/* Fire off the admin task */
450 	iflib_admin_intr_deferred(pf->vsi.ctx);
451 
452 	/* Reschedule the admin timer */
453 	callout_schedule(&pf->admin_timer, hz/2);
454 }
455 
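/*
 * ixl_attach_pre_recovery_mode - minimal attach path used when the
 * firmware is in recovery mode: keep only the admin interrupt alive
 * and expose a single queue pair via ixl_setup_ssctx().
 */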
456 static int
457 ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
458 {
459 	struct ixl_vsi *vsi = &pf->vsi;
460 	struct i40e_hw *hw = &pf->hw;
461 	device_t dev = pf->dev;
462 
463 	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
464 
465 	i40e_get_mac_addr(hw, hw->mac.addr);
466 
467 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
468 		ixl_configure_intr0_msix(pf);
469 		ixl_enable_intr0(hw);
470 	}
471 
472 	ixl_setup_ssctx(pf);
473 
474 	return (0);
475 }
476 
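/*
 * ixl_if_attach_pre - first stage of attach: map PCI resources, reset
 * the device, initialize the shared code and Admin Queue, sanity-check
 * the FW/NVM API version, then fill in the iflib softc context.
 */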
477 static int
478 ixl_if_attach_pre(if_ctx_t ctx)
479 {
480 	device_t dev;
481 	struct ixl_pf *pf;
482 	struct i40e_hw *hw;
483 	struct ixl_vsi *vsi;
484 	enum i40e_get_fw_lldp_status_resp lldp_status;
485 	struct i40e_filter_control_settings filter;
486 	enum i40e_status_code status;
487 	int error = 0;
488 
489 	dev = iflib_get_dev(ctx);
490 	pf = iflib_get_softc(ctx);
491 
492 	INIT_DBG_DEV(dev, "begin");
493 
494 	vsi = &pf->vsi;
495 	vsi->back = pf;
496 	pf->dev = dev;
497 	hw = &pf->hw;
498 
499 	vsi->dev = dev;
500 	vsi->hw = &pf->hw;
501 	vsi->id = 0;
502 	vsi->num_vlans = 0;
503 	vsi->ctx = ctx;
504 	vsi->media = iflib_get_media(ctx);
505 	vsi->shared = iflib_get_softc_ctx(ctx);
506 
507 	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
508 	    "%s:admin", device_get_nameunit(dev));
509 	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
510 	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);
511 
512 	/* Save tunable values */
513 	ixl_save_pf_tunables(pf);
514 
515 	/* Do PCI setup - map BAR0, etc */
516 	if (ixl_allocate_pci_resources(pf)) {
517 		device_printf(dev, "Allocation of PCI resources failed\n");
518 		error = ENXIO;
519 		goto err_pci_res;
520 	}
521 
522 	/* Establish a clean starting point */
523 	i40e_clear_hw(hw);
524 	i40e_set_mac_type(hw);
525 
526 	error = ixl_pf_reset(pf);
527 	if (error)
528 		goto err_out;
529 
530 	/* Initialize the shared code */
531 	status = i40e_init_shared_code(hw);
532 	if (status) {
533 		device_printf(dev, "Unable to initialize shared code, error %s\n",
534 		    i40e_stat_str(hw, status));
535 		error = EIO;
536 		goto err_out;
537 	}
538 
539 	/* Set up the admin queue */
540 	hw->aq.num_arq_entries = IXL_AQ_LEN;
541 	hw->aq.num_asq_entries = IXL_AQ_LEN;
542 	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
543 	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
544 
545 	status = i40e_init_adminq(hw);
546 	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
547 		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
548 		    i40e_stat_str(hw, status));
549 		error = EIO;
550 		goto err_out;
551 	}
552 	ixl_print_nvm_version(pf);
553 
554 	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
555 		device_printf(dev, "The driver for the device stopped "
556 		    "because the NVM image is newer than expected.\n");
557 		device_printf(dev, "You must install the most recent version of "
558 		    "the network driver.\n");
559 		error = EIO;
560 		goto err_out;
561 	}
562 
563 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
564 	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
565 		device_printf(dev, "The driver for the device detected "
566 		    "a newer version of the NVM image than expected.\n");
567 		device_printf(dev, "Please install the most recent version "
568 		    "of the network driver.\n");
569 	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
570 		device_printf(dev, "The driver for the device detected "
571 		    "an older version of the NVM image than expected.\n");
572 		device_printf(dev, "Please update the NVM image.\n");
573 	}
574 
575 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
576 		error = ixl_attach_pre_recovery_mode(pf);
577 		if (error)
578 			goto err_out;
579 		return (error);
580 	}
581 
582 	/* Clear PXE mode */
583 	i40e_clear_pxe_mode(hw);
584 
585 	/* Get capabilities from the device */
586 	error = ixl_get_hw_capabilities(pf);
587 	if (error) {
588 		device_printf(dev, "get_hw_capabilities failed: %d\n",
589 		    error);
590 		goto err_get_cap;
591 	}
592 
593 	/* Set up host memory cache */
594 	error = ixl_setup_hmc(pf);
595 	if (error)
596 		goto err_mac_hmc;
597 
598 	/* Disable LLDP from the firmware for certain NVM versions */
599 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
600 	    (pf->hw.aq.fw_maj_ver < 4)) {
601 		i40e_aq_stop_lldp(hw, true, false, NULL);
602 		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
603 	}
604 
605 	/* Try enabling Energy Efficient Ethernet (EEE) mode */
606 	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
607 		atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
608 	else
609 		atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
610 
611 	/* Get MAC addresses from hardware */
612 	i40e_get_mac_addr(hw, hw->mac.addr);
613 	error = i40e_validate_mac_addr(hw->mac.addr);
614 	if (error) {
615 		device_printf(dev, "validate_mac_addr failed: %d\n", error);
616 		goto err_mac_hmc;
617 	}
618 	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
619 	iflib_set_mac(ctx, hw->mac.addr);
620 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
621 
622 	/* Set up the device filtering */
623 	bzero(&filter, sizeof(filter));
624 	filter.enable_ethtype = TRUE;
625 	filter.enable_macvlan = TRUE;
626 	filter.enable_fdir = FALSE;
627 	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
628 	if (i40e_set_filter_control(hw, &filter))
629 		device_printf(dev, "i40e_set_filter_control() failed\n");
630 
631 	/* Query device FW LLDP status */
632 	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
633 		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
634 			atomic_set_32(&pf->state,
635 			    IXL_PF_STATE_FW_LLDP_DISABLED);
636 		} else {
637 			atomic_clear_32(&pf->state,
638 			    IXL_PF_STATE_FW_LLDP_DISABLED);
639 		}
640 	}
641 
642 	/* Tell FW to apply DCB config on link up */
643 	i40e_aq_set_dcb_parameters(hw, true, NULL);
644 
645 	/* Fill out iflib parameters */
646 	ixl_setup_ssctx(pf);
647 
648 	INIT_DBG_DEV(dev, "end");
649 	return (0);
650 
651 err_mac_hmc:
652 	ixl_shutdown_hmc(pf);
653 err_get_cap:
654 	i40e_shutdown_adminq(hw);
655 err_out:
656 	ixl_free_pci_resources(pf);
657 err_pci_res:
658 	mtx_lock(&pf->admin_mtx);
659 	callout_stop(&pf->admin_timer);
660 	mtx_unlock(&pf->admin_mtx);
661 	mtx_destroy(&pf->admin_mtx);
662 	return (error);
663 }
664 
665 static int
666 ixl_if_attach_post(if_ctx_t ctx)
667 {
668 	device_t dev;
669 	struct ixl_pf *pf;
670 	struct i40e_hw *hw;
671 	struct ixl_vsi *vsi;
672 	int error = 0;
673 	enum i40e_status_code status;
674 
675 	dev = iflib_get_dev(ctx);
676 	pf = iflib_get_softc(ctx);
677 
678 	INIT_DBG_DEV(dev, "begin");
679 
680 	vsi = &pf->vsi;
681 	vsi->ifp = iflib_get_ifp(ctx);
682 	hw = &pf->hw;
683 
684 	/* Save off determined number of queues for interface */
685 	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
686 	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
687 
688 	/* Setup OS network interface / ifnet */
689 	if (ixl_setup_interface(dev, pf)) {
690 		device_printf(dev, "interface setup failed!\n");
691 		error = EIO;
692 		goto err;
693 	}
694 
695 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
696 		/* Keep admin queue interrupts active while driver is loaded */
697 		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
698 			ixl_configure_intr0_msix(pf);
699 			ixl_enable_intr0(hw);
700 		}
701 
702 		ixl_add_sysctls_recovery_mode(pf);
703 
704 		/* Start the admin timer */
705 		mtx_lock(&pf->admin_mtx);
706 		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
707 		mtx_unlock(&pf->admin_mtx);
708 		return (0);
709 	}
710 
711 	/* Determine link state */
712 	if (ixl_attach_get_link_status(pf)) {
713 		error = EINVAL;
714 		goto err;
715 	}
716 
717 	error = ixl_switch_config(pf);
718 	if (error) {
719 		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
720 		     error);
721 		goto err;
722 	}
723 
724 	/* Add protocol filters to list */
725 	ixl_init_filters(vsi);
726 
727 	/* Init queue allocation manager */
728 	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
729 	if (error) {
730 		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
731 		    error);
732 		goto err;
733 	}
734 	/* reserve a contiguous allocation for the PF's VSI */
735 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
736 	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
737 	if (error) {
738 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
739 		    error);
740 		goto err;
741 	}
742 	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
743 	    pf->qtag.num_allocated, pf->qtag.num_active);
744 
745 	/* Limit PHY interrupts to link, autoneg, and modules failure */
746 	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
747 	    NULL);
748 	if (status) {
749 		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s,"
750 		    " aq_err %s\n", i40e_stat_str(hw, status),
751 		    i40e_aq_str(hw, hw->aq.asq_last_status));
752 		goto err;
753 	}
754 
755 	/* Get the bus configuration and set the shared code */
756 	ixl_get_bus_info(pf);
757 
758 	/* Keep admin queue interrupts active while driver is loaded */
759 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
760 		ixl_configure_intr0_msix(pf);
761 		ixl_enable_intr0(hw);
762 	}
763 
764 	/* Set initial advertised speed sysctl value */
765 	ixl_set_initial_advertised_speeds(pf);
766 
767 	/* Initialize statistics & add sysctls */
768 	ixl_add_device_sysctls(pf);
769 	ixl_pf_reset_stats(pf);
770 	ixl_update_stats_counters(pf);
771 	ixl_add_hw_stats(pf);
772 
773 	/*
774 	 * Driver may have been reloaded. Ensure that the link state
775 	 * is consistent with current settings.
776 	 */
777 	ixl_set_link(pf, (pf->state & IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) != 0);
778 
779 	hw->phy.get_link_info = true;
780 	i40e_get_link_status(hw, &pf->link_up);
781 	ixl_update_link_status(pf);
782 
783 #ifdef PCI_IOV
784 	ixl_initialize_sriov(pf);
785 #endif
786 
787 #ifdef IXL_IW
788 	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
789 		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
790 		if (pf->iw_enabled) {
791 			error = ixl_iw_pf_attach(pf);
792 			if (error) {
793 				device_printf(dev,
794 				    "interfacing to iWARP driver failed: %d\n",
795 				    error);
796 				goto err;
797 			} else
798 				device_printf(dev, "iWARP ready\n");
799 		} else
800 			device_printf(dev, "iWARP disabled on this device "
801 			    "(no MSI-X vectors)\n");
802 	} else {
803 		pf->iw_enabled = false;
804 		device_printf(dev, "The device is not iWARP enabled\n");
805 	}
806 #endif
807 	/* Start the admin timer */
808 	mtx_lock(&pf->admin_mtx);
809 	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
810 	mtx_unlock(&pf->admin_mtx);
811 
812 	INIT_DBG_DEV(dev, "end");
813 	return (0);
814 
815 err:
816 	INIT_DEBUGOUT("end: error %d", error);
817 	/* ixl_if_detach() is called on error from this */
818 	return (error);
819 }
820 
821 /**
822  * XXX: iflib always ignores the return value of detach(),
823  * so this function is not allowed to fail.
824  */
825 static int
826 ixl_if_detach(if_ctx_t ctx)
827 {
828 	struct ixl_pf *pf = iflib_get_softc(ctx);
829 	struct ixl_vsi *vsi = &pf->vsi;
830 	struct i40e_hw *hw = &pf->hw;
831 	device_t dev = pf->dev;
832 	enum i40e_status_code	status;
833 #ifdef IXL_IW
834 	int			error;
835 #endif
836 
837 	INIT_DBG_DEV(dev, "begin");
838 
839 	/* Stop the admin timer */
840 	mtx_lock(&pf->admin_mtx);
841 	callout_stop(&pf->admin_timer);
842 	mtx_unlock(&pf->admin_mtx);
843 	mtx_destroy(&pf->admin_mtx);
844 
845 #ifdef IXL_IW
846 	if (ixl_enable_iwarp && pf->iw_enabled) {
847 		error = ixl_iw_pf_detach(pf);
848 		if (error == EBUSY) {
849 			device_printf(dev, "iwarp in use; stop it first.\n");
850 			//return (error);
851 		}
852 	}
853 #endif
854 	/* Remove all previously allocated media types */
855 	ifmedia_removeall(vsi->media);
856 
857 	/* Shutdown LAN HMC */
858 	ixl_shutdown_hmc(pf);
859 
860 	/* Shutdown admin queue */
861 	ixl_disable_intr0(hw);
862 	status = i40e_shutdown_adminq(hw);
863 	if (status)
864 		device_printf(dev,
865 		    "i40e_shutdown_adminq() failed with status %s\n",
866 		    i40e_stat_str(hw, status));
867 
868 	ixl_pf_qmgr_destroy(&pf->qmgr);
869 	ixl_free_pci_resources(pf);
870 	ixl_free_filters(&vsi->ftl);
871 	INIT_DBG_DEV(dev, "end");
872 	return (0);
873 }
874 
875 static int
876 ixl_if_shutdown(if_ctx_t ctx)
877 {
878 	int error = 0;
879 
880 	INIT_DEBUGOUT("ixl_if_shutdown: begin");
881 
882 	/* TODO: Call ixl_if_stop()? */
883 
884 	/* TODO: Then setup low power mode */
885 
886 	return (error);
887 }
888 
889 static int
890 ixl_if_suspend(if_ctx_t ctx)
891 {
892 	int error = 0;
893 
894 	INIT_DEBUGOUT("ixl_if_suspend: begin");
895 
896 	/* TODO: Call ixl_if_stop()? */
897 
898 	/* TODO: Then setup low power mode */
899 
900 	return (error);
901 }
902 
903 static int
904 ixl_if_resume(if_ctx_t ctx)
905 {
906 	struct ifnet *ifp = iflib_get_ifp(ctx);
907 
908 	INIT_DEBUGOUT("ixl_if_resume: begin");
909 
910 	/* Read & clear wake-up registers */
911 
912 	/* Required after D3->D0 transition */
913 	if (ifp->if_flags & IFF_UP)
914 		ixl_if_init(ctx);
915 
916 	return (0);
917 }
918 
919 void
920 ixl_if_init(if_ctx_t ctx)
921 {
922 	struct ixl_pf *pf = iflib_get_softc(ctx);
923 	struct ixl_vsi *vsi = &pf->vsi;
924 	struct i40e_hw	*hw = &pf->hw;
925 	struct ifnet *ifp = iflib_get_ifp(ctx);
926 	device_t 	dev = iflib_get_dev(ctx);
927 	u8		tmpaddr[ETHER_ADDR_LEN];
928 	int		ret;
929 
930 	if (IXL_PF_IN_RECOVERY_MODE(pf))
931 		return;
932 	/*
933 	 * If the Admin Queue is dead here, something outside the driver
934 	 * probably reset the adapter, e.g. a PF reset, so rebuild the
935 	 * driver's state here if that occurs.
936 	 */
937 	if (!i40e_check_asq_alive(&pf->hw)) {
938 		device_printf(dev, "Admin Queue is down; resetting...\n");
939 		ixl_teardown_hw_structs(pf);
940 		ixl_rebuild_hw_structs_after_reset(pf, false);
941 	}
942 
943 	/* Get the latest MAC address; the user may have set a locally administered address (LAA) */
944 	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
945 	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
946 	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
947 		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
948 		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
949 		ret = i40e_aq_mac_address_write(hw,
950 		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
951 		    hw->mac.addr, NULL);
952 		if (ret) {
953 			device_printf(dev, "LAA address change failed!\n");
954 			return;
955 		}
956 		/*
957 		 * New filters are configured by ixl_reconfigure_filters
958 		 * at the end of ixl_if_init().
959 		 */
960 	}
961 
962 	iflib_set_mac(ctx, hw->mac.addr);
963 
964 	/* Prepare the VSI: rings, hmc contexts, etc... */
965 	if (ixl_initialize_vsi(vsi)) {
966 		device_printf(dev, "initialize vsi failed!!\n");
967 		return;
968 	}
969 
970 	ixl_set_link(pf, true);
971 
972 	/* Reconfigure multicast filters in HW */
973 	ixl_if_multi_set(ctx);
974 
975 	/* Set up RSS */
976 	ixl_config_rss(pf);
977 
978 	/* Set up MSI-X routing and the ITR settings */
979 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
980 		ixl_configure_queue_intr_msix(pf);
981 		ixl_configure_itr(pf);
982 	} else
983 		ixl_configure_legacy(pf);
984 
985 	if (vsi->enable_head_writeback)
986 		ixl_init_tx_cidx(vsi);
987 	else
988 		ixl_init_tx_rsqs(vsi);
989 
990 	ixl_enable_rings(vsi);
991 
992 	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
993 
994 	/* Re-add configure filters to HW */
995 	ixl_reconfigure_filters(vsi);
996 
997 	/* Configure promiscuous mode */
998 	ixl_if_promisc_set(ctx, if_getflags(ifp));
999 
1000 #ifdef IXL_IW
1001 	if (ixl_enable_iwarp && pf->iw_enabled) {
1002 		ret = ixl_iw_pf_init(pf);
1003 		if (ret)
1004 			device_printf(dev,
1005 			    "initialize iwarp failed, code %d\n", ret);
1006 	}
1007 #endif
1008 }
1009 
1010 void
1011 ixl_if_stop(if_ctx_t ctx)
1012 {
1013 	struct ixl_pf *pf = iflib_get_softc(ctx);
1014 	struct ifnet *ifp = iflib_get_ifp(ctx);
1015 	struct ixl_vsi *vsi = &pf->vsi;
1016 
1017 	INIT_DEBUGOUT("ixl_if_stop: begin\n");
1018 
1019 	if (IXL_PF_IN_RECOVERY_MODE(pf))
1020 		return;
1021 
1022 	/* TODO: This may need to be reworked */
1023 #ifdef IXL_IW
1024 	/* Stop iWARP device */
1025 	if (ixl_enable_iwarp && pf->iw_enabled)
1026 		ixl_iw_pf_stop(pf);
1027 #endif
1028 
1029 	ixl_disable_rings_intr(vsi);
1030 	ixl_disable_rings(pf, vsi, &pf->qtag);
1031 
1032 	/*
1033 	 * Don't set the link state if we are only reconfiguring,
1034 	 * e.g. on an MTU change.
1035 	 */
1036 	if ((if_getflags(ifp) & IFF_UP) == 0 &&
1037 	    (atomic_load_acq_32(&pf->state) &
1038 	    IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) == 0)
1039 		ixl_set_link(pf, false);
1040 }
1041 
1042 static int
1043 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
1044 {
1045 	struct ixl_pf *pf = iflib_get_softc(ctx);
1046 	struct ixl_vsi *vsi = &pf->vsi;
1047 	struct ixl_rx_queue *rx_que = vsi->rx_queues;
1048 	struct ixl_tx_queue *tx_que = vsi->tx_queues;
1049 	int err, i, rid, vector = 0;
1050 	char buf[16];
1051 
1052 	MPASS(vsi->shared->isc_nrxqsets > 0);
1053 	MPASS(vsi->shared->isc_ntxqsets > 0);
1054 
1055 	/* Admin Queue must use vector 0 */
1056 	rid = vector + 1;
1057 	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
1058 	    ixl_msix_adminq, pf, 0, "aq");
1059 	if (err) {
1060 		iflib_irq_free(ctx, &vsi->irq);
1061 		device_printf(iflib_get_dev(ctx),
1062 		    "Failed to register Admin Queue handler\n");
1063 		return (err);
1064 	}
1065 	/* Create soft IRQ for handling VFLRs */
1066 	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
1067 
1068 	/* Now set up the stations */
1069 	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
1070 		rid = vector + 1;
1071 
1072 		snprintf(buf, sizeof(buf), "rxq%d", i);
1073 		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1074 		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
1075 		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
1076 		 * what's expected in the iflib context? */
1077 		if (err) {
1078 			device_printf(iflib_get_dev(ctx),
1079 			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
1080 			vsi->num_rx_queues = i + 1;
1081 			goto fail;
1082 		}
1083 		rx_que->msix = vector;
1084 	}
1085 
1086 	bzero(buf, sizeof(buf));
1087 
1088 	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
1089 		snprintf(buf, sizeof(buf), "txq%d", i);
1090 		iflib_softirq_alloc_generic(ctx,
1091 		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
1092 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1093 
1094 		/* TODO: Maybe call a strategy function to figure out which
1095 		 * interrupts to map TX queues to. There may be no better way
1096 		 * than this, short of a user-supplied map. */
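		/*
		 * Illustrative example: with 4 RX queue sets and 8 TX queue
		 * sets, TX queue i shares the interrupt of RX queue (i % 4),
		 * i.e. MSI-X vectors 1 through 4.
		 */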
1097 		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
1098 	}
1099 
1100 	return (0);
1101 fail:
1102 	iflib_irq_free(ctx, &vsi->irq);
1103 	rx_que = vsi->rx_queues;
1104 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1105 		iflib_irq_free(ctx, &rx_que->que_irq);
1106 	return (err);
1107 }
1108 
1109 /*
1110  * Enable all interrupts
1111  *
1112  * Called in:
1113  * iflib_init_locked, after ixl_if_init()
1114  */
1115 static void
1116 ixl_if_enable_intr(if_ctx_t ctx)
1117 {
1118 	struct ixl_pf *pf = iflib_get_softc(ctx);
1119 	struct ixl_vsi *vsi = &pf->vsi;
1120 	struct i40e_hw		*hw = vsi->hw;
1121 	struct ixl_rx_queue	*que = vsi->rx_queues;
1122 
1123 	ixl_enable_intr0(hw);
1124 	/* Enable queue interrupts */
1125 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1126 		/* TODO: Queue index parameter is probably wrong */
1127 		ixl_enable_queue(hw, que->rxr.me);
1128 }
1129 
1130 /*
1131  * Disable queue interrupts
1132  *
1133  * Other interrupt causes need to remain active.
1134  */
1135 static void
1136 ixl_if_disable_intr(if_ctx_t ctx)
1137 {
1138 	struct ixl_pf *pf = iflib_get_softc(ctx);
1139 	struct ixl_vsi *vsi = &pf->vsi;
1140 	struct i40e_hw		*hw = vsi->hw;
1141 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1142 
1143 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1144 		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1145 			ixl_disable_queue(hw, rx_que->msix - 1);
1146 	} else {
1147 		/* Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF; this stops
1148 		 * queues from triggering interrupts */
1149 		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1150 	}
1151 }
1152 
1153 static int
1154 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1155 {
1156 	struct ixl_pf *pf = iflib_get_softc(ctx);
1157 	struct ixl_vsi *vsi = &pf->vsi;
1158 	struct i40e_hw		*hw = vsi->hw;
1159 	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];
1160 
1161 	ixl_enable_queue(hw, rx_que->msix - 1);
1162 	return (0);
1163 }
1164 
1165 static int
1166 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1167 {
1168 	struct ixl_pf *pf = iflib_get_softc(ctx);
1169 	struct ixl_vsi *vsi = &pf->vsi;
1170 	struct i40e_hw *hw = vsi->hw;
1171 	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1172 
1173 	ixl_enable_queue(hw, tx_que->msix - 1);
1174 	return (0);
1175 }
1176 
1177 static int
1178 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1179 {
1180 	struct ixl_pf *pf = iflib_get_softc(ctx);
1181 	struct ixl_vsi *vsi = &pf->vsi;
1182 	if_softc_ctx_t scctx = vsi->shared;
1183 	struct ixl_tx_queue *que;
1184 	int i, j, error = 0;
1185 
1186 	MPASS(scctx->isc_ntxqsets > 0);
1187 	MPASS(ntxqs == 1);
1188 	MPASS(scctx->isc_ntxqsets == ntxqsets);
1189 
1190 	/* Allocate queue structure memory */
1191 	if (!(vsi->tx_queues =
1192 	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1193 		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1194 		return (ENOMEM);
1195 	}
1196 
1197 	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1198 		struct tx_ring *txr = &que->txr;
1199 
1200 		txr->me = i;
1201 		que->vsi = vsi;
1202 
1203 		if (!vsi->enable_head_writeback) {
1204 			/* Allocate report status array */
1205 			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1206 				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1207 				error = ENOMEM;
1208 				goto fail;
1209 			}
1210 			/* Init report status array */
1211 			for (j = 0; j < scctx->isc_ntxd[0]; j++)
1212 				txr->tx_rsq[j] = QIDX_INVALID;
1213 		}
1214 		/* get the virtual and physical address of the hardware queues */
1215 		txr->tail = I40E_QTX_TAIL(txr->me);
1216 		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1217 		txr->tx_paddr = paddrs[i * ntxqs];
1218 		txr->que = que;
1219 	}
1220 
1221 	return (0);
1222 fail:
1223 	ixl_if_queues_free(ctx);
1224 	return (error);
1225 }
1226 
1227 static int
1228 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1229 {
1230 	struct ixl_pf *pf = iflib_get_softc(ctx);
1231 	struct ixl_vsi *vsi = &pf->vsi;
1232 	struct ixl_rx_queue *que;
1233 	int i, error = 0;
1234 
1235 #ifdef INVARIANTS
1236 	if_softc_ctx_t scctx = vsi->shared;
1237 	MPASS(scctx->isc_nrxqsets > 0);
1238 	MPASS(nrxqs == 1);
1239 	MPASS(scctx->isc_nrxqsets == nrxqsets);
1240 #endif
1241 
1242 	/* Allocate queue structure memory */
1243 	if (!(vsi->rx_queues =
1244 	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1245 	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1246 		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1247 		error = ENOMEM;
1248 		goto fail;
1249 	}
1250 
1251 	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1252 		struct rx_ring *rxr = &que->rxr;
1253 
1254 		rxr->me = i;
1255 		que->vsi = vsi;
1256 
1257 		/* get the virtual and physical address of the hardware queues */
1258 		rxr->tail = I40E_QRX_TAIL(rxr->me);
1259 		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1260 		rxr->rx_paddr = paddrs[i * nrxqs];
1261 		rxr->que = que;
1262 	}
1263 
1264 	return (0);
1265 fail:
1266 	ixl_if_queues_free(ctx);
1267 	return (error);
1268 }
1269 
1270 static void
1271 ixl_if_queues_free(if_ctx_t ctx)
1272 {
1273 	struct ixl_pf *pf = iflib_get_softc(ctx);
1274 	struct ixl_vsi *vsi = &pf->vsi;
1275 
1276 	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
1277 		struct ixl_tx_queue *que;
1278 		int i = 0;
1279 
1280 		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1281 			struct tx_ring *txr = &que->txr;
1282 			if (txr->tx_rsq != NULL) {
1283 				free(txr->tx_rsq, M_IXL);
1284 				txr->tx_rsq = NULL;
1285 			}
1286 		}
1287 	}
1288 
1289 	if (vsi->tx_queues != NULL) {
1290 		free(vsi->tx_queues, M_IXL);
1291 		vsi->tx_queues = NULL;
1292 	}
1293 	if (vsi->rx_queues != NULL) {
1294 		free(vsi->rx_queues, M_IXL);
1295 		vsi->rx_queues = NULL;
1296 	}
1297 
1298 	if (!IXL_PF_IN_RECOVERY_MODE(pf))
1299 		sysctl_ctx_free(&vsi->sysctl_ctx);
1300 }
1301 
1302 void
1303 ixl_update_link_status(struct ixl_pf *pf)
1304 {
1305 	struct ixl_vsi *vsi = &pf->vsi;
1306 	struct i40e_hw *hw = &pf->hw;
1307 	u64 baudrate;
1308 
1309 	if (pf->link_up) {
1310 		if (vsi->link_active == FALSE) {
1311 			vsi->link_active = TRUE;
1312 			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1313 			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1314 			ixl_link_up_msg(pf);
1315 #ifdef PCI_IOV
1316 			ixl_broadcast_link_state(pf);
1317 #endif
1318 		}
1319 	} else { /* Link down */
1320 		if (vsi->link_active == TRUE) {
1321 			vsi->link_active = FALSE;
1322 			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1323 #ifdef PCI_IOV
1324 			ixl_broadcast_link_state(pf);
1325 #endif
1326 		}
1327 	}
1328 }
1329 
1330 static void
1331 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1332 {
1333 	device_t dev = pf->dev;
1334 	u32 rxq_idx, qtx_ctl;
1335 
1336 	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1337 	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1338 	qtx_ctl = e->desc.params.external.param1;
1339 
1340 	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1341 	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
1342 }
1343 
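/*
 * ixl_process_adminq - drain up to IXL_ADM_LIMIT events from the Admin
 * Receive Queue, dispatch link / VF-message / LAN-overflow handlers,
 * then re-enable the Admin Queue interrupt cause. The number of events
 * still pending is returned through *pending.
 */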
1344 static int
1345 ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
1346 {
1347 	enum i40e_status_code status = I40E_SUCCESS;
1348 	struct i40e_arq_event_info event;
1349 	struct i40e_hw *hw = &pf->hw;
1350 	device_t dev = pf->dev;
1351 	u16 opcode;
1352 	u32 loop = 0, reg;
1353 
1354 	event.buf_len = IXL_AQ_BUF_SZ;
1355 	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
1356 	if (!event.msg_buf) {
1357 		device_printf(dev, "%s: Unable to allocate memory for Admin"
1358 		    " Queue event!\n", __func__);
1359 		return (ENOMEM);
1360 	}
1361 
1362 	/* clean and process any events */
1363 	do {
1364 		status = i40e_clean_arq_element(hw, &event, pending);
1365 		if (status)
1366 			break;
1367 		opcode = LE16_TO_CPU(event.desc.opcode);
1368 		ixl_dbg(pf, IXL_DBG_AQ,
1369 		    "Admin Queue event: %#06x\n", opcode);
1370 		switch (opcode) {
1371 		case i40e_aqc_opc_get_link_status:
1372 			ixl_link_event(pf, &event);
1373 			break;
1374 		case i40e_aqc_opc_send_msg_to_pf:
1375 #ifdef PCI_IOV
1376 			ixl_handle_vf_msg(pf, &event);
1377 #endif
1378 			break;
1379 		/*
1380 		 * This should only occur on no-drop queues, which
1381 		 * aren't currently configured.
1382 		 */
1383 		case i40e_aqc_opc_event_lan_overflow:
1384 			ixl_handle_lan_overflow_event(pf, &event);
1385 			break;
1386 		default:
1387 			break;
1388 		}
1389 	} while (*pending && (loop++ < IXL_ADM_LIMIT));
1390 
1391 	free(event.msg_buf, M_IXL);
1392 
1393 	/* Re-enable admin queue interrupt cause */
1394 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1395 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1396 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1397 
1398 	return (status);
1399 }
1400 
1401 static void
1402 ixl_if_update_admin_status(if_ctx_t ctx)
1403 {
1404 	struct ixl_pf	*pf = iflib_get_softc(ctx);
1405 	struct i40e_hw	*hw = &pf->hw;
1406 	u16		pending;
1407 
1408 	if (IXL_PF_IS_RESETTING(pf))
1409 		ixl_handle_empr_reset(pf);
1410 
1411 	/*
1412 	 * Admin Queue is shut down while handling reset.
1413 	 * Don't proceed if it hasn't been re-initialized
1414 	 * e.g due to an issue with new FW.
1415 	 * e.g. due to an issue with new FW.
1416 	if (!i40e_check_asq_alive(&pf->hw))
1417 		return;
1418 
1419 	if (pf->state & IXL_PF_STATE_MDD_PENDING)
1420 		ixl_handle_mdd_event(pf);
1421 
1422 	ixl_process_adminq(pf, &pending);
1423 	ixl_update_link_status(pf);
1424 
1425 	/*
1426 	 * If there are still messages to process, reschedule ourselves.
1427 	 * Otherwise, re-enable our interrupt and go to sleep.
1428 	 */
1429 	if (pending > 0)
1430 		iflib_admin_intr_deferred(ctx);
1431 	else
1432 		ixl_enable_intr0(hw);
1433 }
1434 
1435 static void
1436 ixl_if_multi_set(if_ctx_t ctx)
1437 {
1438 	struct ixl_pf *pf = iflib_get_softc(ctx);
1439 	struct ixl_vsi *vsi = &pf->vsi;
1440 	struct i40e_hw *hw = vsi->hw;
1441 	int mcnt;
1442 
1443 	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1444 
1445 	/* Delete filters for removed multicast addresses */
1446 	ixl_del_multi(vsi, false);
1447 
1448 	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
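	/*
	 * If there are more multicast addresses than HW filters, fall
	 * back to multicast promiscuous mode and drop the individual
	 * multicast filters.
	 */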
1449 	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1450 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1451 		    vsi->seid, TRUE, NULL);
1452 		ixl_del_multi(vsi, true);
1453 		return;
1454 	}
1455 
1456 	ixl_add_multi(vsi);
1457 	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
1458 }
1459 
1460 static int
1461 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1462 {
1463 	struct ixl_pf *pf = iflib_get_softc(ctx);
1464 	struct ixl_vsi *vsi = &pf->vsi;
1465 
1466 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
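	/*
	 * Reject MTUs that would exceed the maximum frame size. For
	 * example, assuming IXL_MAX_FRAME is 9728, the largest accepted
	 * MTU is 9728 - 14 (ETHER_HDR_LEN) - 4 (ETHER_CRC_LEN) -
	 * 4 (ETHER_VLAN_ENCAP_LEN) = 9706 bytes.
	 */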
1467 	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1468 		ETHER_VLAN_ENCAP_LEN)
1469 		return (EINVAL);
1470 
1471 	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1472 		ETHER_VLAN_ENCAP_LEN;
1473 
1474 	return (0);
1475 }
1476 
1477 static void
1478 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1479 {
1480 	struct ixl_pf *pf = iflib_get_softc(ctx);
1481 	struct i40e_hw  *hw = &pf->hw;
1482 
1483 	INIT_DEBUGOUT("ixl_media_status: begin");
1484 
1485 	ifmr->ifm_status = IFM_AVALID;
1486 	ifmr->ifm_active = IFM_ETHER;
1487 
1488 	if (!pf->link_up) {
1489 		return;
1490 	}
1491 
1492 	ifmr->ifm_status |= IFM_ACTIVE;
1493 	/* Hardware is always full-duplex */
1494 	ifmr->ifm_active |= IFM_FDX;
1495 
1496 	switch (hw->phy.link_info.phy_type) {
1497 		/* 100 M */
1498 		case I40E_PHY_TYPE_100BASE_TX:
1499 			ifmr->ifm_active |= IFM_100_TX;
1500 			break;
1501 		/* 1 G */
1502 		case I40E_PHY_TYPE_1000BASE_T:
1503 			ifmr->ifm_active |= IFM_1000_T;
1504 			break;
1505 		case I40E_PHY_TYPE_1000BASE_SX:
1506 			ifmr->ifm_active |= IFM_1000_SX;
1507 			break;
1508 		case I40E_PHY_TYPE_1000BASE_LX:
1509 			ifmr->ifm_active |= IFM_1000_LX;
1510 			break;
1511 		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1512 			ifmr->ifm_active |= IFM_1000_T;
1513 			break;
1514 		/* 2.5 G */
1515 		case I40E_PHY_TYPE_2_5GBASE_T:
1516 			ifmr->ifm_active |= IFM_2500_T;
1517 			break;
1518 		/* 5 G */
1519 		case I40E_PHY_TYPE_5GBASE_T:
1520 			ifmr->ifm_active |= IFM_5000_T;
1521 			break;
1522 		/* 10 G */
1523 		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1524 			ifmr->ifm_active |= IFM_10G_TWINAX;
1525 			break;
1526 		case I40E_PHY_TYPE_10GBASE_SR:
1527 			ifmr->ifm_active |= IFM_10G_SR;
1528 			break;
1529 		case I40E_PHY_TYPE_10GBASE_LR:
1530 			ifmr->ifm_active |= IFM_10G_LR;
1531 			break;
1532 		case I40E_PHY_TYPE_10GBASE_T:
1533 			ifmr->ifm_active |= IFM_10G_T;
1534 			break;
1535 		case I40E_PHY_TYPE_XAUI:
1536 		case I40E_PHY_TYPE_XFI:
1537 			ifmr->ifm_active |= IFM_10G_TWINAX;
1538 			break;
1539 		case I40E_PHY_TYPE_10GBASE_AOC:
1540 			ifmr->ifm_active |= IFM_10G_AOC;
1541 			break;
1542 		/* 25 G */
1543 		case I40E_PHY_TYPE_25GBASE_KR:
1544 			ifmr->ifm_active |= IFM_25G_KR;
1545 			break;
1546 		case I40E_PHY_TYPE_25GBASE_CR:
1547 			ifmr->ifm_active |= IFM_25G_CR;
1548 			break;
1549 		case I40E_PHY_TYPE_25GBASE_SR:
1550 			ifmr->ifm_active |= IFM_25G_SR;
1551 			break;
1552 		case I40E_PHY_TYPE_25GBASE_LR:
1553 			ifmr->ifm_active |= IFM_25G_LR;
1554 			break;
1555 		case I40E_PHY_TYPE_25GBASE_AOC:
1556 			ifmr->ifm_active |= IFM_25G_AOC;
1557 			break;
1558 		case I40E_PHY_TYPE_25GBASE_ACC:
1559 			ifmr->ifm_active |= IFM_25G_ACC;
1560 			break;
1561 		/* 40 G */
1562 		case I40E_PHY_TYPE_40GBASE_CR4:
1563 		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1564 			ifmr->ifm_active |= IFM_40G_CR4;
1565 			break;
1566 		case I40E_PHY_TYPE_40GBASE_SR4:
1567 			ifmr->ifm_active |= IFM_40G_SR4;
1568 			break;
1569 		case I40E_PHY_TYPE_40GBASE_LR4:
1570 			ifmr->ifm_active |= IFM_40G_LR4;
1571 			break;
1572 		case I40E_PHY_TYPE_XLAUI:
1573 			ifmr->ifm_active |= IFM_OTHER;
1574 			break;
1575 		case I40E_PHY_TYPE_1000BASE_KX:
1576 			ifmr->ifm_active |= IFM_1000_KX;
1577 			break;
1578 		case I40E_PHY_TYPE_SGMII:
1579 			ifmr->ifm_active |= IFM_1000_SGMII;
1580 			break;
1581 		/* ERJ: What's the difference between these? */
1582 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1583 		case I40E_PHY_TYPE_10GBASE_CR1:
1584 			ifmr->ifm_active |= IFM_10G_CR1;
1585 			break;
1586 		case I40E_PHY_TYPE_10GBASE_KX4:
1587 			ifmr->ifm_active |= IFM_10G_KX4;
1588 			break;
1589 		case I40E_PHY_TYPE_10GBASE_KR:
1590 			ifmr->ifm_active |= IFM_10G_KR;
1591 			break;
1592 		case I40E_PHY_TYPE_SFI:
1593 			ifmr->ifm_active |= IFM_10G_SFI;
1594 			break;
1595 		/* Our single 20G media type */
1596 		case I40E_PHY_TYPE_20GBASE_KR2:
1597 			ifmr->ifm_active |= IFM_20G_KR2;
1598 			break;
1599 		case I40E_PHY_TYPE_40GBASE_KR4:
1600 			ifmr->ifm_active |= IFM_40G_KR4;
1601 			break;
1602 		case I40E_PHY_TYPE_XLPPI:
1603 		case I40E_PHY_TYPE_40GBASE_AOC:
1604 			ifmr->ifm_active |= IFM_40G_XLPPI;
1605 			break;
1606 		/* Unknown to driver */
1607 		default:
1608 			ifmr->ifm_active |= IFM_UNKNOWN;
1609 			break;
1610 	}
1611 	/* Report flow control status as well */
1612 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1613 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1614 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1615 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1616 }
1617 
1618 static int
1619 ixl_if_media_change(if_ctx_t ctx)
1620 {
1621 	struct ifmedia *ifm = iflib_get_media(ctx);
1622 
1623 	INIT_DEBUGOUT("ixl_media_change: begin");
1624 
1625 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1626 		return (EINVAL);
1627 
1628 	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1629 	return (ENODEV);
1630 }
1631 
1632 static int
1633 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1634 {
1635 	struct ixl_pf *pf = iflib_get_softc(ctx);
1636 	struct ixl_vsi *vsi = &pf->vsi;
1637 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1638 	struct i40e_hw	*hw = vsi->hw;
1639 	int		err;
1640 	bool		uni = FALSE, multi = FALSE;
1641 
1642 	if (flags & IFF_PROMISC)
1643 		uni = multi = TRUE;
1644 	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1645 	    MAX_MULTICAST_ADDR)
1646 		multi = TRUE;
1647 
1648 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1649 	    vsi->seid, uni, NULL, true);
1650 	if (err)
1651 		return (err);
1652 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1653 	    vsi->seid, multi, NULL);
1654 	return (err);
1655 }
1656 
1657 static void
1658 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1659 {
1660 	struct ixl_pf *pf = iflib_get_softc(ctx);
1661 
1662 	if (qid != 0)
1663 		return;
1664 
1665 	ixl_update_stats_counters(pf);
1666 }
1667 
1668 static void
1669 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1670 {
1671 	struct ixl_pf *pf = iflib_get_softc(ctx);
1672 	struct ixl_vsi *vsi = &pf->vsi;
1673 	struct i40e_hw	*hw = vsi->hw;
1674 	if_t ifp = iflib_get_ifp(ctx);
1675 
1676 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1677 		return;
1678 
1679 	/*
1680 	 * Keep track of registered VLANS to know what
1681 	 * filters have to be configured when VLAN_HWFILTER
1682 	 * capability is enabled.
1683 	 */
1684 	++vsi->num_vlans;
1685 	bit_set(vsi->vlans_map, vtag);
1686 
1687 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1688 		return;
1689 
1690 	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1691 		ixl_add_filter(vsi, hw->mac.addr, vtag);
1692 	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1693 		/*
1694 		 * There is not enough HW resources to add filters
1695 		 * for all registered VLANs. Re-configure filtering
1696 		 * to allow reception of all expected traffic.
1697 		 */
1698 		device_printf(vsi->dev,
1699 		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled\n");
1700 		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
1701 		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1702 	}
1703 }
1704 
1705 static void
1706 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1707 {
1708 	struct ixl_pf *pf = iflib_get_softc(ctx);
1709 	struct ixl_vsi *vsi = &pf->vsi;
1710 	struct i40e_hw	*hw = vsi->hw;
1711 	if_t ifp = iflib_get_ifp(ctx);
1712 
1713 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1714 		return;
1715 
1716 	--vsi->num_vlans;
1717 	bit_clear(vsi->vlans_map, vtag);
1718 
1719 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1720 		return;
1721 
1722 	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1723 		ixl_del_filter(vsi, hw->mac.addr, vtag);
1724 	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1725 		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1726 		ixl_add_vlan_filters(vsi, hw->mac.addr);
1727 	}
1728 }
1729 
1730 static uint64_t
1731 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1732 {
1733 	struct ixl_pf *pf = iflib_get_softc(ctx);
1734 	struct ixl_vsi *vsi = &pf->vsi;
1735 	if_t ifp = iflib_get_ifp(ctx);
1736 
1737 	switch (cnt) {
1738 	case IFCOUNTER_IPACKETS:
1739 		return (vsi->ipackets);
1740 	case IFCOUNTER_IERRORS:
1741 		return (vsi->ierrors);
1742 	case IFCOUNTER_OPACKETS:
1743 		return (vsi->opackets);
1744 	case IFCOUNTER_OERRORS:
1745 		return (vsi->oerrors);
1746 	case IFCOUNTER_COLLISIONS:
1747 		/* Collisions are impossible by standard in full-duplex 10G/40G Ethernet */
1748 		return (0);
1749 	case IFCOUNTER_IBYTES:
1750 		return (vsi->ibytes);
1751 	case IFCOUNTER_OBYTES:
1752 		return (vsi->obytes);
1753 	case IFCOUNTER_IMCASTS:
1754 		return (vsi->imcasts);
1755 	case IFCOUNTER_OMCASTS:
1756 		return (vsi->omcasts);
1757 	case IFCOUNTER_IQDROPS:
1758 		return (vsi->iqdrops);
1759 	case IFCOUNTER_OQDROPS:
1760 		return (vsi->oqdrops);
1761 	case IFCOUNTER_NOPROTO:
1762 		return (vsi->noproto);
1763 	default:
1764 		return (if_get_counter_default(ifp, cnt));
1765 	}
1766 }
1767 
1768 #ifdef PCI_IOV
1769 static void
1770 ixl_if_vflr_handle(if_ctx_t ctx)
1771 {
1772 	struct ixl_pf *pf = iflib_get_softc(ctx);
1773 
1774 	ixl_handle_vflr(pf);
1775 }
1776 #endif
1777 
1778 static int
1779 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1780 {
1781 	struct ixl_pf		*pf = iflib_get_softc(ctx);
1782 
1783 	if (pf->read_i2c_byte == NULL)
1784 		return (EINVAL);
1785 
1786 	for (int i = 0; i < req->len; i++)
1787 		if (pf->read_i2c_byte(pf, req->offset + i,
1788 		    req->dev_addr, &req->data[i]))
1789 			return (EIO);
1790 	return (0);
1791 }
1792 
1793 static int
1794 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1795 {
1796 	struct ixl_pf *pf = iflib_get_softc(ctx);
1797 	struct ifdrv *ifd = (struct ifdrv *)data;
1798 	int error = 0;
1799 
1800 	/*
1801 	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
1802 	 * performing privilege checks. It is important that this function
1803 	 * perform the necessary checks for commands which should only be
1804 	 * executed by privileged threads.
1805 	 */
1806 
1807 	switch(command) {
1808 	case SIOCGDRVSPEC:
1809 	case SIOCSDRVSPEC:
1810 		/* NVM update command */
1811 		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1812 			error = priv_check(curthread, PRIV_DRIVER);
1813 			if (error)
1814 				break;
1815 			error = ixl_handle_nvmupd_cmd(pf, ifd);
1816 		} else {
1817 			error = EINVAL;
1818 		}
1819 		break;
1820 	default:
1821 		error = EOPNOTSUPP;
1822 	}
1823 
1824 	return (error);
1825 }
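/*
 * Hypothetical userland sketch (not part of the driver): NVM update
 * tools reach the handler above via SIOCxDRVSPEC with ifd_cmd set to
 * I40E_NVM_ACCESS, along these lines:
 *
 *	struct ifdrv ifd = { .ifd_cmd = I40E_NVM_ACCESS,
 *	    .ifd_len = len, .ifd_data = buf };
 *	strlcpy(ifd.ifd_name, "ixl0", sizeof(ifd.ifd_name));
 *	ioctl(sock, SIOCSDRVSPEC, &ifd);
 */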
1826 
1827 /* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1828  * @ctx: iflib context
1829  * @event: event code to check
1830  *
1831  * Defaults to returning false for every event.
1832  *
1833  * @returns true if iflib needs to reinit the interface, false otherwise
1834  */
1835 static bool
1836 ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1837 {
1838 	switch (event) {
1839 	case IFLIB_RESTART_VLAN_CONFIG:
1840 	default:
1841 		return (false);
1842 	}
1843 }
1844 
1845 /*
1846  * Sanity check and save off tunable values.
1847  */
1848 static void
1849 ixl_save_pf_tunables(struct ixl_pf *pf)
1850 {
1851 	device_t dev = pf->dev;
1852 
1853 	/* Save tunable information */
1854 #ifdef IXL_DEBUG_FC
1855 	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1856 #endif
1857 #ifdef IXL_DEBUG
1858 	pf->recovery_mode = ixl_debug_recovery_mode;
1859 #endif
1860 	pf->dbg_mask = ixl_core_debug_mask;
1861 	pf->hw.debug_mask = ixl_shared_debug_mask;
1862 	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1863 	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1864 #if 0
1865 	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1866 	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1867 #endif
1868 
1869 	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1870 		pf->i2c_access_method = 0;
1871 	else
1872 		pf->i2c_access_method = ixl_i2c_access_method;
1873 
1874 	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1875 		device_printf(dev, "Invalid tx_itr value of %d set!\n",
1876 		    ixl_tx_itr);
1877 		device_printf(dev, "tx_itr must be between %d and %d, "
1878 		    "inclusive\n",
1879 		    0, IXL_MAX_ITR);
1880 		device_printf(dev, "Using default value of %d instead\n",
1881 		    IXL_ITR_4K);
1882 		pf->tx_itr = IXL_ITR_4K;
1883 	} else
1884 		pf->tx_itr = ixl_tx_itr;
1885 
1886 	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1887 		device_printf(dev, "Invalid rx_itr value of %d set!\n",
1888 		    ixl_rx_itr);
1889 		device_printf(dev, "rx_itr must be between %d and %d, "
1890 		    "inclusive\n",
1891 		    0, IXL_MAX_ITR);
1892 		device_printf(dev, "Using default value of %d instead\n",
1893 		    IXL_ITR_8K);
1894 		pf->rx_itr = IXL_ITR_8K;
1895 	} else
1896 		pf->rx_itr = ixl_rx_itr;
1897 }
1898 
1899