xref: /freebsd/sys/dev/ixl/if_ixl.c (revision 0b37c1590418417c894529d371800dfac71ef887)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl.h"
36 #include "ixl_pf.h"
37 
38 #ifdef IXL_IW
39 #include "ixl_iw.h"
40 #include "ixl_iw_int.h"
41 #endif
42 
43 #ifdef PCI_IOV
44 #include "ixl_pf_iov.h"
45 #endif
46 
/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	1
#define IXL_DRIVER_VERSION_BUILD	0

/* Stringified "major.minor.build-k" version reported to iflib */
#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	/* required last entry */
	PVID_END
};
88 
89 /*********************************************************************
90  *  Function prototypes
91  *********************************************************************/
92 /*** IFLIB interface ***/
93 static void	*ixl_register(device_t dev);
94 static int	 ixl_if_attach_pre(if_ctx_t ctx);
95 static int	 ixl_if_attach_post(if_ctx_t ctx);
96 static int	 ixl_if_detach(if_ctx_t ctx);
97 static int	 ixl_if_shutdown(if_ctx_t ctx);
98 static int	 ixl_if_suspend(if_ctx_t ctx);
99 static int	 ixl_if_resume(if_ctx_t ctx);
100 static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
101 static void	 ixl_if_enable_intr(if_ctx_t ctx);
102 static void	 ixl_if_disable_intr(if_ctx_t ctx);
103 static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
104 static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
105 static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
106 static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
107 static void	 ixl_if_queues_free(if_ctx_t ctx);
108 static void	 ixl_if_update_admin_status(if_ctx_t ctx);
109 static void	 ixl_if_multi_set(if_ctx_t ctx);
110 static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
111 static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
112 static int	 ixl_if_media_change(if_ctx_t ctx);
113 static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
114 static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
115 static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
116 static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
117 static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
118 static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
119 static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
120 #ifdef PCI_IOV
121 static void	 ixl_if_vflr_handle(if_ctx_t ctx);
122 #endif
123 
124 /*** Other ***/
125 static u_int	 ixl_mc_filter_apply(void *, struct sockaddr_dl *, u_int);
126 static void	 ixl_save_pf_tunables(struct ixl_pf *);
127 static int	 ixl_allocate_pci_resources(struct ixl_pf *);
128 
129 /*********************************************************************
130  *  FreeBSD Device Interface Entry Points
131  *********************************************************************/
132 
/*
 * newbus device methods: probe/attach/detach/shutdown are all delegated
 * to iflib, which calls back into this driver via ixl_if_methods below.
 */
static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	/* SR-IOV entry points are likewise routed through iflib */
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);
160 
/*
 * iflib driver methods: the ifdi_* callbacks iflib invokes to drive this
 * device (attach/detach, queue setup, interrupts, media, filters, etc).
 */
static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};
204 
/*
** TUNEABLE PARAMETERS:
**
** Each tunable below is exposed both as a loader tunable (TUNABLE_INT)
** and as a read-only-tunable sysctl under hw.ixl.
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
    "ixl driver parameters");

/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");

/* Selects how SFP module I2C data is read; see IXL_SYSCTL_HELP_I2C_METHOD */
static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

/* Static interrupt throttling rates (ITR) for RX and TX */
static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif
304 
/* TX/RX fast-path method tables: head-writeback vs descriptor-writeback */
extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

/*
 * Shared context template handed to iflib by ixl_register(); describes
 * DMA alignment, segment limits, and descriptor-ring bounds for the PF.
 */
static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

if_shared_ctx_t ixl_sctx = &ixl_sctx_init;
337 
338 /*** Functions ***/
339 static void *
340 ixl_register(device_t dev)
341 {
342 	return (ixl_sctx);
343 }
344 
345 static int
346 ixl_allocate_pci_resources(struct ixl_pf *pf)
347 {
348 	device_t dev = iflib_get_dev(pf->vsi.ctx);
349 	struct i40e_hw *hw = &pf->hw;
350 	int             rid;
351 
352 	/* Map BAR0 */
353 	rid = PCIR_BAR(0);
354 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
355 	    &rid, RF_ACTIVE);
356 
357 	if (!(pf->pci_mem)) {
358 		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
359 		return (ENXIO);
360 	}
361 
362 	/* Save off the PCI information */
363 	hw->vendor_id = pci_get_vendor(dev);
364 	hw->device_id = pci_get_device(dev);
365 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
366 	hw->subsystem_vendor_id =
367 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
368 	hw->subsystem_device_id =
369 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
370 
371 	hw->bus.device = pci_get_slot(dev);
372 	hw->bus.func = pci_get_function(dev);
373 
374 	/* Save off register access information */
375 	pf->osdep.mem_bus_space_tag =
376 		rman_get_bustag(pf->pci_mem);
377 	pf->osdep.mem_bus_space_handle =
378 		rman_get_bushandle(pf->pci_mem);
379 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
380 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
381 	pf->osdep.dev = dev;
382 
383 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
384 	pf->hw.back = &pf->osdep;
385 
386  	return (0);
387  }
388 
389 static int
390 ixl_if_attach_pre(if_ctx_t ctx)
391 {
392 	device_t dev;
393 	struct ixl_pf *pf;
394 	struct i40e_hw *hw;
395 	struct ixl_vsi *vsi;
396 	if_softc_ctx_t scctx;
397 	struct i40e_filter_control_settings filter;
398 	enum i40e_status_code status;
399 	int error = 0;
400 
401 	INIT_DBG_DEV(dev, "begin");
402 
403 	dev = iflib_get_dev(ctx);
404 	pf = iflib_get_softc(ctx);
405 
406 	vsi = &pf->vsi;
407 	vsi->back = pf;
408 	pf->dev = dev;
409 	hw = &pf->hw;
410 
411 	vsi->dev = dev;
412 	vsi->hw = &pf->hw;
413 	vsi->id = 0;
414 	vsi->num_vlans = 0;
415 	vsi->ctx = ctx;
416 	vsi->media = iflib_get_media(ctx);
417 	vsi->shared = scctx = iflib_get_softc_ctx(ctx);
418 
419 	/* Save tunable values */
420 	ixl_save_pf_tunables(pf);
421 
422 	/* Do PCI setup - map BAR0, etc */
423 	if (ixl_allocate_pci_resources(pf)) {
424 		device_printf(dev, "Allocation of PCI resources failed\n");
425 		error = ENXIO;
426 		goto err_pci_res;
427 	}
428 
429 	/* Establish a clean starting point */
430 	i40e_clear_hw(hw);
431 	status = i40e_pf_reset(hw);
432 	if (status) {
433 		device_printf(dev, "PF reset failure %s\n",
434 		    i40e_stat_str(hw, status));
435 		error = EIO;
436 		goto err_out;
437 	}
438 
439 	/* Initialize the shared code */
440 	status = i40e_init_shared_code(hw);
441 	if (status) {
442 		device_printf(dev, "Unable to initialize shared code, error %s\n",
443 		    i40e_stat_str(hw, status));
444 		error = EIO;
445 		goto err_out;
446 	}
447 
448 	/* Set up the admin queue */
449 	hw->aq.num_arq_entries = IXL_AQ_LEN;
450 	hw->aq.num_asq_entries = IXL_AQ_LEN;
451 	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
452 	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
453 
454 	status = i40e_init_adminq(hw);
455 	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
456 		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
457 		    i40e_stat_str(hw, status));
458 		error = EIO;
459 		goto err_out;
460 	}
461 	ixl_print_nvm_version(pf);
462 
463 	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
464 		device_printf(dev, "The driver for the device stopped "
465 		    "because the NVM image is newer than expected.\n");
466 		device_printf(dev, "You must install the most recent version of "
467 		    "the network driver.\n");
468 		error = EIO;
469 		goto err_out;
470 	}
471 
472         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
473 	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
474 		device_printf(dev, "The driver for the device detected "
475 		    "a newer version of the NVM image than expected.\n");
476 		device_printf(dev, "Please install the most recent version "
477 		    "of the network driver.\n");
478 	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
479 		device_printf(dev, "The driver for the device detected "
480 		    "an older version of the NVM image than expected.\n");
481 		device_printf(dev, "Please update the NVM image.\n");
482 	}
483 
484 	/* Clear PXE mode */
485 	i40e_clear_pxe_mode(hw);
486 
487 	/* Get capabilities from the device */
488 	error = ixl_get_hw_capabilities(pf);
489 	if (error) {
490 		device_printf(dev, "get_hw_capabilities failed: %d\n",
491 		    error);
492 		goto err_get_cap;
493 	}
494 
495 	/* Set up host memory cache */
496 	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
497 	    hw->func_caps.num_rx_qp, 0, 0);
498 	if (status) {
499 		device_printf(dev, "init_lan_hmc failed: %s\n",
500 		    i40e_stat_str(hw, status));
501 		goto err_get_cap;
502 	}
503 	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
504 	if (status) {
505 		device_printf(dev, "configure_lan_hmc failed: %s\n",
506 		    i40e_stat_str(hw, status));
507 		goto err_mac_hmc;
508 	}
509 
510 	/* Disable LLDP from the firmware for certain NVM versions */
511 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
512 	    (pf->hw.aq.fw_maj_ver < 4)) {
513 		i40e_aq_stop_lldp(hw, TRUE, NULL);
514 		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
515 	}
516 
517 	/* Get MAC addresses from hardware */
518 	i40e_get_mac_addr(hw, hw->mac.addr);
519 	error = i40e_validate_mac_addr(hw->mac.addr);
520 	if (error) {
521 		device_printf(dev, "validate_mac_addr failed: %d\n", error);
522 		goto err_mac_hmc;
523 	}
524 	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
525 	iflib_set_mac(ctx, hw->mac.addr);
526 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
527 
528 	/* Set up the device filtering */
529 	bzero(&filter, sizeof(filter));
530 	filter.enable_ethtype = TRUE;
531 	filter.enable_macvlan = TRUE;
532 	filter.enable_fdir = FALSE;
533 	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
534 	if (i40e_set_filter_control(hw, &filter))
535 		device_printf(dev, "i40e_set_filter_control() failed\n");
536 
537 	/* Query device FW LLDP status */
538 	ixl_get_fw_lldp_status(pf);
539 	/* Tell FW to apply DCB config on link up */
540 	i40e_aq_set_dcb_parameters(hw, true, NULL);
541 
542 	/* Fill out iflib parameters */
543 	if (hw->mac.type == I40E_MAC_X722)
544 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
545 	else
546 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
547 	if (vsi->enable_head_writeback) {
548 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
549 		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
550 		scctx->isc_txrx = &ixl_txrx_hwb;
551 	} else {
552 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
553 		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
554 		scctx->isc_txrx = &ixl_txrx_dwb;
555 	}
556 	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
557 	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
558 	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
559 	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
560 	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
561 	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
562 	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
563 	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
564 	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
565 	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
566 	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
567 
568 	INIT_DBG_DEV(dev, "end");
569 	return (0);
570 
571 err_mac_hmc:
572 	i40e_shutdown_lan_hmc(hw);
573 err_get_cap:
574 	i40e_shutdown_adminq(hw);
575 err_out:
576 	ixl_free_pci_resources(pf);
577 err_pci_res:
578 	return (error);
579 }
580 
/*
 * ixl_if_attach_post - Second-stage device attach.
 *
 * Runs after iflib has sized and allocated the queues: sets up the
 * ifnet, reads link status, configures the HW switch, initializes the
 * MAC-filter list and the PF queue manager, masks PHY interrupts,
 * enables the admin-queue interrupt (MSI-X only), and registers
 * sysctls/statistics.  SR-IOV and iWARP hooks run last when compiled in.
 *
 * On error this returns a nonzero errno; iflib then invokes
 * ixl_if_detach() to unwind, so no explicit cleanup is done here.
 */
static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	INIT_DBG_DEV(dev, "begin");

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);
	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
        if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
 		ixl_configure_intr0_msix(pf);
 		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Force a fresh link query, then push state to the ifnet layer */
	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	/* iWARP requires both device capability and dedicated MSI-X vectors */
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}
709 
/**
 * ixl_if_detach - Tear down the device.
 *
 * Detaches the iWARP client (if enabled), removes media types, shuts
 * down the LAN HMC and admin queue, and releases the queue manager,
 * PCI resources, and MAC filter list - the reverse of attach_pre/post.
 *
 * XXX: iflib always ignores the return value of detach()
 * -> This means that this isn't allowed to fail
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			/* Cannot fail detach (see XXX above), so only warn */
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC; hmc_obj is NULL if HMC init never completed */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status)
			device_printf(dev,
			    "i40e_shutdown_lan_hmc() failed with status %s\n",
			    i40e_stat_str(hw, status));
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_mac_filters(vsi);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
763 
764 static int
765 ixl_if_shutdown(if_ctx_t ctx)
766 {
767 	int error = 0;
768 
769 	INIT_DEBUGOUT("ixl_if_shutdown: begin");
770 
771 	/* TODO: Call ixl_if_stop()? */
772 
773 	/* TODO: Then setup low power mode */
774 
775 	return (error);
776 }
777 
778 static int
779 ixl_if_suspend(if_ctx_t ctx)
780 {
781 	int error = 0;
782 
783 	INIT_DEBUGOUT("ixl_if_suspend: begin");
784 
785 	/* TODO: Call ixl_if_stop()? */
786 
787 	/* TODO: Then setup low power mode */
788 
789 	return (error);
790 }
791 
792 static int
793 ixl_if_resume(if_ctx_t ctx)
794 {
795 	struct ifnet *ifp = iflib_get_ifp(ctx);
796 
797 	INIT_DEBUGOUT("ixl_if_resume: begin");
798 
799 	/* Read & clear wake-up registers */
800 
801 	/* Required after D3->D0 transition */
802 	if (ifp->if_flags & IFF_UP)
803 		ixl_if_init(ctx);
804 
805 	return (0);
806 }
807 
/*
 * ixl_if_init - Bring the interface to a running state.
 *
 * Rebuilds HW state if the admin queue has died, applies any
 * locally-administered MAC address, programs the VSI (rings, HMC
 * contexts), reloads multicast/unicast filters and RSS, routes
 * interrupts (MSI-X or legacy), enables the rings, and restores
 * promiscuous mode.  Errors are reported via device_printf and abort
 * the init (the ifdi_init contract has no return value).
 */
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		/* Address changed: swap filters and push the LAA to firmware */
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	/* TX completion tracking depends on the selected writeback mode */
	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configure filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}
891 
892 void
893 ixl_if_stop(if_ctx_t ctx)
894 {
895 	struct ixl_pf *pf = iflib_get_softc(ctx);
896 	struct ixl_vsi *vsi = &pf->vsi;
897 
898 	INIT_DEBUGOUT("ixl_if_stop: begin\n");
899 
900 	// TODO: This may need to be reworked
901 #ifdef IXL_IW
902 	/* Stop iWARP device */
903 	if (ixl_enable_iwarp && pf->iw_enabled)
904 		ixl_iw_pf_stop(pf);
905 #endif
906 
907 	ixl_disable_rings_intr(vsi);
908 	ixl_disable_rings(pf, vsi, &pf->qtag);
909 }
910 
/*
 * ixl_if_msix_intr_assign: iflib callback to assign MSI-X vectors.
 *
 * Vector 0 is reserved for the Admin Queue; each Rx queue gets its own
 * vector starting at 1.  Tx queues do not get dedicated vectors: each
 * is attached as a softirq to an Rx queue's interrupt, chosen
 * round-robin when there are more Tx queues than Rx queues.
 *
 * Returns 0 on success, or the iflib_irq_alloc_generic() error after
 * releasing everything allocated so far.
 */
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Que must use vector 0; rid is the 1-based PCI resource id */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Que handler");
		return (err);
	}
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			/* NOTE(review): i + 1 makes the cleanup loop below also
			 * call iflib_irq_free() on the queue whose allocation
			 * just failed — confirm iflib tolerates freeing an
			 * unregistered irq. */
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	/* Attach each Tx queue to an Rx queue's interrupt, round-robin */
	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		* interrupts to map Tx queues to. I don't know if there's an immediately
		* better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	/* Release the admin vector and every Rx queue irq touched so far */
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}
977 
978 /*
979  * Enable all interrupts
980  *
981  * Called in:
982  * iflib_init_locked, after ixl_if_init()
983  */
984 static void
985 ixl_if_enable_intr(if_ctx_t ctx)
986 {
987 	struct ixl_pf *pf = iflib_get_softc(ctx);
988 	struct ixl_vsi *vsi = &pf->vsi;
989 	struct i40e_hw		*hw = vsi->hw;
990 	struct ixl_rx_queue	*que = vsi->rx_queues;
991 
992 	ixl_enable_intr0(hw);
993 	/* Enable queue interrupts */
994 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
995 		/* TODO: Queue index parameter is probably wrong */
996 		ixl_enable_queue(hw, que->rxr.me);
997 }
998 
999 /*
1000  * Disable queue interrupts
1001  *
1002  * Other interrupt causes need to remain active.
1003  */
1004 static void
1005 ixl_if_disable_intr(if_ctx_t ctx)
1006 {
1007 	struct ixl_pf *pf = iflib_get_softc(ctx);
1008 	struct ixl_vsi *vsi = &pf->vsi;
1009 	struct i40e_hw		*hw = vsi->hw;
1010 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1011 
1012 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1013 		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1014 			ixl_disable_queue(hw, rx_que->msix - 1);
1015 	} else {
1016 		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1017 		// stops queues from triggering interrupts
1018 		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1019 	}
1020 }
1021 
1022 static int
1023 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1024 {
1025 	struct ixl_pf *pf = iflib_get_softc(ctx);
1026 	struct ixl_vsi *vsi = &pf->vsi;
1027 	struct i40e_hw		*hw = vsi->hw;
1028 	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];
1029 
1030 	ixl_enable_queue(hw, rx_que->msix - 1);
1031 	return (0);
1032 }
1033 
1034 static int
1035 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1036 {
1037 	struct ixl_pf *pf = iflib_get_softc(ctx);
1038 	struct ixl_vsi *vsi = &pf->vsi;
1039 	struct i40e_hw *hw = vsi->hw;
1040 	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1041 
1042 	ixl_enable_queue(hw, tx_que->msix - 1);
1043 	return (0);
1044 }
1045 
/*
 * ixl_if_tx_queues_alloc: iflib callback to allocate driver-side Tx
 * queue state for the descriptor rings iflib has already DMA-mapped.
 *
 * vaddrs/paddrs hold one ring per queue set (ntxqs == 1).  When head
 * writeback is disabled, a per-ring report-status array (tx_rsq) is
 * also allocated to track completed descriptor indices.
 *
 * Returns 0 on success or ENOMEM, after releasing any partial
 * allocations via ixl_if_queues_free().
 */
static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array to "no descriptor pending" */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	/* Frees tx_queues and any tx_rsq arrays allocated above */
	ixl_if_queues_free(ctx);
	return (error);
}
1095 
/*
 * ixl_if_rx_queues_alloc: iflib callback to allocate driver-side Rx
 * queue state for the descriptor rings iflib has already DMA-mapped.
 *
 * vaddrs/paddrs hold one ring per queue set (nrxqs == 1).
 *
 * Returns 0 on success or ENOMEM, after releasing any partial
 * allocations via ixl_if_queues_free().
 */
static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	/* Also releases tx_queues set up by ixl_if_tx_queues_alloc() */
	ixl_if_queues_free(ctx);
	return (error);
}
1138 
1139 static void
1140 ixl_if_queues_free(if_ctx_t ctx)
1141 {
1142 	struct ixl_pf *pf = iflib_get_softc(ctx);
1143 	struct ixl_vsi *vsi = &pf->vsi;
1144 
1145 	if (!vsi->enable_head_writeback) {
1146 		struct ixl_tx_queue *que;
1147 		int i = 0;
1148 
1149 		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1150 			struct tx_ring *txr = &que->txr;
1151 			if (txr->tx_rsq != NULL) {
1152 				free(txr->tx_rsq, M_IXL);
1153 				txr->tx_rsq = NULL;
1154 			}
1155 		}
1156 	}
1157 
1158 	if (vsi->tx_queues != NULL) {
1159 		free(vsi->tx_queues, M_IXL);
1160 		vsi->tx_queues = NULL;
1161 	}
1162 	if (vsi->rx_queues != NULL) {
1163 		free(vsi->rx_queues, M_IXL);
1164 		vsi->rx_queues = NULL;
1165 	}
1166 }
1167 
1168 void
1169 ixl_update_link_status(struct ixl_pf *pf)
1170 {
1171 	struct ixl_vsi *vsi = &pf->vsi;
1172 	struct i40e_hw *hw = &pf->hw;
1173 	u64 baudrate;
1174 
1175 	if (pf->link_up) {
1176 		if (vsi->link_active == FALSE) {
1177 			vsi->link_active = TRUE;
1178 			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1179 			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1180 			ixl_link_up_msg(pf);
1181 #ifdef PCI_IOV
1182 			ixl_broadcast_link_state(pf);
1183 #endif
1184 
1185 		}
1186 	} else { /* Link down */
1187 		if (vsi->link_active == TRUE) {
1188 			vsi->link_active = FALSE;
1189 			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1190 #ifdef PCI_IOV
1191 			ixl_broadcast_link_state(pf);
1192 #endif
1193 		}
1194 	}
1195 }
1196 
1197 static void
1198 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1199 {
1200 	device_t dev = pf->dev;
1201 	u32 rxq_idx, qtx_ctl;
1202 
1203 	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1204 	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1205 	qtx_ctl = e->desc.params.external.param1;
1206 
1207 	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1208 	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
1209 }
1210 
1211 static int
1212 ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
1213 {
1214 	enum i40e_status_code status = I40E_SUCCESS;
1215 	struct i40e_arq_event_info event;
1216 	struct i40e_hw *hw = &pf->hw;
1217 	device_t dev = pf->dev;
1218 	u16 opcode;
1219 	u32 loop = 0, reg;
1220 
1221 	event.buf_len = IXL_AQ_BUF_SZ;
1222 	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
1223 	if (!event.msg_buf) {
1224 		device_printf(dev, "%s: Unable to allocate memory for Admin"
1225 		    " Queue event!\n", __func__);
1226 		return (ENOMEM);
1227 	}
1228 
1229 	/* clean and process any events */
1230 	do {
1231 		status = i40e_clean_arq_element(hw, &event, pending);
1232 		if (status)
1233 			break;
1234 		opcode = LE16_TO_CPU(event.desc.opcode);
1235 		ixl_dbg(pf, IXL_DBG_AQ,
1236 		    "Admin Queue event: %#06x\n", opcode);
1237 		switch (opcode) {
1238 		case i40e_aqc_opc_get_link_status:
1239 			ixl_link_event(pf, &event);
1240 			break;
1241 		case i40e_aqc_opc_send_msg_to_pf:
1242 #ifdef PCI_IOV
1243 			ixl_handle_vf_msg(pf, &event);
1244 #endif
1245 			break;
1246 		/*
1247 		 * This should only occur on no-drop queues, which
1248 		 * aren't currently configured.
1249 		 */
1250 		case i40e_aqc_opc_event_lan_overflow:
1251 			ixl_handle_lan_overflow_event(pf, &event);
1252 			break;
1253 		default:
1254 			break;
1255 		}
1256 	} while (*pending && (loop++ < IXL_ADM_LIMIT));
1257 
1258 	free(event.msg_buf, M_IXL);
1259 
1260 	/* Re-enable admin queue interrupt cause */
1261 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1262 	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1263 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1264 
1265 	return (status);
1266 }
1267 
1268 static void
1269 ixl_if_update_admin_status(if_ctx_t ctx)
1270 {
1271 	struct ixl_pf			*pf = iflib_get_softc(ctx);
1272 	struct i40e_hw			*hw = &pf->hw;
1273 	u16				pending;
1274 
1275 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
1276 		ixl_handle_empr_reset(pf);
1277 
1278 	if (pf->state & IXL_PF_STATE_MDD_PENDING)
1279 		ixl_handle_mdd_event(pf);
1280 
1281 	ixl_process_adminq(pf, &pending);
1282 	ixl_update_link_status(pf);
1283 	ixl_update_stats_counters(pf);
1284 
1285 	/*
1286 	 * If there are still messages to process, reschedule ourselves.
1287 	 * Otherwise, re-enable our interrupt and go to sleep.
1288 	 */
1289 	if (pending > 0)
1290 		iflib_admin_intr_deferred(ctx);
1291 	else
1292 		ixl_enable_intr0(hw);
1293 }
1294 
/*
 * ixl_if_multi_set: iflib callback to sync the HW multicast filters
 * with the interface's current multicast address list.
 *
 * Deletes filters for removed addresses, then either enables
 * multicast promiscuous mode (when the list has reached
 * MAX_MULTICAST_ADDR, i.e. too many addresses to filter exactly) or
 * re-installs a filter for every address on the list.
 */
static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt, flags;
	int del_mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
	/* Delete filters for removed multicast addresses */
	del_mcnt = ixl_del_multi(vsi);
	vsi->num_macs -= del_mcnt;

	/* List is (at least) full: fall back to multicast promiscuous */
	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}
	/* (re-)install filters for all mcast addresses */
	/* XXX: This bypasses filter count tracking code! */
	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
	if (mcnt > 0) {
		vsi->num_macs += mcnt;
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	ixl_dbg_filter(pf, "%s: filter mac total: %d\n",
	    __func__, vsi->num_macs);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}
1329 
1330 static int
1331 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1332 {
1333 	struct ixl_pf *pf = iflib_get_softc(ctx);
1334 	struct ixl_vsi *vsi = &pf->vsi;
1335 
1336 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1337 	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1338 		ETHER_VLAN_ENCAP_LEN)
1339 		return (EINVAL);
1340 
1341 	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1342 		ETHER_VLAN_ENCAP_LEN;
1343 
1344 	return (0);
1345 }
1346 
/*
 * ixl_if_media_status: iflib callback to report the current media.
 *
 * Maps the firmware-reported PHY type to the closest ifmedia type and
 * appends flow-control pause flags from the link's autoneg info.
 * When the link is down only IFM_AVALID/IFM_ETHER are reported.
 */
static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}
1479 
1480 static int
1481 ixl_if_media_change(if_ctx_t ctx)
1482 {
1483 	struct ifmedia *ifm = iflib_get_media(ctx);
1484 
1485 	INIT_DEBUGOUT("ixl_media_change: begin");
1486 
1487 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1488 		return (EINVAL);
1489 
1490 	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1491 	return (ENODEV);
1492 }
1493 
1494 static int
1495 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1496 {
1497 	struct ixl_pf *pf = iflib_get_softc(ctx);
1498 	struct ixl_vsi *vsi = &pf->vsi;
1499 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1500 	struct i40e_hw	*hw = vsi->hw;
1501 	int		err;
1502 	bool		uni = FALSE, multi = FALSE;
1503 
1504 	if (flags & IFF_PROMISC)
1505 		uni = multi = TRUE;
1506 	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1507 	    MAX_MULTICAST_ADDR)
1508 		multi = TRUE;
1509 
1510 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1511 	    vsi->seid, uni, NULL, true);
1512 	if (err)
1513 		return (err);
1514 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1515 	    vsi->seid, multi, NULL);
1516 	return (err);
1517 }
1518 
1519 static void
1520 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1521 {
1522 	if (qid != 0)
1523 		return;
1524 
1525 	/* Fire off the adminq task */
1526 	iflib_admin_intr_deferred(ctx);
1527 }
1528 
1529 static void
1530 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1531 {
1532 	struct ixl_pf *pf = iflib_get_softc(ctx);
1533 	struct ixl_vsi *vsi = &pf->vsi;
1534 	struct i40e_hw	*hw = vsi->hw;
1535 
1536 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1537 		return;
1538 
1539 	++vsi->num_vlans;
1540 	ixl_add_filter(vsi, hw->mac.addr, vtag);
1541 }
1542 
1543 static void
1544 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1545 {
1546 	struct ixl_pf *pf = iflib_get_softc(ctx);
1547 	struct ixl_vsi *vsi = &pf->vsi;
1548 	struct i40e_hw	*hw = vsi->hw;
1549 
1550 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1551 		return;
1552 
1553 	--vsi->num_vlans;
1554 	ixl_del_filter(vsi, hw->mac.addr, vtag);
1555 }
1556 
1557 static uint64_t
1558 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1559 {
1560 	struct ixl_pf *pf = iflib_get_softc(ctx);
1561 	struct ixl_vsi *vsi = &pf->vsi;
1562 	if_t ifp = iflib_get_ifp(ctx);
1563 
1564 	switch (cnt) {
1565 	case IFCOUNTER_IPACKETS:
1566 		return (vsi->ipackets);
1567 	case IFCOUNTER_IERRORS:
1568 		return (vsi->ierrors);
1569 	case IFCOUNTER_OPACKETS:
1570 		return (vsi->opackets);
1571 	case IFCOUNTER_OERRORS:
1572 		return (vsi->oerrors);
1573 	case IFCOUNTER_COLLISIONS:
1574 		/* Collisions are by standard impossible in 40G/10G Ethernet */
1575 		return (0);
1576 	case IFCOUNTER_IBYTES:
1577 		return (vsi->ibytes);
1578 	case IFCOUNTER_OBYTES:
1579 		return (vsi->obytes);
1580 	case IFCOUNTER_IMCASTS:
1581 		return (vsi->imcasts);
1582 	case IFCOUNTER_OMCASTS:
1583 		return (vsi->omcasts);
1584 	case IFCOUNTER_IQDROPS:
1585 		return (vsi->iqdrops);
1586 	case IFCOUNTER_OQDROPS:
1587 		return (vsi->oqdrops);
1588 	case IFCOUNTER_NOPROTO:
1589 		return (vsi->noproto);
1590 	default:
1591 		return (if_get_counter_default(ifp, cnt));
1592 	}
1593 }
1594 
#ifdef PCI_IOV
/*
 * ixl_if_vflr_handle: iflib callback run when a VF Function Level
 * Reset (VFLR) has been signaled; defers to the PF's VFLR handler.
 */
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif
1604 
1605 static int
1606 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1607 {
1608 	struct ixl_pf		*pf = iflib_get_softc(ctx);
1609 
1610 	if (pf->read_i2c_byte == NULL)
1611 		return (EINVAL);
1612 
1613 	for (int i = 0; i < req->len; i++)
1614 		if (pf->read_i2c_byte(pf, req->offset + i,
1615 		    req->dev_addr, &req->data[i]))
1616 			return (EIO);
1617 	return (0);
1618 }
1619 
1620 static int
1621 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1622 {
1623 	struct ixl_pf *pf = iflib_get_softc(ctx);
1624 	struct ifdrv *ifd = (struct ifdrv *)data;
1625 	int error = 0;
1626 
1627 	/*
1628 	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOGPRIVATE_0 without
1629 	 * performing privilege checks. It is important that this function
1630 	 * perform the necessary checks for commands which should only be
1631 	 * executed by privileged threads.
1632 	 */
1633 
1634 	switch(command) {
1635 	case SIOCGDRVSPEC:
1636 	case SIOCSDRVSPEC:
1637 		/* NVM update command */
1638 		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1639 			error = priv_check(curthread, PRIV_DRIVER);
1640 			if (error)
1641 				break;
1642 			error = ixl_handle_nvmupd_cmd(pf, ifd);
1643 		} else {
1644 			error = EINVAL;
1645 		}
1646 		break;
1647 	default:
1648 		error = EOPNOTSUPP;
1649 	}
1650 
1651 	return (error);
1652 }
1653 
1654 static u_int
1655 ixl_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
1656 {
1657 	struct ixl_vsi *vsi = arg;
1658 
1659 	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
1660 	return (1);
1661 }
1662 
1663 /*
1664  * Sanity check and save off tunable values.
1665  */
1666 static void
1667 ixl_save_pf_tunables(struct ixl_pf *pf)
1668 {
1669 	device_t dev = pf->dev;
1670 
1671 	/* Save tunable information */
1672 	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1673 	pf->dbg_mask = ixl_core_debug_mask;
1674 	pf->hw.debug_mask = ixl_shared_debug_mask;
1675 	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1676 	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1677 #if 0
1678 	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1679 	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1680 #endif
1681 
1682 	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1683 		pf->i2c_access_method = 0;
1684 	else
1685 		pf->i2c_access_method = ixl_i2c_access_method;
1686 
1687 	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1688 		device_printf(dev, "Invalid tx_itr value of %d set!\n",
1689 		    ixl_tx_itr);
1690 		device_printf(dev, "tx_itr must be between %d and %d, "
1691 		    "inclusive\n",
1692 		    0, IXL_MAX_ITR);
1693 		device_printf(dev, "Using default value of %d instead\n",
1694 		    IXL_ITR_4K);
1695 		pf->tx_itr = IXL_ITR_4K;
1696 	} else
1697 		pf->tx_itr = ixl_tx_itr;
1698 
1699 	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1700 		device_printf(dev, "Invalid rx_itr value of %d set!\n",
1701 		    ixl_rx_itr);
1702 		device_printf(dev, "rx_itr must be between %d and %d, "
1703 		    "inclusive\n",
1704 		    0, IXL_MAX_ITR);
1705 		device_printf(dev, "Using default value of %d instead\n",
1706 		    IXL_ITR_8K);
1707 		pf->rx_itr = IXL_ITR_8K;
1708 	} else
1709 		pf->rx_itr = ixl_rx_itr;
1710 }
1711 
1712